diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2a20d82c2..cca3b6d67 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,39 +10,25 @@ jobs:
     strategy:
       matrix:
-        go-version: [1.12.x, 1.13.x]
-        os: [ubuntu-16.04]
+        go-version: [1.18]
+        os: [ubuntu-18.04]
     runs-on: ${{ matrix.os }}
     steps:
     - name: Set up Go
-      uses: actions/setup-go@v2
+      uses: actions/setup-go@v3
      with:
        go-version: ${{ matrix.go-version }}
+        check-latest: true

     - name: Checkout project
-      uses: actions/checkout@v2
-
-    - name: Symlink source into GOPATH for api
-      run: |
-        mkdir -p $(go env GOPATH)/src/github.com/sodafoundation/api
-        sudo ln -s $(pwd) $(go env GOPATH)/src/github.com/sodafoundation/api
-
-    - name: Create the Folder structure and symlink to opensds (TODO:-Fix vendor module from opensds to sodafoundation)
-      run: |
-        echo "Check current dir: \n"
-        pwd
-        echo "GOPATH: $(go env GOPATH)"
-        sudo mkdir -p $(go env GOPATH)/src/github.com/opensds
-        sudo ln -s $(go env GOPATH)/src/github.com/sodafoundation $(go env GOPATH)/src/github.com/opensds
+      uses: actions/checkout@v3

     - name: Install Pre-requisites
       run: |
         sudo apt-get update
-        sudo apt-get install -y build-essential gcc
-        sudo apt-get install -y librados-dev librbd-dev
-        sudo apt-get install -y lvm2 tgt open-iscsi
-        sudo docker pull p1c2u/openapi-spec-validator
+        sudo apt-get install -y build-essential gcc librados-dev librbd-dev lvm2 tgt open-iscsi
+        docker pull p1c2u/openapi-spec-validator

     - name: Build the binaries
       run: |
@@ -53,7 +39,7 @@ jobs:
       run: ./install/CI/coverage && ./install/CI/test

     - name: After success run Codecov Coverage tool.
-      uses: codecov/codecov-action@v1
+      uses: codecov/codecov-action@v2

     - name: Clean up the build
       run: |
diff --git a/go.mod b/go.mod
index b4d55d332..e29e8737e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,59 +1,60 @@
 module github.com/sodafoundation/api

-go 1.12
+go 1.18

 require (
-	github.com/LINBIT/godrbdutils v0.0.0-20180425110027-65b98a0f103a // indirect
-	github.com/RoaringBitmap/roaring v0.4.21 // indirect
-	github.com/appleboy/easyssh-proxy v1.2.0 // indirect
-	github.com/astaxie/beego v1.12.0
-	github.com/beorn7/perks v1.0.0 // indirect
-	github.com/cenkalti/backoff v2.2.1+incompatible // indirect
-	github.com/ceph/go-ceph v0.0.0-20170728144007-81e4191e131b // indirect
-	github.com/coreos/bbolt v1.3.3 // indirect
-	github.com/coreos/etcd v3.3.11+incompatible
-	github.com/coreos/go-semver v0.3.0 // indirect
-	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
-	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
-	github.com/frankban/quicktest v1.6.0 // indirect
-	github.com/getkin/kin-openapi v0.2.0
+	github.com/astaxie/beego v1.12.3
+	github.com/getkin/kin-openapi v0.94.0
 	github.com/go-ini/ini v1.41.0
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
-	github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
-	github.com/golang/protobuf v1.4.1
-	github.com/google/btree v1.0.0 // indirect
+	github.com/golang/protobuf v1.5.2
 	github.com/gophercloud/gophercloud v0.0.0-20190528082055-3ad89c4ea008
-	github.com/gorilla/websocket v1.4.1 // indirect
-	github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 // indirect
-	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway v1.12.1 // indirect
+	github.com/satori/go.uuid v1.2.0
+	github.com/sodafoundation/controller v1.1.0
+	github.com/sodafoundation/dock v1.3.0
+	github.com/spf13/cobra v1.4.0
+	github.com/stretchr/testify v1.7.1
+	go.etcd.io/etcd/client/pkg/v3 v3.5.2
+	go.etcd.io/etcd/client/v3 v3.5.2
+	google.golang.org/grpc v1.38.0
+	gopkg.in/yaml.v2 v2.4.0
+)
+
+require (
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.1 // indirect
+	github.com/coreos/go-semver v0.3.0 // indirect
+	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/ghodss/yaml v1.0.0 // indirect
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/swag v0.19.5 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/gorilla/mux v1.8.0 // indirect
+	github.com/hashicorp/golang-lru v0.5.4 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/jonboulle/clockwork v0.1.0 // indirect
-	github.com/mitchellh/copystructure v1.0.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.1.2 // indirect
-	github.com/netapp/trident v19.10.0+incompatible // indirect
-	github.com/pierrec/lz4 v2.3.0+incompatible // indirect
-	github.com/prometheus/client_golang v0.9.2 // indirect
-	github.com/prometheus/common v0.3.0 // indirect
-	github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0 // indirect
-	github.com/satori/go.uuid v1.2.0
-	github.com/segmentio/kafka-go v0.2.2 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_golang v1.11.0 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.26.0 // indirect
+	github.com/prometheus/procfs v0.6.0 // indirect
 	github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644 // indirect
-	github.com/smartystreets/goconvey v1.6.4 // indirect
-	github.com/soheilhy/cmux v0.1.4 // indirect
-	github.com/spf13/cobra v0.0.3
-	github.com/stretchr/testify v1.4.0
-	github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
-	github.com/ugorji/go v1.1.7 // indirect
-	github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
-	go.etcd.io/bbolt v1.3.3 // indirect
-	go.uber.org/zap v1.12.0 // indirect
-	golang.org/x/net v0.0.0-20200506145744-7e3656a0809f // indirect
-	golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f // indirect
-	golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
-	google.golang.org/genproto v0.0.0-20200507105951-43844f6eee31 // indirect
-	google.golang.org/grpc v1.29.1
-	gopkg.in/ini.v1 v1.50.0 // indirect
-	gopkg.in/yaml.v2 v2.2.4
-	k8s.io/api v0.17.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/stretchr/objx v0.1.1 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.2 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
+	go.uber.org/zap v1.17.0 // indirect
+	golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 // indirect
+	golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect
+	golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 // indirect
+	golang.org/x/text v0.3.5 // indirect
+	google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
+	google.golang.org/protobuf v1.26.0 // indirect
+	gopkg.in/ini.v1 v1.51.0 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
diff --git a/go.sum b/go.sum
index 0c56c58d6..1be2a25e6 100644
--- a/go.sum
+++ b/go.sum
@@ -1,60 +1,68 @@
 cloud.google.com/go v0.26.0/go.mod
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LINBIT/godrbdutils v0.0.0-20180425110027-65b98a0f103a h1:eYm/KPAarghZq5BGxqEWyOcrIdA3lDya2wDDzIVgRe8= github.com/LINBIT/godrbdutils v0.0.0-20180425110027-65b98a0f103a/go.mod h1:BwaNH2Y7xU4sn0OJj7EMT2ZkUjhBg3Lzp6Hae/0g2+Q= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OwnLocal/goes v1.0.0/go.mod h1:8rIFjBGTue3lCU0wplczcUgt9Gxgrkkrw7etMIcn8TM= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8= github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/appleboy/easyssh-proxy v1.2.0 h1:KvaUGC18WkBFet+N1oofQy03jkC5HaKFn2XGxFxCTtg= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/appleboy/easyssh-proxy v1.2.0/go.mod h1:vHskChUNhxwW4dXMe2MNE/k+UBCkBagrQDm70UWZrS0= -github.com/astaxie/beego v1.11.1 h1:6DESefxW5oMcRLFRKi53/6exzup/IR6N4EzzS1n6CnQ= -github.com/astaxie/beego v1.11.1/go.mod h1:i69hVzgauOPSw5qeyF4GVZhn7Od0yG5bbCGzmhbWxgQ= -github.com/astaxie/beego v1.12.0 h1:MRhVoeeye5N+Flul5PoVfD9CslfdoH+xqC/xvSQ5u2Y= github.com/astaxie/beego v1.12.0/go.mod h1:fysx+LZNZKnvh4GED/xND7jWtjCR6HzydR2Hh2Im57o= +github.com/astaxie/beego v1.12.3 h1:SAQkdD2ePye+v8Gn1r4X6IKZM1wd28EyUOVQ3PDSOOQ= +github.com/astaxie/beego v1.12.3/go.mod h1:p3qIm0Ryx7zeBHLljmd7omloyca1s4yu1a8kM1FkpIA= github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ= github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU= -github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff/go.mod h1:PhH1ZhyCzHKt4uAasyx+ljRCgoezetRNf59CUtwUkqY= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/ceph/go-ceph v0.0.0-20170728144007-81e4191e131b h1:1rTBL4h48Zi9z/8rBEDF2l49ab7Mg8Ji61qRnwcwVsw= github.com/ceph/go-ceph v0.0.0-20170728144007-81e4191e131b/go.mod h1:DhWkbjUxN0QRc0xQvpI9QhzqQSzYysRuZVcqSfiStds= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.11+incompatible h1:0gCnqKsq7XxMi69JsnbmMc1o+RJH3XH64sV9aiTTYko= github.com/coreos/etcd v3.3.11+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= +github.com/couchbase/go-couchbase v0.0.0-20200519150804-63f3cdb75e0d/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= +github.com/couchbase/gomemcached v0.0.0-20200526233749-ec430f949808/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY= 
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/go-elasticsearch/v6 v6.8.5/go.mod h1:UwaDJsD3rWLM5rKNFzv9hgox93HoX8utj1kxD9aFUcI= github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk= github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -62,191 +70,228 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/frankban/quicktest v1.6.0 h1:Cd62nl66vQsx8Uv1t8M0eICyxIwZG7MxiAOrdnnUSW0= github.com/frankban/quicktest v1.6.0/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/getkin/kin-openapi v0.2.0 h1:PbHHtYZpjKwZtGlIyELgA2DploRrsaXztoNNx9HjwNY= github.com/getkin/kin-openapi v0.2.0/go.mod h1:V1z9xl9oF5Wt7v32ne4FmiF1alpS4dM6mNzoywPOXlk= +github.com/getkin/kin-openapi v0.94.0 h1:bAxg2vxgnHHHoeefVdmGbR+oxtJlcv5HsJJa3qmAHuo= +github.com/getkin/kin-openapi v0.94.0/go.mod h1:LWZfzOd7PRy8GJ1dJ6mCU6tNdSfOwRac1BUPam4aw6Q= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= +github.com/glendc/gopher-json v0.0.0-20170414221815-dc4743023d0c/go.mod h1:Gja1A+xZ9BoviGJNA2E9vFkPjjsl+CoJxSXiQM1UXtw= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod 
h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-ini/ini v1.41.0 h1:526aoxDtxRHFQKMZfcX2OG9oOI8TJ5yPLM0Mkno/uTY= github.com/go-ini/ini v1.41.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= 
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190528082055-3ad89c4ea008 h1:4wWjNKaXmJBuoGZqgNBBzNvXy3vthJtOQfS0V/JfQ0A= github.com/gophercloud/gophercloud v0.0.0-20190528082055-3ad89c4ea008/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go 
v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/ledisdb/ledisdb v0.0.0-20200510135210-d35789ec47e6/go.mod h1:n931TsDuKuq+uX4v1fulaMbA/7ZLLhjc85h7chZGBCQ= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/netapp/trident v19.10.0+incompatible h1:CW3bMGMD7wAqiKMqTXfbU9dlVZ3lQS2XZQ8d1aFXOJI= github.com/netapp/trident v19.10.0+incompatible/go.mod h1:Y/ApJGWR8gPjIMX11FbrJy4gvTS3coV+XFopqAWeMwM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= +github.com/peterh/liner v1.0.1-0.20171122030339-3681c2a91233/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk= github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.0/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.3.0 h1:taZ4h8Tkxv2kNyoSctBvfXEHmBmxrwmIidZTIaHons4= github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0 h1:c8R11WC8m7KNMkTv/0+Be8vvwo4I3/Ut9AC2FW8fX3U= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/segmentio/kafka-go v0.2.2 h1:KIUln5unPisRL2yyAkZsDR/coiymN9Djunv6JKGQ6JI= github.com/segmentio/kafka-go v0.2.2/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644 h1:X+yvsM2yrEktyI+b2qND5gpH8YhURn0k8OCaeRnkINo= github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg= +github.com/siddontang/go v0.0.0-20170517070808-cb568a3e5cc0/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= +github.com/siddontang/goredis v0.0.0-20150324035039-760763f78400/go.mod h1:DDcKzU3qCuvj/tPnimWSsZZzvk9qvkvrIL5naVBPh5s= github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg= github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/sodafoundation/controller v1.1.0 h1:P0Jv6xWueN7V6LHd/HgvMsThVgkvMo6Th1DQej6lVYU= +github.com/sodafoundation/controller v1.1.0/go.mod h1:z5OoLJ0TaQilZ9ujYas10T1Z1Op2gDnM4zIW4K5ljMQ= +github.com/sodafoundation/dock v1.3.0 h1:Vs0epg3WOIbz8vwd78t+YWwm2Dg0JAmJjrpI/iAondA= +github.com/sodafoundation/dock v1.3.0/go.mod h1:++aL9Hrz3m5TwudbC1LOuGdjRiYdJdQMi44EAG6gVLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -256,96 +301,130 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/goleveldb v0.0.0-20160425020131-cfa635847112/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v0.0.0-20171122102828-84cb69a8af83/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/gopher-lua v0.0.0-20171031051903-609c9cd26973/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd/api/v3 v3.5.2 h1:tXok5yLlKyuQ/SXSjtqHc4uzNaMqZi2XsoSPr/LlJXI= +go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.2 h1:4hzqQ6hIb3blLyQ8usCU4h3NghkqcsohEQ3o3VetYxE= +go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v3 v3.5.2 h1:WdnejrUtQC4nCxK0/dLTMqKOB+U5TP/2Ya0BJL+1otA= +go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o= go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.12.0 h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0 h1:2mqDk8w/o6UmeUCu5Qiq2y7iMf6anbx+YA8d1JFoFrs= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= +golang.org/x/sys 
v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -358,65 +437,83 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c h1:hrpEMCZ2O7DR5gC1n2AJGVhrwiEjOi35+jxtIuZpTMo= google.golang.org/genproto 
v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20200507105951-43844f6eee31 h1:Bz1qTn2YRWV+9OKJtxHJiQKCiXIdf+kwuKXdt9cBxyU= google.golang.org/genproto v0.0.0-20200507105951-43844f6eee31/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.50.0 h1:c/4YI/GUgB7d2yOkxdsQyYDhW67nWrTl6Zyd9vagYmg= gopkg.in/ini.v1 v1.50.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM= k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= -k8s.io/apimachinery v0.17.0 h1:xRBnuie9rXcPxUkDizUsGvPf1cnlZCFu210op7J7LJo= k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= 
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/install/CI/coverage b/install/CI/coverage index 7a4027dc4..c2ec0f59f 100755 --- a/install/CI/coverage +++ b/install/CI/coverage @@ -20,7 +20,7 @@ echo "" > coverage.txt MODEL_PACKAGE=github.com/sodafoundation/api/pkg/model PROTOBUF_PACKAGE=github.com/sodafoundation/api/pkg/model/proto -for testpkg in $(go list ./osdsctl/... ./client/... ./pkg/... ./contrib/...); do +for testpkg in $(go list ./osdsctl/... ./client/... ./pkg/...); do test $testpkg == "$MODEL_PACKAGE" && continue test $testpkg == "$PROTOBUF_PACKAGE" && continue go test -race -covermode=atomic -coverprofile=profile.out "$testpkg" diff --git a/pkg/api/filter/validation/validation.go b/pkg/api/filter/validation/validation.go index f7d328217..0206146ef 100644 --- a/pkg/api/filter/validation/validation.go +++ b/pkg/api/filter/validation/validation.go @@ -24,24 +24,27 @@ import ( bctx "github.com/astaxie/beego/context" "github.com/getkin/kin-openapi/openapi3" "github.com/getkin/kin-openapi/openapi3filter" + "github.com/getkin/kin-openapi/routers/gorillamux" "github.com/golang/glog" myctx "github.com/sodafoundation/api/pkg/context" ) // Factory returns a fiter function of api request func Factory(filename string) beego.FilterFunc { - swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromFile(filename) + swagger, err := openapi3.NewLoader().LoadFromFile(filename) if err != nil { glog.Warningf("error loading %s api swagger file: %s", filename, err) return func(httpCtx *bctx.Context) {} } + _ = swagger.Validate(context.TODO()) + // Server is not required for finding route swagger.Servers = nil - router := openapi3filter.NewRouter().WithSwagger(swagger) + router, _ := gorillamux.NewRouter(swagger) return func(httpCtx *bctx.Context) { req := httpCtx.Request - route, pathParams, err := router.FindRoute(req.Method, req.URL) + route, pathParams, err := router.FindRoute(req) if err != nil { glog.Errorf("failed to find route from swagger: %s", err) myctx.HttpError(httpCtx, http.StatusBadRequest, "failed to find route %s:%s from swagger: %s", req.Method, req.URL, err) diff --git a/pkg/db/drivers/etcd/client.go b/pkg/db/drivers/etcd/client.go index a5c006cc6..71e45dfc9 100755 --- a/pkg/db/drivers/etcd/client.go +++ b/pkg/db/drivers/etcd/client.go @@ -20,11 +20,11 @@ import ( "sync" "time" - "github.com/coreos/etcd/pkg/transport" "github.com/sodafoundation/api/pkg/utils/config" "github.com/sodafoundation/api/pkg/utils/pwd" + "go.etcd.io/etcd/client/pkg/v3/transport" + clientv3 "go.etcd.io/etcd/client/v3" - "github.com/coreos/etcd/clientv3" log "github.com/golang/glog" "github.com/sodafoundation/api/pkg/utils" ) diff --git a/pkg/utils/daemon/daemon_test.go b/pkg/utils/daemon/daemon_test.go index 883dbf8f5..e6f226978 100644 --- a/pkg/utils/daemon/daemon_test.go +++ b/pkg/utils/daemon/daemon_test.go @@ -21,6 +21,7 @@ import ( "os" "os/exec" "strings" + "syscall" "testing" ) @@ -95,7 +96,7 @@ func writeToTestFile(t *testing.T, s string) { } func TestHelperProcess(t *testing.T) { - defer os.Exit(0) + defer syscall.Exit(0) args := os.Args for len(args) > 0 { if args[0] == "--" { @@ -106,7 +107,7 @@ func TestHelperProcess(t *testing.T) { } if len(args) == 0 { fmt.Fprint(os.Stderr, "No 
command\n") - os.Exit(0) + syscall.Exit(0) } writeToTestFile(t, strings.Join(args, " ")) diff --git a/test/e2e/connector/connector.go b/test/e2e/connector/connector.go index aaf2aecc0..a56775927 100644 --- a/test/e2e/connector/connector.go +++ b/test/e2e/connector/connector.go @@ -19,9 +19,9 @@ import ( "fmt" "os" - "github.com/sodafoundation/api/contrib/connector" - _ "github.com/sodafoundation/api/contrib/connector/iscsi" - _ "github.com/sodafoundation/api/contrib/connector/nvmeof" + "github.com/sodafoundation/dock/contrib/connector" + _ "github.com/sodafoundation/dock/contrib/connector/iscsi" + _ "github.com/sodafoundation/dock/contrib/connector/nvmeof" ) const ( @@ -44,7 +44,7 @@ func main() { } accPro := os.Args[3] - fmt.Printf("accpro: %s",accPro) + fmt.Printf("accpro: %s", accPro) switch os.Args[1] { case attachCommand: dev, err := connector.NewConnector(accPro).Attach(connData) diff --git a/test/integration/controller_test.go b/test/integration/controller_test.go index 940c04b0c..14992b270 100755 --- a/test/integration/controller_test.go +++ b/test/integration/controller_test.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build integration // +build integration package integration @@ -20,10 +21,10 @@ import ( "reflect" "testing" - "github.com/sodafoundation/api/pkg/controller/volume" "github.com/sodafoundation/api/pkg/model" pb "github.com/sodafoundation/api/pkg/model/proto" . "github.com/sodafoundation/api/testutils/collection" + "github.com/sodafoundation/controller/pkg/controller/volume" ) var ( diff --git a/vendor/github.com/LINBIT/godrbdutils/LICENSE b/vendor/github.com/LINBIT/godrbdutils/LICENSE deleted file mode 100644 index caf844029..000000000 --- a/vendor/github.com/LINBIT/godrbdutils/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2017-2018 Roland Kammerer - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/LINBIT/godrbdutils/README.md b/vendor/github.com/LINBIT/godrbdutils/README.md deleted file mode 100644 index c7705f726..000000000 --- a/vendor/github.com/LINBIT/godrbdutils/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# godrbdutils -Golang library for drbdadm/drbdsetup. For now don't expect the API to be stable in any way. 
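For readers following the pkg/api/filter/validation/validation.go hunk above: it swaps the removed openapi3filter router for kin-openapi's gorillamux router and the new NewLoader/LoadFromFile entry points. The sketch below is a minimal, standalone illustration of that routing-and-validation flow, not the project's actual filter; the spec path and request path are placeholders.

```go
// Minimal sketch of the kin-openapi >= v0.94 routing flow adopted above.
// The file name "openapi-spec/swagger.yaml" and the request path are
// placeholders, not the project's real values.
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/getkin/kin-openapi/openapi3"
	"github.com/getkin/kin-openapi/openapi3filter"
	"github.com/getkin/kin-openapi/routers/gorillamux"
)

func main() {
	ctx := context.Background()

	// Load the OpenAPI document and run the (now explicit) validation step.
	doc, err := openapi3.NewLoader().LoadFromFile("openapi-spec/swagger.yaml")
	if err != nil {
		log.Fatalf("loading spec: %v", err)
	}
	if err := doc.Validate(ctx); err != nil {
		log.Printf("spec validation warning: %v", err)
	}

	// Server entries are not needed for route matching.
	doc.Servers = nil

	router, err := gorillamux.NewRouter(doc)
	if err != nil {
		log.Fatalf("building router: %v", err)
	}

	// Route and validate a single incoming request.
	req, _ := http.NewRequest(http.MethodGet, "/v1beta/block/volumes", nil)
	route, pathParams, err := router.FindRoute(req)
	if err != nil {
		log.Fatalf("no matching route: %v", err)
	}
	err = openapi3filter.ValidateRequest(ctx, &openapi3filter.RequestValidationInput{
		Request:    req,
		PathParams: pathParams,
		Route:      route,
	})
	if err != nil {
		log.Printf("request rejected: %v", err)
	}
}
```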
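Similarly, the pkg/db/drivers/etcd/client.go hunk above moves from the legacy github.com/coreos/etcd packages to the split go.etcd.io/etcd modules pinned in go.mod (client/v3 and client/pkg/v3). Below is a minimal sketch of connecting with the new import paths; the endpoint, key, and certificate paths are placeholders, not the driver's real configuration.

```go
// Minimal sketch of the etcd v3.5 client wiring using the new import paths.
// Endpoint, key names, and certificate locations are placeholders.
package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/client/pkg/v3/transport"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Optional TLS setup via the relocated transport package.
	tlsInfo := transport.TLSInfo{
		CertFile:      "/etc/certs/etcd-client.crt", // placeholder path
		KeyFile:       "/etc/certs/etcd-client.key", // placeholder path
		TrustedCAFile: "/etc/certs/ca.crt",          // placeholder path
	}
	tlsConfig, err := tlsInfo.ClientConfig()
	if err != nil {
		log.Fatalf("building TLS config: %v", err)
	}

	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
		TLS:         tlsConfig,
	})
	if err != nil {
		log.Fatalf("connecting to etcd: %v", err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if _, err := cli.Put(ctx, "/sample/key", "sample-value"); err != nil {
		log.Fatalf("put failed: %v", err)
	}
	resp, err := cli.Get(ctx, "/sample/key")
	if err != nil {
		log.Fatalf("get failed: %v", err)
	}
	for _, kv := range resp.Kvs {
		log.Printf("%s = %s", kv.Key, kv.Value)
	}
}
```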
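One small note on the test/integration/controller_test.go hunk: the added //go:build integration line is the constraint syntax introduced in Go 1.17, kept alongside the legacy // +build line for older toolchains. A file guarded this way is compiled only when the tag is supplied, for example:

```go
//go:build integration
// +build integration

// This file is excluded from ordinary builds and test runs; it is compiled
// only when the build tag is passed explicitly, e.g.:
//
//	go test -tags integration ./test/integration/...
package integration
```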
diff --git a/vendor/github.com/LINBIT/godrbdutils/action_string.go b/vendor/github.com/LINBIT/godrbdutils/action_string.go deleted file mode 100644 index 639e4c72c..000000000 --- a/vendor/github.com/LINBIT/godrbdutils/action_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=Action"; DO NOT EDIT - -package godrbdutils - -import "fmt" - -const _Action_name = "UpDownAdjustAttachDetachConnectDisconnectPrimarySecondaryCreate_md" - -var _Action_index = [...]uint8{0, 2, 6, 12, 18, 24, 31, 41, 48, 57, 66} - -func (i Action) String() string { - if i < 0 || i >= Action(len(_Action_index)-1) { - return fmt.Sprintf("Action(%d)", i) - } - return _Action_name[_Action_index[i]:_Action_index[i+1]] -} diff --git a/vendor/github.com/LINBIT/godrbdutils/cmd_string.go b/vendor/github.com/LINBIT/godrbdutils/cmd_string.go deleted file mode 100644 index 35629f5a3..000000000 --- a/vendor/github.com/LINBIT/godrbdutils/cmd_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=Cmd"; DO NOT EDIT - -package godrbdutils - -import "fmt" - -const _Cmd_name = "DrbdsetupDrbdadm" - -var _Cmd_index = [...]uint8{0, 9, 16} - -func (i Cmd) String() string { - if i < 0 || i >= Cmd(len(_Cmd_index)-1) { - return fmt.Sprintf("Cmd(%d)", i) - } - return _Cmd_name[_Cmd_index[i]:_Cmd_index[i+1]] -} diff --git a/vendor/github.com/LINBIT/godrbdutils/drbdutils.go b/vendor/github.com/LINBIT/godrbdutils/drbdutils.go deleted file mode 100644 index 9697b5fa5..000000000 --- a/vendor/github.com/LINBIT/godrbdutils/drbdutils.go +++ /dev/null @@ -1,152 +0,0 @@ -package godrbdutils - -//go:generate stringer -type=Cmd -//go:generate stringer -type=Action - -import ( - "context" - "fmt" - "os/exec" - "strings" - "time" -) - -type Cmd int -type Action int - -const ( - Drbdsetup Cmd = iota - Drbdadm -) - -const ( - Up Action = iota - Down - Adjust - Attach - Detach - Connect - Disconnect - Primary - Secondary - Create_md -) - -type DrbdCmd struct { - cmd Cmd - action Action - res []string - arg []string - timeout time.Duration -} - -type DrbdAdm struct { - res []string - timeout time.Duration -} - -func NewDrbdAdm(res []string) *DrbdAdm { - return &DrbdAdm{res: res} -} - -func (a *DrbdAdm) Up(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Up, a.res, a.timeout, arg...) -} -func (a *DrbdAdm) Down(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Down, a.res, a.timeout, arg...) -} -func (a *DrbdAdm) Adjust(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Adjust, a.res, a.timeout, arg...) -} -func (a *DrbdAdm) Attach(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Attach, a.res, a.timeout, arg...) -} -func (a *DrbdAdm) Detach(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Detach, a.res, a.timeout, arg...) -} -func (a *DrbdAdm) Connect(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Connect, a.res, a.timeout, arg...) -} -func (a *DrbdAdm) Disconnect(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Disconnect, a.res, a.timeout, arg...) -} -func (a *DrbdAdm) Primary(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Primary, a.res, a.timeout, arg...) -} -func (a *DrbdAdm) Secondary(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Secondary, a.res, a.timeout, arg...) -} -func (a *DrbdAdm) CreateMetaData(arg ...string) ([]byte, error) { - return utilExec(Drbdadm, Create_md, a.res, a.timeout, arg...) 
-} - -func (a *DrbdAdm) SetTimeout(timeout time.Duration) { - a.timeout = timeout -} - -func NewDrbdCmd(cmd Cmd, action Action, res []string, arg ...string) (*DrbdCmd, error) { - c := DrbdCmd{ - cmd: cmd, - action: action, - res: res, - arg: []string{}, - } - c.arg = append(c.arg, arg...) - return &c, nil -} - -func (c *DrbdCmd) SetTimeout(timeout time.Duration) { - c.timeout = timeout -} - -func (c *DrbdCmd) CombinedOutput() ([]byte, error) { - if c.timeout == 0 { - return c.combinedOutput(nil) - } - - ctx, cancel := context.WithTimeout(context.Background(), c.timeout) - defer cancel() - - return c.combinedOutput(ctx) -} - -func (c *DrbdCmd) String() string { - return strings.Join(c.cmdSlice(), " ") -} - -func (c *DrbdCmd) cmdSlice() []string { - var s = []string{ - strings.ToLower(c.cmd.String()), - strings.Replace(strings.ToLower(c.action.String()), "_", "-", -1), - } - s = append(s, c.arg...) - for _, r := range c.res { - s = append(s, r) - } - return s -} - -func (c *DrbdCmd) combinedOutput(ctx context.Context) ([]byte, error) { - argv := c.cmdSlice() - if len(argv) < 2 { - return nil, fmt.Errorf("Command %v too short", argv) - } - - var cmd *exec.Cmd - if ctx != nil { - cmd = exec.CommandContext(ctx, argv[0], argv[1:]...) - } else { - cmd = exec.Command(argv[0], argv[1:]...) - } - - return cmd.CombinedOutput() -} - -func utilExec(cmd Cmd, action Action, res []string, to time.Duration, arg ...string) ([]byte, error) { - c, err := NewDrbdCmd(cmd, action, res, arg...) - if err != nil { - return nil, err - } - c.SetTimeout(to) - return c.CombinedOutput() -} diff --git a/vendor/github.com/LINBIT/godrbdutils/numbers.go b/vendor/github.com/LINBIT/godrbdutils/numbers.go deleted file mode 100644 index b0d7e0794..000000000 --- a/vendor/github.com/LINBIT/godrbdutils/numbers.go +++ /dev/null @@ -1,68 +0,0 @@ -package godrbdutils - -import ( - "errors" - "sort" - "sync" -) - -// GetNumber is used to return a free number within [min,max], where both are >=0 -// It can be used to allocate new Port/Minor numbers -func GetNumber(min, max int, used []int) (int, error) { - if max-min <= 0 || min < 0 || max < 0 { - return -1, errors.New("min and/or max not valid") - } - - // all free, use first one - if len(used) == 0 { - return min, nil - } - - if !sort.IntsAreSorted(used) { - sort.Ints(used) - } - - // use the next after current max if possible - curMax := used[len(used)-1] - if curMax < max { - return curMax + 1, nil - } - - // find a hole - for i := 0; i < len(used)-1; i++ { - cur := used[i] - if used[i+1]-cur > 1 { // found hole - candidate := cur + 1 - if candidate >= min && candidate <= max { - return candidate, nil - } - } - } - - return -1, errors.New("Could not find a free number") -} - -// NumberPool is used as a stateful type to keep track of used numbers -type NumberPool struct { - min, max int - used []int - sync.Mutex -} - -// NewNumberPool is used to allacte a new number pool -func NewNumberPool(min, max int, used []int) *NumberPool { - return &NumberPool{min: min, max: max, used: used} -} - -// Get is used to get a free number -func (n *NumberPool) Get() (int, error) { - n.Lock() - defer n.Unlock() - - num, err := GetNumber(n.min, n.max, n.used) - if err != nil { - return -1, err - } - n.used = append(n.used, num) - return num, nil -} diff --git a/vendor/github.com/LINBIT/godrbdutils/resource.go b/vendor/github.com/LINBIT/godrbdutils/resource.go deleted file mode 100644 index 60ee09090..000000000 --- a/vendor/github.com/LINBIT/godrbdutils/resource.go +++ /dev/null @@ -1,157 +0,0 @@ 
-package godrbdutils - -import ( - "bytes" - "fmt" - "io/ioutil" - "strings" - "sync" - "time" -) - -type Host struct { - ID int `yaml:"Node-ID,omitempty"` - Name string `yaml:"Hostname,omitempty"` - IP string `yaml:"IP,omitempty"` - volume map[int]volume `yaml:"-"` // key: volume ID -} - -// Volume is a DRBD volume -type volume struct { - id int // DRBD volume ID - backingDevice string - minor int -} - -// Resource is a DRBD resource -type Resource struct { - name string - port int - host map[string]Host // key: hostname - - sync.Mutex -} - -// NewResource returns a new DRBD resource object -func NewResource(name string, port int) *Resource { - return &Resource{name: name, port: port, host: make(map[string]Host)} -} - -func checkVolumes(h Host, v volume) error { - for _, hv := range h.volume { - if hv.id == v.id { - return fmt.Errorf("Host '%s' already has a volume with ID: '%d'", h.Name, v.id) - } - if hv.backingDevice == v.backingDevice { - return fmt.Errorf("Host '%s' already has a volume with Name: '%s'", h.Name, v.backingDevice) - } - if hv.minor == v.minor { - return fmt.Errorf("Host '%s' already has a volume with Minor: '%d'", h.Name, v.minor) - } - } - return nil -} - -// AddVolume adds DRBD volume information to a resource -func (r *Resource) AddVolume(id, minor int, backingDevice, hostname string) error { - v := volume{ - id: id, - minor: minor, - backingDevice: backingDevice, - } - - r.Lock() - defer r.Unlock() - - host, ok := r.host[hostname] - if !ok { - return fmt.Errorf("Could not find existing host with hostname: %v", hostname) - } - - if err := checkVolumes(host, v); err != nil { - return err - } - - host.volume[id] = v - r.host[hostname] = host - - return nil -} - -func checkHosts(r *Resource, h Host) error { - for _, rh := range r.host { - if rh.ID == h.ID { - return fmt.Errorf("Resource '%s' already contains host with Node-ID: '%d'", r.name, h.ID) - } - if rh.Name == h.Name { - return fmt.Errorf("Resource '%s' already contains host with Name: '%s'", r.name, h.Name) - } - if rh.IP == h.IP { - return fmt.Errorf("Resource '%s' already contains host with IP: '%s'", r.name, h.IP) - } - } - return nil -} - -// AddHost adds a host information to a resource -func (r *Resource) AddHost(id int, hostname, ip string) error { - h := Host{ - ID: id, - Name: hostname, - IP: ip, - volume: make(map[int]volume), - } - - r.Lock() - defer r.Unlock() - - if err := checkHosts(r, h); err != nil { - return err - } - r.host[hostname] = h - - return nil -} - -func indentf(level int, format string, a ...interface{}) string { - format = strings.Repeat(" ", level) + format - return fmt.Sprintf(format, a...) -} - -// WriteConfig writes the configuration of a DRBD resource to file parsable by drbd-utils -// It is up to the user to check for errors and to check if the file is valid (and to remove it if it isn't). 
-func (r *Resource) WriteConfig(filename string) error { - r.Lock() - defer r.Unlock() - - var b bytes.Buffer - - b.WriteString(fmt.Sprintf("# meta-data-json:{\"updated\": \"%s\"}\n", time.Now().UTC())) - b.WriteString(fmt.Sprintf("resource %s {\n", r.name)) - - var hosts []string - for _, h := range r.host { - hosts = append(hosts, h.Name) - - b.WriteString(indentf(1, "on %s {\n", h.Name)) - b.WriteString(indentf(2, "node-id %d;\n", h.ID)) - b.WriteString(indentf(2, "address %s:%d;\n", h.IP, r.port)) - for _, v := range h.volume { - b.WriteString(indentf(2, "volume %d {\n", v.id)) - b.WriteString(indentf(3, "device minor %d;\n", v.minor)) - b.WriteString(indentf(3, "disk %s;\n", v.backingDevice)) - b.WriteString(indentf(3, "meta-disk internal;\n")) - b.WriteString(indentf(2, "}\n")) // end volume section - } - b.WriteString(indentf(1, "}\n")) // end on section - b.WriteString("\n") - } - - b.WriteString(indentf(1, "connection-mesh {\n")) - b.WriteString(indentf(2, "hosts %s;\n", strings.Join(hosts, " "))) - b.WriteString(indentf(1, "}\n")) - - b.WriteString("}") // end resource section - - return ioutil.WriteFile(filename, b.Bytes(), 0644) -} diff --git a/vendor/github.com/RoaringBitmap/roaring/.drone.yml b/vendor/github.com/RoaringBitmap/roaring/.drone.yml deleted file mode 100644 index 698cd0e7a..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/.drone.yml +++ /dev/null @@ -1,20 +0,0 @@ -kind: pipeline -name: default - -workspace: - base: /go - path: src/github.com/RoaringBitmap/roaring - -steps: -- name: test - image: golang - commands: - - go get -t - - go test - - go test -race -run TestConcurrent* - - go build -tags appengine - - go test -tags appengine - - GOARCH=386 go build - - GOARCH=386 go test - - GOARCH=arm go build - - GOARCH=arm64 go build diff --git a/vendor/github.com/RoaringBitmap/roaring/.gitignore b/vendor/github.com/RoaringBitmap/roaring/.gitignore deleted file mode 100644 index b7943ab20..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*~ -roaring-fuzz.zip -workdir -coverage.out -testdata/all3.classic -testdata/all3.msgp.snappy diff --git a/vendor/github.com/RoaringBitmap/roaring/.gitmodules b/vendor/github.com/RoaringBitmap/roaring/.gitmodules deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/RoaringBitmap/roaring/.travis.yml b/vendor/github.com/RoaringBitmap/roaring/.travis.yml deleted file mode 100644 index 8839c14fd..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go -sudo: false -install: -- go get -t github.com/RoaringBitmap/roaring -- go get -t golang.org/x/tools/cmd/cover -- go get -t github.com/mattn/goveralls -- go get -t github.com/mschoch/smat -notifications: - email: false -go: -- "1.7.x" -- "1.8.x" -- "1.9.x" -- "1.10.x" -- "1.11.x" -- "1.12.x" -- tip - -# whitelist -branches: - only: - - master -script: -- goveralls -v -service travis-ci -ignore arraycontainer_gen.go,bitmapcontainer_gen.go,rle16_gen.go,rle_gen.go,roaringarray_gen.go,rle.go || go test -- go test -race -run TestConcurrent* -- go build -tags appengine -- go test -tags appengine -- GOARCH=arm64 go build -- GOARCH=386 go build -- GOARCH=386 go test -- GOARCH=arm go build -- GOARCH=arm64 go build - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/RoaringBitmap/roaring/AUTHORS b/vendor/github.com/RoaringBitmap/roaring/AUTHORS deleted file mode 100644 index 26ec99de9..000000000 --- 
a/vendor/github.com/RoaringBitmap/roaring/AUTHORS +++ /dev/null @@ -1,11 +0,0 @@ -# This is the official list of roaring authors for copyright purposes. - -Todd Gruben (@tgruben), -Daniel Lemire (@lemire), -Elliot Murphy (@statik), -Bob Potter (@bpot), -Tyson Maly (@tvmaly), -Will Glynn (@willglynn), -Brent Pedersen (@brentp) -Maciej Biłas (@maciej), -Joe Nall (@joenall) diff --git a/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS b/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS deleted file mode 100644 index b1e3a379f..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS +++ /dev/null @@ -1,16 +0,0 @@ -# This is the official list of roaring contributors - -Todd Gruben (@tgruben), -Daniel Lemire (@lemire), -Elliot Murphy (@statik), -Bob Potter (@bpot), -Tyson Maly (@tvmaly), -Will Glynn (@willglynn), -Brent Pedersen (@brentp), -Jason E. Aten (@glycerine), -Vali Malinoiu (@0x4139), -Forud Ghafouri (@fzerorubigd), -Joe Nall (@joenall), -(@fredim), -Edd Robinson (@e-dard), -Alexander Petrov (@alldroll) diff --git a/vendor/github.com/RoaringBitmap/roaring/LICENSE b/vendor/github.com/RoaringBitmap/roaring/LICENSE deleted file mode 100644 index 3ccdd0008..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/LICENSE +++ /dev/null @@ -1,235 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 by the authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -================================================================================ - -Portions of runcontainer.go are from the Go standard library, which is licensed -under: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt b/vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt deleted file mode 100644 index aff5f9999..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 by the authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/RoaringBitmap/roaring/Makefile b/vendor/github.com/RoaringBitmap/roaring/Makefile deleted file mode 100644 index 906bd7256..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/Makefile +++ /dev/null @@ -1,111 +0,0 @@ -.PHONY: help all test format fmtcheck vet lint qa deps clean nuke ser fetch-real-roaring-datasets - - - - - - - - -# Display general help about this command -help: - @echo "" - @echo "The following commands are available:" - @echo "" - @echo " make qa : Run all the tests" - @echo " make test : Run the unit tests" - @echo "" - @echo " make format : Format the source code" - @echo " make fmtcheck : Check if the source code has been formatted" - @echo " make vet : Check for suspicious constructs" - @echo " make lint : Check for style errors" - @echo "" - @echo " make deps : Get the dependencies" - @echo " make clean : Remove any build artifact" - @echo " make nuke : Deletes any intermediate file" - @echo "" - @echo " make fuzz-smat : Fuzzy testing with smat" - @echo " make fuzz-stream : Fuzzy testing with stream deserialization" - @echo " make fuzz-buffer : Fuzzy testing with buffer deserialization" - @echo "" - -# Alias for help target -all: help -test: - go test - go test -race -run TestConcurrent* -# Format the source code -format: - @find ./ -type f -name "*.go" -exec gofmt -w {} \; - -# Check if the source code has been formatted -fmtcheck: - @mkdir -p target - @find ./ -type f -name "*.go" -exec gofmt -d {} \; | tee target/format.diff - @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; } - -# Check for syntax errors -vet: - GOPATH=$(GOPATH) go vet ./... - -# Check for style errors -lint: - GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint ./... - - - - - -# Alias to run all quality-assurance checks -qa: fmtcheck test vet lint - -# --- INSTALL --- - -# Get the dependencies -deps: - GOPATH=$(GOPATH) go get github.com/stretchr/testify - GOPATH=$(GOPATH) go get github.com/willf/bitset - GOPATH=$(GOPATH) go get github.com/golang/lint/golint - GOPATH=$(GOPATH) go get github.com/mschoch/smat - GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz - GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz-build - GOPATH=$(GOPATH) go get github.com/glycerine/go-unsnap-stream - GOPATH=$(GOPATH) go get github.com/philhofer/fwd - GOPATH=$(GOPATH) go get github.com/jtolds/gls - -fuzz-smat: - go test -tags=gofuzz -run=TestGenerateSmatCorpus - go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - - -fuzz-stream: - go-fuzz-build -func FuzzSerializationStream github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - - -fuzz-buffer: - go-fuzz-build -func FuzzSerializationBuffer github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - -# Remove any build artifact -clean: - GOPATH=$(GOPATH) go clean ./... - -# Deletes any intermediate file -nuke: - rm -rf ./target - GOPATH=$(GOPATH) go clean -i ./... 
- - -ser: - go generate - -cover: - go test -coverprofile=coverage.out - go tool cover -html=coverage.out - -fetch-real-roaring-datasets: - # pull github.com/RoaringBitmap/real-roaring-datasets -> testdata/real-roaring-datasets - git submodule init - git submodule update diff --git a/vendor/github.com/RoaringBitmap/roaring/README.md b/vendor/github.com/RoaringBitmap/roaring/README.md deleted file mode 100644 index b711d09ec..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/README.md +++ /dev/null @@ -1,247 +0,0 @@ -roaring [![Build Status](https://travis-ci.org/RoaringBitmap/roaring.png)](https://travis-ci.org/RoaringBitmap/roaring) [![Coverage Status](https://coveralls.io/repos/github/RoaringBitmap/roaring/badge.svg?branch=master)](https://coveralls.io/github/RoaringBitmap/roaring?branch=master) [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring) -[![Build Status](https://cloud.drone.io/api/badges/RoaringBitmap/roaring/status.svg)](https://cloud.drone.io/RoaringBitmap/roaring) -============= - -This is a go version of the Roaring bitmap data structure. - - - -Roaring bitmaps are used by several major systems such as [Apache Lucene][lucene] and derivative systems such as [Solr][solr] and -[Elasticsearch][elasticsearch], [Apache Druid (Incubating)][druid], [LinkedIn Pinot][pinot], [Netflix Atlas][atlas], [Apache Spark][spark], [OpenSearchServer][opensearchserver], [Cloud Torrent][cloudtorrent], [Whoosh][whoosh], [Pilosa][pilosa], [Microsoft Visual Studio Team Services (VSTS)][vsts], and eBay's [Apache Kylin][kylin]. - -[lucene]: https://lucene.apache.org/ -[solr]: https://lucene.apache.org/solr/ -[elasticsearch]: https://www.elastic.co/products/elasticsearch -[druid]: https://druid.apache.org/ -[spark]: https://spark.apache.org/ -[opensearchserver]: http://www.opensearchserver.com -[cloudtorrent]: https://github.com/jpillora/cloud-torrent -[whoosh]: https://bitbucket.org/mchaput/whoosh/wiki/Home -[pilosa]: https://www.pilosa.com/ -[kylin]: http://kylin.apache.org/ -[pinot]: http://github.com/linkedin/pinot/wiki -[vsts]: https://www.visualstudio.com/team-services/ -[atlas]: https://github.com/Netflix/atlas - -Roaring bitmaps are found to work well in many important applications: - -> Use Roaring for bitmap compression whenever possible. Do not use other bitmap compression methods ([Wang et al., SIGMOD 2017](http://db.ucsd.edu/wp-content/uploads/2017/03/sidm338-wangA.pdf)) - - -The ``roaring`` Go library is used by -* [Cloud Torrent](https://github.com/jpillora/cloud-torrent): a self-hosted remote torrent client -* [runv](https://github.com/hyperhq/runv): an Hypervisor-based runtime for the Open Containers Initiative -* [InfluxDB](https://www.influxdata.com) -* [Pilosa](https://www.pilosa.com/) -* [Bleve](http://www.blevesearch.com) - -This library is used in production in several systems, it is part of the [Awesome Go collection](https://awesome-go.com). - - -There are also [Java](https://github.com/RoaringBitmap/RoaringBitmap) and [C/C++](https://github.com/RoaringBitmap/CRoaring) versions. The Java, C, C++ and Go version are binary compatible: e.g, you can save bitmaps -from a Java program and load them back in Go, and vice versa. We have a [format specification](https://github.com/RoaringBitmap/RoaringFormatSpec). 
- - -This code is licensed under Apache License, Version 2.0 (ASL2.0). - -Copyright 2016-... by the authors. - - -### References - -- Daniel Lemire, Owen Kaser, Nathan Kurz, Luca Deri, Chris O'Hara, François Saint-Jacques, Gregory Ssi-Yan-Kai, Roaring Bitmaps: Implementation of an Optimized Software Library, Software: Practice and Experience 48 (4), 2018 [arXiv:1709.07821](https://arxiv.org/abs/1709.07821) -- Samy Chambi, Daniel Lemire, Owen Kaser, Robert Godin, -Better bitmap performance with Roaring bitmaps, -Software: Practice and Experience 46 (5), 2016. -http://arxiv.org/abs/1402.6407 This paper used data from http://lemire.me/data/realroaring2014.html -- Daniel Lemire, Gregory Ssi-Yan-Kai, Owen Kaser, Consistently faster and smaller compressed bitmaps with Roaring, Software: Practice and Experience 46 (11), 2016. http://arxiv.org/abs/1603.06549 - - -### Dependencies - -Dependencies are fetched automatically by giving the `-t` flag to `go get`. - -they include - - github.com/willf/bitset - - github.com/mschoch/smat - - github.com/glycerine/go-unsnap-stream - - github.com/philhofer/fwd - - github.com/jtolds/gls - -Note that the smat library requires Go 1.6 or better. - -#### Installation - - - go get -t github.com/RoaringBitmap/roaring - - -### Example - -Here is a simplified but complete example: - -```go -package main - -import ( - "fmt" - "github.com/RoaringBitmap/roaring" - "bytes" -) - - -func main() { - // example inspired by https://github.com/fzandona/goroar - fmt.Println("==roaring==") - rb1 := roaring.BitmapOf(1, 2, 3, 4, 5, 100, 1000) - fmt.Println(rb1.String()) - - rb2 := roaring.BitmapOf(3, 4, 1000) - fmt.Println(rb2.String()) - - rb3 := roaring.New() - fmt.Println(rb3.String()) - - fmt.Println("Cardinality: ", rb1.GetCardinality()) - - fmt.Println("Contains 3? ", rb1.Contains(3)) - - rb1.And(rb2) - - rb3.Add(1) - rb3.Add(5) - - rb3.Or(rb1) - - // computes union of the three bitmaps in parallel using 4 workers - roaring.ParOr(4, rb1, rb2, rb3) - // computes intersection of the three bitmaps in parallel using 4 workers - roaring.ParAnd(4, rb1, rb2, rb3) - - - // prints 1, 3, 4, 5, 1000 - i := rb3.Iterator() - for i.HasNext() { - fmt.Println(i.Next()) - } - fmt.Println() - - // next we include an example of serialization - buf := new(bytes.Buffer) - rb1.WriteTo(buf) // we omit error handling - newrb:= roaring.New() - newrb.ReadFrom(buf) - if rb1.Equals(newrb) { - fmt.Println("I wrote the content to a byte stream and read it back.") - } - // you can iterate over bitmaps using ReverseIterator(), Iterator, ManyIterator() -} -``` - -If you wish to use serialization and handle errors, you might want to -consider the following sample of code: - -```go - rb := BitmapOf(1, 2, 3, 4, 5, 100, 1000) - buf := new(bytes.Buffer) - size,err:=rb.WriteTo(buf) - if err != nil { - t.Errorf("Failed writing") - } - newrb:= New() - size,err=newrb.ReadFrom(buf) - if err != nil { - t.Errorf("Failed reading") - } - if ! rb.Equals(newrb) { - t.Errorf("Cannot retrieve serialized version") - } -``` - -Given N integers in [0,x), then the serialized size in bytes of -a Roaring bitmap should never exceed this bound: - -`` 8 + 9 * ((long)x+65535)/65536 + 2 * N `` - -That is, given a fixed overhead for the universe size (x), Roaring -bitmaps never use more than 2 bytes per integer. You can call -``BoundSerializedSizeInBytes`` for a more precise estimate. 
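To make the size bound quoted above concrete, here is a minimal standalone sketch (editorial, not from the library) that evaluates the worst-case formula for a universe size `x` and element count `N`; for a real bitmap the precise figure should come from `BoundSerializedSizeInBytes` instead.

```go
package main

import "fmt"

// upperBound mirrors the bound quoted above for N integers drawn from [0, x):
// 8 + 9*((x+65535)/65536) + 2*N bytes.
func upperBound(x, n uint64) uint64 {
	return 8 + 9*((x+65535)/65536) + 2*n
}

func main() {
	// For example: one million values spread over a 32-bit universe.
	fmt.Println(upperBound(1<<32, 1000000))
}
```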
- - -### Documentation - -Current documentation is available at http://godoc.org/github.com/RoaringBitmap/roaring - -### Goroutine safety - -In general, it should not generally be considered safe to access -the same bitmaps using different goroutines--they are left -unsynchronized for performance. Should you want to access -a Bitmap from more than one goroutine, you should -provide synchronization. Typically this is done by using channels to pass -the *Bitmap around (in Go style; so there is only ever one owner), -or by using `sync.Mutex` to serialize operations on Bitmaps. - -### Coverage - -We test our software. For a report on our test coverage, see - -https://coveralls.io/github/RoaringBitmap/roaring?branch=master - -### Benchmark - -Type - - go test -bench Benchmark -run - - -To run benchmarks on [Real Roaring Datasets](https://github.com/RoaringBitmap/real-roaring-datasets) -run the following: - -```sh -go get github.com/RoaringBitmap/real-roaring-datasets -BENCH_REAL_DATA=1 go test -bench BenchmarkRealData -run - -``` - -### Iterative use - -You can use roaring with gore: - -- go get -u github.com/motemen/gore -- Make sure that ``$GOPATH/bin`` is in your ``$PATH``. -- go get github.com/RoaringBitmap/roaring - -```go -$ gore -gore version 0.2.6 :help for help -gore> :import github.com/RoaringBitmap/roaring -gore> x:=roaring.New() -gore> x.Add(1) -gore> x.String() -"{1}" -``` - - -### Fuzzy testing - -You can help us test further the library with fuzzy testing: - - go get github.com/dvyukov/go-fuzz/go-fuzz - go get github.com/dvyukov/go-fuzz/go-fuzz-build - go test -tags=gofuzz -run=TestGenerateSmatCorpus - go-fuzz-build github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - -Let it run, and if the # of crashers is > 0, check out the reports in -the workdir where you should be able to find the panic goroutine stack -traces. - -### Alternative in Go - -There is a Go version wrapping the C/C++ implementation https://github.com/RoaringBitmap/gocroaring - -For an alternative implementation in Go, see https://github.com/fzandona/goroar -The two versions were written independently. 
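To make the goroutine-safety advice above concrete, here is a minimal sketch (editorial; the `safeBitmap` type is illustrative only) that serializes access to a shared bitmap with a `sync.Mutex`, as the text suggests. Passing ownership of the `*Bitmap` over a channel, the other option mentioned, avoids the lock entirely when only one goroutine uses it at a time.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/RoaringBitmap/roaring"
)

// safeBitmap is a hypothetical wrapper (not part of the library) that guards
// a shared *roaring.Bitmap with a mutex, since Bitmap itself is deliberately
// left unsynchronized for performance.
type safeBitmap struct {
	mu sync.Mutex
	bm *roaring.Bitmap
}

func newSafeBitmap() *safeBitmap {
	return &safeBitmap{bm: roaring.New()}
}

func (s *safeBitmap) Add(x uint32) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.bm.Add(x)
}

func (s *safeBitmap) Contains(x uint32) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.bm.Contains(x)
}

func main() {
	sb := newSafeBitmap()
	sb.Add(42)
	fmt.Println(sb.Contains(42)) // true
}
```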
- - -### Mailing list/discussion group - -https://groups.google.com/forum/#!forum/roaring-bitmaps diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go deleted file mode 100644 index 621616f5d..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go +++ /dev/null @@ -1,968 +0,0 @@ -package roaring - -import ( - "fmt" -) - -//go:generate msgp -unexported - -type arrayContainer struct { - content []uint16 -} - -func (ac *arrayContainer) String() string { - s := "{" - for it := ac.getShortIterator(); it.hasNext(); { - s += fmt.Sprintf("%v, ", it.next()) - } - return s + "}" -} - -func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) { - for k := 0; k < len(ac.content); k++ { - x[k+i] = uint32(ac.content[k]) | mask - } -} - -func (ac *arrayContainer) getShortIterator() shortPeekable { - return &shortIterator{ac.content, 0} -} - -func (ac *arrayContainer) getReverseIterator() shortIterable { - return &reverseIterator{ac.content, len(ac.content) - 1} -} - -func (ac *arrayContainer) getManyIterator() manyIterable { - return &shortIterator{ac.content, 0} -} - -func (ac *arrayContainer) minimum() uint16 { - return ac.content[0] // assume not empty -} - -func (ac *arrayContainer) maximum() uint16 { - return ac.content[len(ac.content)-1] // assume not empty -} - -func (ac *arrayContainer) getSizeInBytes() int { - return ac.getCardinality() * 2 -} - -func (ac *arrayContainer) serializedSizeInBytes() int { - return ac.getCardinality() * 2 -} - -func arrayContainerSizeInBytes(card int) int { - return card * 2 -} - -// add the values in the range [firstOfRange,endx) -func (ac *arrayContainer) iaddRange(firstOfRange, endx int) container { - if firstOfRange >= endx { - return ac - } - indexstart := binarySearch(ac.content, uint16(firstOfRange)) - if indexstart < 0 { - indexstart = -indexstart - 1 - } - indexend := binarySearch(ac.content, uint16(endx-1)) - if indexend < 0 { - indexend = -indexend - 1 - } else { - indexend++ - } - rangelength := endx - firstOfRange - newcardinality := indexstart + (ac.getCardinality() - indexend) + rangelength - if newcardinality > arrayDefaultMaxSize { - a := ac.toBitmapContainer() - return a.iaddRange(firstOfRange, endx) - } - if cap(ac.content) < newcardinality { - tmp := make([]uint16, newcardinality, newcardinality) - copy(tmp[:indexstart], ac.content[:indexstart]) - copy(tmp[indexstart+rangelength:], ac.content[indexend:]) - - ac.content = tmp - } else { - ac.content = ac.content[:newcardinality] - copy(ac.content[indexstart+rangelength:], ac.content[indexend:]) - - } - for k := 0; k < rangelength; k++ { - ac.content[k+indexstart] = uint16(firstOfRange + k) - } - return ac -} - -// remove the values in the range [firstOfRange,endx) -func (ac *arrayContainer) iremoveRange(firstOfRange, endx int) container { - if firstOfRange >= endx { - return ac - } - indexstart := binarySearch(ac.content, uint16(firstOfRange)) - if indexstart < 0 { - indexstart = -indexstart - 1 - } - indexend := binarySearch(ac.content, uint16(endx-1)) - if indexend < 0 { - indexend = -indexend - 1 - } else { - indexend++ - } - rangelength := indexend - indexstart - answer := ac - copy(answer.content[indexstart:], ac.content[indexstart+rangelength:]) - answer.content = answer.content[:ac.getCardinality()-rangelength] - return answer -} - -// flip the values in the range [firstOfRange,endx) -func (ac *arrayContainer) not(firstOfRange, endx int) container { - if firstOfRange >= 
endx { - return ac.clone() - } - return ac.notClose(firstOfRange, endx-1) // remove everything in [firstOfRange,endx-1] -} - -// flip the values in the range [firstOfRange,lastOfRange] -func (ac *arrayContainer) notClose(firstOfRange, lastOfRange int) container { - if firstOfRange > lastOfRange { // unlike add and remove, not uses an inclusive range [firstOfRange,lastOfRange] - return ac.clone() - } - - // determine the span of array indices to be affected^M - startIndex := binarySearch(ac.content, uint16(firstOfRange)) - if startIndex < 0 { - startIndex = -startIndex - 1 - } - lastIndex := binarySearch(ac.content, uint16(lastOfRange)) - if lastIndex < 0 { - lastIndex = -lastIndex - 2 - } - currentValuesInRange := lastIndex - startIndex + 1 - spanToBeFlipped := lastOfRange - firstOfRange + 1 - newValuesInRange := spanToBeFlipped - currentValuesInRange - cardinalityChange := newValuesInRange - currentValuesInRange - newCardinality := len(ac.content) + cardinalityChange - if newCardinality > arrayDefaultMaxSize { - return ac.toBitmapContainer().not(firstOfRange, lastOfRange+1) - } - answer := newArrayContainer() - answer.content = make([]uint16, newCardinality, newCardinality) //a hack for sure - - copy(answer.content, ac.content[:startIndex]) - outPos := startIndex - inPos := startIndex - valInRange := firstOfRange - for ; valInRange <= lastOfRange && inPos <= lastIndex; valInRange++ { - if uint16(valInRange) != ac.content[inPos] { - answer.content[outPos] = uint16(valInRange) - outPos++ - } else { - inPos++ - } - } - - for ; valInRange <= lastOfRange; valInRange++ { - answer.content[outPos] = uint16(valInRange) - outPos++ - } - - for i := lastIndex + 1; i < len(ac.content); i++ { - answer.content[outPos] = ac.content[i] - outPos++ - } - answer.content = answer.content[:newCardinality] - return answer - -} - -func (ac *arrayContainer) equals(o container) bool { - - srb, ok := o.(*arrayContainer) - if ok { - // Check if the containers are the same object. - if ac == srb { - return true - } - - if len(srb.content) != len(ac.content) { - return false - } - - for i, v := range ac.content { - if v != srb.content[i] { - return false - } - } - return true - } - - // use generic comparison - bCard := o.getCardinality() - aCard := ac.getCardinality() - if bCard != aCard { - return false - } - - ait := ac.getShortIterator() - bit := o.getShortIterator() - for ait.hasNext() { - if bit.next() != ait.next() { - return false - } - } - return true -} - -func (ac *arrayContainer) toBitmapContainer() *bitmapContainer { - bc := newBitmapContainer() - bc.loadData(ac) - return bc - -} -func (ac *arrayContainer) iadd(x uint16) (wasNew bool) { - // Special case adding to the end of the container. - l := len(ac.content) - if l > 0 && l < arrayDefaultMaxSize && ac.content[l-1] < x { - ac.content = append(ac.content, x) - return true - } - - loc := binarySearch(ac.content, x) - - if loc < 0 { - s := ac.content - i := -loc - 1 - s = append(s, 0) - copy(s[i+1:], s[i:]) - s[i] = x - ac.content = s - return true - } - return false -} - -func (ac *arrayContainer) iaddReturnMinimized(x uint16) container { - // Special case adding to the end of the container. 
- l := len(ac.content) - if l > 0 && l < arrayDefaultMaxSize && ac.content[l-1] < x { - ac.content = append(ac.content, x) - return ac - } - - loc := binarySearch(ac.content, x) - - if loc < 0 { - if len(ac.content) >= arrayDefaultMaxSize { - a := ac.toBitmapContainer() - a.iadd(x) - return a - } - s := ac.content - i := -loc - 1 - s = append(s, 0) - copy(s[i+1:], s[i:]) - s[i] = x - ac.content = s - } - return ac -} - -// iremoveReturnMinimized is allowed to change the return type to minimize storage. -func (ac *arrayContainer) iremoveReturnMinimized(x uint16) container { - ac.iremove(x) - return ac -} - -func (ac *arrayContainer) iremove(x uint16) bool { - loc := binarySearch(ac.content, x) - if loc >= 0 { - s := ac.content - s = append(s[:loc], s[loc+1:]...) - ac.content = s - return true - } - return false -} - -func (ac *arrayContainer) remove(x uint16) container { - out := &arrayContainer{make([]uint16, len(ac.content))} - copy(out.content, ac.content[:]) - - loc := binarySearch(out.content, x) - if loc >= 0 { - s := out.content - s = append(s[:loc], s[loc+1:]...) - out.content = s - } - return out -} - -func (ac *arrayContainer) or(a container) container { - switch x := a.(type) { - case *arrayContainer: - return ac.orArray(x) - case *bitmapContainer: - return x.orArray(ac) - case *runContainer16: - if x.isFull() { - return x.clone() - } - return x.orArray(ac) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) orCardinality(a container) int { - switch x := a.(type) { - case *arrayContainer: - return ac.orArrayCardinality(x) - case *bitmapContainer: - return x.orArrayCardinality(ac) - case *runContainer16: - return x.orArrayCardinality(ac) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) ior(a container) container { - switch x := a.(type) { - case *arrayContainer: - return ac.iorArray(x) - case *bitmapContainer: - return a.(*bitmapContainer).orArray(ac) - //return ac.iorBitmap(x) // note: this does not make sense - case *runContainer16: - if x.isFull() { - return x.clone() - } - return ac.iorRun16(x) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) iorArray(value2 *arrayContainer) container { - value1 := ac - len1 := value1.getCardinality() - len2 := value2.getCardinality() - maxPossibleCardinality := len1 + len2 - if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap! 
- bc := newBitmapContainer() - for k := 0; k < len(value2.content); k++ { - v := value2.content[k] - i := uint(v) >> 6 - mask := uint64(1) << (v % 64) - bc.bitmap[i] |= mask - } - for k := 0; k < len(ac.content); k++ { - v := ac.content[k] - i := uint(v) >> 6 - mask := uint64(1) << (v % 64) - bc.bitmap[i] |= mask - } - bc.cardinality = int(popcntSlice(bc.bitmap)) - if bc.cardinality <= arrayDefaultMaxSize { - return bc.toArrayContainer() - } - return bc - } - if maxPossibleCardinality > cap(value1.content) { - newcontent := make([]uint16, 0, maxPossibleCardinality) - copy(newcontent[len2:maxPossibleCardinality], ac.content[0:len1]) - ac.content = newcontent - } else { - copy(ac.content[len2:maxPossibleCardinality], ac.content[0:len1]) - } - nl := union2by2(value1.content[len2:maxPossibleCardinality], value2.content, ac.content) - ac.content = ac.content[:nl] // reslice to match actual used capacity - return ac -} - -// Note: such code does not make practical sense, except for lazy evaluations -func (ac *arrayContainer) iorBitmap(bc2 *bitmapContainer) container { - bc1 := ac.toBitmapContainer() - bc1.iorBitmap(bc2) - *ac = *newArrayContainerFromBitmap(bc1) - return ac -} - -func (ac *arrayContainer) iorRun16(rc *runContainer16) container { - bc1 := ac.toBitmapContainer() - bc2 := rc.toBitmapContainer() - bc1.iorBitmap(bc2) - *ac = *newArrayContainerFromBitmap(bc1) - return ac -} - -func (ac *arrayContainer) lazyIOR(a container) container { - switch x := a.(type) { - case *arrayContainer: - return ac.lazyIorArray(x) - case *bitmapContainer: - return ac.lazyIorBitmap(x) - case *runContainer16: - if x.isFull() { - return x.clone() - } - return ac.lazyIorRun16(x) - - } - panic("unsupported container type") -} - -func (ac *arrayContainer) lazyIorArray(ac2 *arrayContainer) container { - // TODO actually make this lazy - return ac.iorArray(ac2) -} - -func (ac *arrayContainer) lazyIorBitmap(bc *bitmapContainer) container { - // TODO actually make this lazy - return ac.iorBitmap(bc) -} - -func (ac *arrayContainer) lazyIorRun16(rc *runContainer16) container { - // TODO actually make this lazy - return ac.iorRun16(rc) -} - -func (ac *arrayContainer) lazyOR(a container) container { - switch x := a.(type) { - case *arrayContainer: - return ac.lazyorArray(x) - case *bitmapContainer: - return a.lazyOR(ac) - case *runContainer16: - if x.isFull() { - return x.clone() - } - return x.orArray(ac) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) orArray(value2 *arrayContainer) container { - value1 := ac - maxPossibleCardinality := value1.getCardinality() + value2.getCardinality() - if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap! 
- bc := newBitmapContainer() - for k := 0; k < len(value2.content); k++ { - v := value2.content[k] - i := uint(v) >> 6 - mask := uint64(1) << (v % 64) - bc.bitmap[i] |= mask - } - for k := 0; k < len(ac.content); k++ { - v := ac.content[k] - i := uint(v) >> 6 - mask := uint64(1) << (v % 64) - bc.bitmap[i] |= mask - } - bc.cardinality = int(popcntSlice(bc.bitmap)) - if bc.cardinality <= arrayDefaultMaxSize { - return bc.toArrayContainer() - } - return bc - } - answer := newArrayContainerCapacity(maxPossibleCardinality) - nl := union2by2(value1.content, value2.content, answer.content) - answer.content = answer.content[:nl] // reslice to match actual used capacity - return answer -} - -func (ac *arrayContainer) orArrayCardinality(value2 *arrayContainer) int { - return union2by2Cardinality(ac.content, value2.content) -} - -func (ac *arrayContainer) lazyorArray(value2 *arrayContainer) container { - value1 := ac - maxPossibleCardinality := value1.getCardinality() + value2.getCardinality() - if maxPossibleCardinality > arrayLazyLowerBound { // it could be a bitmap!^M - bc := newBitmapContainer() - for k := 0; k < len(value2.content); k++ { - v := value2.content[k] - i := uint(v) >> 6 - mask := uint64(1) << (v % 64) - bc.bitmap[i] |= mask - } - for k := 0; k < len(ac.content); k++ { - v := ac.content[k] - i := uint(v) >> 6 - mask := uint64(1) << (v % 64) - bc.bitmap[i] |= mask - } - bc.cardinality = invalidCardinality - return bc - } - answer := newArrayContainerCapacity(maxPossibleCardinality) - nl := union2by2(value1.content, value2.content, answer.content) - answer.content = answer.content[:nl] // reslice to match actual used capacity - return answer -} - -func (ac *arrayContainer) and(a container) container { - switch x := a.(type) { - case *arrayContainer: - return ac.andArray(x) - case *bitmapContainer: - return x.and(ac) - case *runContainer16: - if x.isFull() { - return ac.clone() - } - return x.andArray(ac) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) andCardinality(a container) int { - switch x := a.(type) { - case *arrayContainer: - return ac.andArrayCardinality(x) - case *bitmapContainer: - return x.andCardinality(ac) - case *runContainer16: - return x.andArrayCardinality(ac) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) intersects(a container) bool { - switch x := a.(type) { - case *arrayContainer: - return ac.intersectsArray(x) - case *bitmapContainer: - return x.intersects(ac) - case *runContainer16: - return x.intersects(ac) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) iand(a container) container { - switch x := a.(type) { - case *arrayContainer: - return ac.iandArray(x) - case *bitmapContainer: - return ac.iandBitmap(x) - case *runContainer16: - if x.isFull() { - return ac - } - return x.andArray(ac) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) iandBitmap(bc *bitmapContainer) container { - pos := 0 - c := ac.getCardinality() - for k := 0; k < c; k++ { - // branchless - v := ac.content[k] - ac.content[pos] = v - pos += int(bc.bitValue(v)) - } - ac.content = ac.content[:pos] - return ac - -} - -func (ac *arrayContainer) xor(a container) container { - switch x := a.(type) { - case *arrayContainer: - return ac.xorArray(x) - case *bitmapContainer: - return a.xor(ac) - case *runContainer16: - return x.xorArray(ac) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) xorArray(value2 *arrayContainer) container { - value1 := ac - totalCardinality := 
value1.getCardinality() + value2.getCardinality() - if totalCardinality > arrayDefaultMaxSize { // it could be a bitmap! - bc := newBitmapContainer() - for k := 0; k < len(value2.content); k++ { - v := value2.content[k] - i := uint(v) >> 6 - bc.bitmap[i] ^= (uint64(1) << (v % 64)) - } - for k := 0; k < len(ac.content); k++ { - v := ac.content[k] - i := uint(v) >> 6 - bc.bitmap[i] ^= (uint64(1) << (v % 64)) - } - bc.computeCardinality() - if bc.cardinality <= arrayDefaultMaxSize { - return bc.toArrayContainer() - } - return bc - } - desiredCapacity := totalCardinality - answer := newArrayContainerCapacity(desiredCapacity) - length := exclusiveUnion2by2(value1.content, value2.content, answer.content) - answer.content = answer.content[:length] - return answer - -} - -func (ac *arrayContainer) andNot(a container) container { - switch x := a.(type) { - case *arrayContainer: - return ac.andNotArray(x) - case *bitmapContainer: - return ac.andNotBitmap(x) - case *runContainer16: - return ac.andNotRun16(x) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) andNotRun16(rc *runContainer16) container { - acb := ac.toBitmapContainer() - rcb := rc.toBitmapContainer() - return acb.andNotBitmap(rcb) -} - -func (ac *arrayContainer) iandNot(a container) container { - switch x := a.(type) { - case *arrayContainer: - return ac.iandNotArray(x) - case *bitmapContainer: - return ac.iandNotBitmap(x) - case *runContainer16: - return ac.iandNotRun16(x) - } - panic("unsupported container type") -} - -func (ac *arrayContainer) iandNotRun16(rc *runContainer16) container { - rcb := rc.toBitmapContainer() - acb := ac.toBitmapContainer() - acb.iandNotBitmapSurely(rcb) - *ac = *(acb.toArrayContainer()) - return ac -} - -func (ac *arrayContainer) andNotArray(value2 *arrayContainer) container { - value1 := ac - desiredcapacity := value1.getCardinality() - answer := newArrayContainerCapacity(desiredcapacity) - length := difference(value1.content, value2.content, answer.content) - answer.content = answer.content[:length] - return answer -} - -func (ac *arrayContainer) iandNotArray(value2 *arrayContainer) container { - length := difference(ac.content, value2.content, ac.content) - ac.content = ac.content[:length] - return ac -} - -func (ac *arrayContainer) andNotBitmap(value2 *bitmapContainer) container { - desiredcapacity := ac.getCardinality() - answer := newArrayContainerCapacity(desiredcapacity) - answer.content = answer.content[:desiredcapacity] - pos := 0 - for _, v := range ac.content { - answer.content[pos] = v - pos += 1 - int(value2.bitValue(v)) - } - answer.content = answer.content[:pos] - return answer -} - -func (ac *arrayContainer) andBitmap(value2 *bitmapContainer) container { - desiredcapacity := ac.getCardinality() - answer := newArrayContainerCapacity(desiredcapacity) - answer.content = answer.content[:desiredcapacity] - pos := 0 - for _, v := range ac.content { - answer.content[pos] = v - pos += int(value2.bitValue(v)) - } - answer.content = answer.content[:pos] - return answer -} - -func (ac *arrayContainer) iandNotBitmap(value2 *bitmapContainer) container { - pos := 0 - for _, v := range ac.content { - ac.content[pos] = v - pos += 1 - int(value2.bitValue(v)) - } - ac.content = ac.content[:pos] - return ac -} - -func copyOf(array []uint16, size int) []uint16 { - result := make([]uint16, size) - for i, x := range array { - if i == size { - break - } - result[i] = x - } - return result -} - -// flip the values in the range [firstOfRange,endx) -func (ac *arrayContainer) 
inot(firstOfRange, endx int) container { - if firstOfRange >= endx { - return ac - } - return ac.inotClose(firstOfRange, endx-1) // remove everything in [firstOfRange,endx-1] -} - -// flip the values in the range [firstOfRange,lastOfRange] -func (ac *arrayContainer) inotClose(firstOfRange, lastOfRange int) container { - if firstOfRange > lastOfRange { // unlike add and remove, not uses an inclusive range [firstOfRange,lastOfRange] - return ac - } - // determine the span of array indices to be affected - startIndex := binarySearch(ac.content, uint16(firstOfRange)) - if startIndex < 0 { - startIndex = -startIndex - 1 - } - lastIndex := binarySearch(ac.content, uint16(lastOfRange)) - if lastIndex < 0 { - lastIndex = -lastIndex - 1 - 1 - } - currentValuesInRange := lastIndex - startIndex + 1 - spanToBeFlipped := lastOfRange - firstOfRange + 1 - - newValuesInRange := spanToBeFlipped - currentValuesInRange - buffer := make([]uint16, newValuesInRange) - cardinalityChange := newValuesInRange - currentValuesInRange - newCardinality := len(ac.content) + cardinalityChange - if cardinalityChange > 0 { - if newCardinality > len(ac.content) { - if newCardinality > arrayDefaultMaxSize { - bcRet := ac.toBitmapContainer() - bcRet.inot(firstOfRange, lastOfRange+1) - *ac = *bcRet.toArrayContainer() - return bcRet - } - ac.content = copyOf(ac.content, newCardinality) - } - base := lastIndex + 1 - copy(ac.content[lastIndex+1+cardinalityChange:], ac.content[base:base+len(ac.content)-1-lastIndex]) - ac.negateRange(buffer, startIndex, lastIndex, firstOfRange, lastOfRange+1) - } else { // no expansion needed - ac.negateRange(buffer, startIndex, lastIndex, firstOfRange, lastOfRange+1) - if cardinalityChange < 0 { - - for i := startIndex + newValuesInRange; i < newCardinality; i++ { - ac.content[i] = ac.content[i-cardinalityChange] - } - } - } - ac.content = ac.content[:newCardinality] - return ac -} - -func (ac *arrayContainer) negateRange(buffer []uint16, startIndex, lastIndex, startRange, lastRange int) { - // compute the negation into buffer - outPos := 0 - inPos := startIndex // value here always >= valInRange, - // until it is exhausted - // n.b., we can start initially exhausted. 
- - valInRange := startRange - for ; valInRange < lastRange && inPos <= lastIndex; valInRange++ { - if uint16(valInRange) != ac.content[inPos] { - buffer[outPos] = uint16(valInRange) - outPos++ - } else { - inPos++ - } - } - - // if there are extra items (greater than the biggest - // pre-existing one in range), buffer them - for ; valInRange < lastRange; valInRange++ { - buffer[outPos] = uint16(valInRange) - outPos++ - } - - if outPos != len(buffer) { - panic("negateRange: internal bug") - } - - for i, item := range buffer { - ac.content[i+startIndex] = item - } -} - -func (ac *arrayContainer) isFull() bool { - return false -} - -func (ac *arrayContainer) andArray(value2 *arrayContainer) container { - desiredcapacity := minOfInt(ac.getCardinality(), value2.getCardinality()) - answer := newArrayContainerCapacity(desiredcapacity) - length := intersection2by2( - ac.content, - value2.content, - answer.content) - answer.content = answer.content[:length] - return answer -} - -func (ac *arrayContainer) andArrayCardinality(value2 *arrayContainer) int { - return intersection2by2Cardinality( - ac.content, - value2.content) -} - -func (ac *arrayContainer) intersectsArray(value2 *arrayContainer) bool { - return intersects2by2( - ac.content, - value2.content) -} - -func (ac *arrayContainer) iandArray(value2 *arrayContainer) container { - length := intersection2by2( - ac.content, - value2.content, - ac.content) - ac.content = ac.content[:length] - return ac -} - -func (ac *arrayContainer) getCardinality() int { - return len(ac.content) -} - -func (ac *arrayContainer) rank(x uint16) int { - answer := binarySearch(ac.content, x) - if answer >= 0 { - return answer + 1 - } - return -answer - 1 - -} - -func (ac *arrayContainer) selectInt(x uint16) int { - return int(ac.content[x]) -} - -func (ac *arrayContainer) clone() container { - ptr := arrayContainer{make([]uint16, len(ac.content))} - copy(ptr.content, ac.content[:]) - return &ptr -} - -func (ac *arrayContainer) contains(x uint16) bool { - return binarySearch(ac.content, x) >= 0 -} - -func (ac *arrayContainer) loadData(bitmapContainer *bitmapContainer) { - ac.content = make([]uint16, bitmapContainer.cardinality, bitmapContainer.cardinality) - bitmapContainer.fillArray(ac.content) -} -func newArrayContainer() *arrayContainer { - p := new(arrayContainer) - return p -} - -func newArrayContainerFromBitmap(bc *bitmapContainer) *arrayContainer { - ac := &arrayContainer{} - ac.loadData(bc) - return ac -} - -func newArrayContainerCapacity(size int) *arrayContainer { - p := new(arrayContainer) - p.content = make([]uint16, 0, size) - return p -} - -func newArrayContainerSize(size int) *arrayContainer { - p := new(arrayContainer) - p.content = make([]uint16, size, size) - return p -} - -func newArrayContainerRange(firstOfRun, lastOfRun int) *arrayContainer { - valuesInRange := lastOfRun - firstOfRun + 1 - this := newArrayContainerCapacity(valuesInRange) - for i := 0; i < valuesInRange; i++ { - this.content = append(this.content, uint16(firstOfRun+i)) - } - return this -} - -func (ac *arrayContainer) numberOfRuns() (nr int) { - n := len(ac.content) - var runlen uint16 - var cur, prev uint16 - - switch n { - case 0: - return 0 - case 1: - return 1 - default: - for i := 1; i < n; i++ { - prev = ac.content[i-1] - cur = ac.content[i] - - if cur == prev+1 { - runlen++ - } else { - if cur < prev { - panic("then fundamental arrayContainer assumption of sorted ac.content was broken") - } - if cur == prev { - panic("then fundamental arrayContainer assumption of 
deduplicated content was broken") - } else { - nr++ - runlen = 0 - } - } - } - nr++ - } - return -} - -// convert to run or array *if needed* -func (ac *arrayContainer) toEfficientContainer() container { - - numRuns := ac.numberOfRuns() - - sizeAsRunContainer := runContainer16SerializedSizeInBytes(numRuns) - sizeAsBitmapContainer := bitmapContainerSizeInBytes() - card := ac.getCardinality() - sizeAsArrayContainer := arrayContainerSizeInBytes(card) - - if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) { - return newRunContainer16FromArray(ac) - } - if card <= arrayDefaultMaxSize { - return ac - } - return ac.toBitmapContainer() -} - -func (ac *arrayContainer) containerType() contype { - return arrayContype -} - -func (ac *arrayContainer) addOffset(x uint16) []container { - low := &arrayContainer{} - high := &arrayContainer{} - for _, val := range ac.content { - y := uint32(val) + uint32(x) - if highbits(y) > 0 { - high.content = append(high.content, lowbits(y)) - } else { - low.content = append(low.content, lowbits(y)) - } - } - return []container{low, high} -} diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go deleted file mode 100644 index 6ee670ee5..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go +++ /dev/null @@ -1,134 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *arrayContainer) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbzg uint32 - zbzg, err = dc.ReadMapHeader() - if err != nil { - return - } - for zbzg > 0 { - zbzg-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "content": - var zbai uint32 - zbai, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.content) >= int(zbai) { - z.content = (z.content)[:zbai] - } else { - z.content = make([]uint16, zbai) - } - for zxvk := range z.content { - z.content[zxvk], err = dc.ReadUint16() - if err != nil { - return - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *arrayContainer) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "content" - err = en.Append(0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.content))) - if err != nil { - return - } - for zxvk := range z.content { - err = en.WriteUint16(z.content[zxvk]) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *arrayContainer) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "content" - o = append(o, 0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74) - o = msgp.AppendArrayHeader(o, uint32(len(z.content))) - for zxvk := range z.content { - o = msgp.AppendUint16(o, z.content[zxvk]) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *arrayContainer) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zcmr uint32 - zcmr, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zcmr > 0 { - zcmr-- - field, bts, err = 
msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "content": - var zajw uint32 - zajw, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.content) >= int(zajw) { - z.content = (z.content)[:zajw] - } else { - z.content = make([]uint16, zajw) - } - for zxvk := range z.content { - z.content[zxvk], bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *arrayContainer) Msgsize() (s int) { - s = 1 + 8 + msgp.ArrayHeaderSize + (len(z.content) * (msgp.Uint16Size)) - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go deleted file mode 100644 index e749721bb..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go +++ /dev/null @@ -1,1086 +0,0 @@ -package roaring - -import ( - "fmt" - "unsafe" -) - -//go:generate msgp -unexported - -type bitmapContainer struct { - cardinality int - bitmap []uint64 -} - -func (bc bitmapContainer) String() string { - var s string - for it := bc.getShortIterator(); it.hasNext(); { - s += fmt.Sprintf("%v, ", it.next()) - } - return s -} - -func newBitmapContainer() *bitmapContainer { - p := new(bitmapContainer) - size := (1 << 16) / 64 - p.bitmap = make([]uint64, size, size) - return p -} - -func newBitmapContainerwithRange(firstOfRun, lastOfRun int) *bitmapContainer { - bc := newBitmapContainer() - bc.cardinality = lastOfRun - firstOfRun + 1 - if bc.cardinality == maxCapacity { - fill(bc.bitmap, uint64(0xffffffffffffffff)) - } else { - firstWord := firstOfRun / 64 - lastWord := lastOfRun / 64 - zeroPrefixLength := uint64(firstOfRun & 63) - zeroSuffixLength := uint64(63 - (lastOfRun & 63)) - - fillRange(bc.bitmap, firstWord, lastWord+1, uint64(0xffffffffffffffff)) - bc.bitmap[firstWord] ^= ((uint64(1) << zeroPrefixLength) - 1) - blockOfOnes := (uint64(1) << zeroSuffixLength) - 1 - maskOnLeft := blockOfOnes << (uint64(64) - zeroSuffixLength) - bc.bitmap[lastWord] ^= maskOnLeft - } - return bc -} - -func (bc *bitmapContainer) minimum() uint16 { - for i := 0; i < len(bc.bitmap); i++ { - w := bc.bitmap[i] - if w != 0 { - r := countTrailingZeros(w) - return uint16(r + i*64) - } - } - return MaxUint16 -} - -// i should be non-zero -func clz(i uint64) int { - n := 1 - x := uint32(i >> 32) - if x == 0 { - n += 32 - x = uint32(i) - } - if x>>16 == 0 { - n += 16 - x = x << 16 - } - if x>>24 == 0 { - n += 8 - x = x << 8 - } - if x>>28 == 0 { - n += 4 - x = x << 4 - } - if x>>30 == 0 { - n += 2 - x = x << 2 - } - return n - int(x>>31) -} - -func (bc *bitmapContainer) maximum() uint16 { - for i := len(bc.bitmap); i > 0; i-- { - w := bc.bitmap[i-1] - if w != 0 { - r := clz(w) - return uint16((i-1)*64 + 63 - r) - } - } - return uint16(0) -} - -type bitmapContainerShortIterator struct { - ptr *bitmapContainer - i int -} - -func (bcsi *bitmapContainerShortIterator) next() uint16 { - j := bcsi.i - bcsi.i = bcsi.ptr.NextSetBit(bcsi.i + 1) - return uint16(j) -} -func (bcsi *bitmapContainerShortIterator) hasNext() bool { - return bcsi.i >= 0 -} - -func (bcsi *bitmapContainerShortIterator) peekNext() uint16 { - return uint16(bcsi.i) -} - -func (bcsi *bitmapContainerShortIterator) advanceIfNeeded(minval uint16) { - if bcsi.hasNext() && bcsi.peekNext() < minval { - 
bcsi.i = bcsi.ptr.NextSetBit(int(minval)) - } -} - -func newBitmapContainerShortIterator(a *bitmapContainer) *bitmapContainerShortIterator { - return &bitmapContainerShortIterator{a, a.NextSetBit(0)} -} - -func (bc *bitmapContainer) getShortIterator() shortPeekable { - return newBitmapContainerShortIterator(bc) -} - -type reverseBitmapContainerShortIterator struct { - ptr *bitmapContainer - i int -} - -func (bcsi *reverseBitmapContainerShortIterator) next() uint16 { - if bcsi.i == -1 { - panic("reverseBitmapContainerShortIterator.next() going beyond what is available") - } - - j := bcsi.i - bcsi.i = bcsi.ptr.PrevSetBit(bcsi.i - 1) - return uint16(j) -} - -func (bcsi *reverseBitmapContainerShortIterator) hasNext() bool { - return bcsi.i >= 0 -} - -func newReverseBitmapContainerShortIterator(a *bitmapContainer) *reverseBitmapContainerShortIterator { - if a.cardinality == 0 { - return &reverseBitmapContainerShortIterator{a, -1} - } - return &reverseBitmapContainerShortIterator{a, int(a.maximum())} -} - -func (bc *bitmapContainer) getReverseIterator() shortIterable { - return newReverseBitmapContainerShortIterator(bc) -} - -type bitmapContainerManyIterator struct { - ptr *bitmapContainer - base int - bitset uint64 -} - -func (bcmi *bitmapContainerManyIterator) nextMany(hs uint32, buf []uint32) int { - n := 0 - base := bcmi.base - bitset := bcmi.bitset - - for n < len(buf) { - if bitset == 0 { - base++ - if base >= len(bcmi.ptr.bitmap) { - bcmi.base = base - bcmi.bitset = bitset - return n - } - bitset = bcmi.ptr.bitmap[base] - continue - } - t := bitset & -bitset - buf[n] = uint32(((base * 64) + int(popcount(t-1)))) | hs - n = n + 1 - bitset ^= t - } - - bcmi.base = base - bcmi.bitset = bitset - return n -} - -func newBitmapContainerManyIterator(a *bitmapContainer) *bitmapContainerManyIterator { - return &bitmapContainerManyIterator{a, -1, 0} -} - -func (bc *bitmapContainer) getManyIterator() manyIterable { - return newBitmapContainerManyIterator(bc) -} - -func (bc *bitmapContainer) getSizeInBytes() int { - return len(bc.bitmap) * 8 // + bcBaseBytes -} - -func (bc *bitmapContainer) serializedSizeInBytes() int { - //return bc.Msgsize()// NOO! 
This breaks GetSerializedSizeInBytes - return len(bc.bitmap) * 8 -} - -const bcBaseBytes = int(unsafe.Sizeof(bitmapContainer{})) - -// bitmapContainer doesn't depend on card, always fully allocated -func bitmapContainerSizeInBytes() int { - return bcBaseBytes + (1<<16)/8 -} - -func bitmapEquals(a, b []uint64) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} - -func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) { - // TODO: should be written as optimized assembly - pos := i - base := mask - for k := 0; k < len(bc.bitmap); k++ { - bitset := bc.bitmap[k] - for bitset != 0 { - t := bitset & -bitset - x[pos] = base + uint32(popcount(t-1)) - pos++ - bitset ^= t - } - base += 64 - } -} - -func (bc *bitmapContainer) equals(o container) bool { - srb, ok := o.(*bitmapContainer) - if ok { - if srb.cardinality != bc.cardinality { - return false - } - return bitmapEquals(bc.bitmap, srb.bitmap) - } - - // use generic comparison - if bc.getCardinality() != o.getCardinality() { - return false - } - ait := o.getShortIterator() - bit := bc.getShortIterator() - - for ait.hasNext() { - if bit.next() != ait.next() { - return false - } - } - return true -} - -func (bc *bitmapContainer) iaddReturnMinimized(i uint16) container { - bc.iadd(i) - if bc.isFull() { - return newRunContainer16Range(0, MaxUint16) - } - return bc -} - -func (bc *bitmapContainer) iadd(i uint16) bool { - x := int(i) - previous := bc.bitmap[x/64] - mask := uint64(1) << (uint(x) % 64) - newb := previous | mask - bc.bitmap[x/64] = newb - bc.cardinality += int((previous ^ newb) >> (uint(x) % 64)) - return newb != previous -} - -func (bc *bitmapContainer) iremoveReturnMinimized(i uint16) container { - if bc.iremove(i) { - if bc.cardinality == arrayDefaultMaxSize { - return bc.toArrayContainer() - } - } - return bc -} - -// iremove returns true if i was found. 
-func (bc *bitmapContainer) iremove(i uint16) bool { - if bc.contains(i) { - bc.cardinality-- - bc.bitmap[i/64] &^= (uint64(1) << (i % 64)) - return true - } - return false -} - -func (bc *bitmapContainer) isFull() bool { - return bc.cardinality == int(MaxUint16)+1 -} - -func (bc *bitmapContainer) getCardinality() int { - return bc.cardinality -} - -func (bc *bitmapContainer) clone() container { - ptr := bitmapContainer{bc.cardinality, make([]uint64, len(bc.bitmap))} - copy(ptr.bitmap, bc.bitmap[:]) - return &ptr -} - -// add all values in range [firstOfRange,lastOfRange) -func (bc *bitmapContainer) iaddRange(firstOfRange, lastOfRange int) container { - bc.cardinality += setBitmapRangeAndCardinalityChange(bc.bitmap, firstOfRange, lastOfRange) - return bc -} - -// remove all values in range [firstOfRange,lastOfRange) -func (bc *bitmapContainer) iremoveRange(firstOfRange, lastOfRange int) container { - bc.cardinality += resetBitmapRangeAndCardinalityChange(bc.bitmap, firstOfRange, lastOfRange) - if bc.getCardinality() <= arrayDefaultMaxSize { - return bc.toArrayContainer() - } - return bc -} - -// flip all values in range [firstOfRange,endx) -func (bc *bitmapContainer) inot(firstOfRange, endx int) container { - if endx-firstOfRange == maxCapacity { - flipBitmapRange(bc.bitmap, firstOfRange, endx) - bc.cardinality = maxCapacity - bc.cardinality - } else if endx-firstOfRange > maxCapacity/2 { - flipBitmapRange(bc.bitmap, firstOfRange, endx) - bc.computeCardinality() - } else { - bc.cardinality += flipBitmapRangeAndCardinalityChange(bc.bitmap, firstOfRange, endx) - } - if bc.getCardinality() <= arrayDefaultMaxSize { - return bc.toArrayContainer() - } - return bc -} - -// flip all values in range [firstOfRange,endx) -func (bc *bitmapContainer) not(firstOfRange, endx int) container { - answer := bc.clone() - return answer.inot(firstOfRange, endx) -} - -func (bc *bitmapContainer) or(a container) container { - switch x := a.(type) { - case *arrayContainer: - return bc.orArray(x) - case *bitmapContainer: - return bc.orBitmap(x) - case *runContainer16: - if x.isFull() { - return x.clone() - } - return x.orBitmapContainer(bc) - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) orCardinality(a container) int { - switch x := a.(type) { - case *arrayContainer: - return bc.orArrayCardinality(x) - case *bitmapContainer: - return bc.orBitmapCardinality(x) - case *runContainer16: - return x.orBitmapContainerCardinality(bc) - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) ior(a container) container { - switch x := a.(type) { - case *arrayContainer: - return bc.iorArray(x) - case *bitmapContainer: - return bc.iorBitmap(x) - case *runContainer16: - if x.isFull() { - return x.clone() - } - for i := range x.iv { - bc.iaddRange(int(x.iv[i].start), int(x.iv[i].last())+1) - } - if bc.isFull() { - return newRunContainer16Range(0, MaxUint16) - } - //bc.computeCardinality() - return bc - } - panic(fmt.Errorf("unsupported container type %T", a)) -} - -func (bc *bitmapContainer) lazyIOR(a container) container { - switch x := a.(type) { - case *arrayContainer: - return bc.lazyIORArray(x) - case *bitmapContainer: - return bc.lazyIORBitmap(x) - case *runContainer16: - if x.isFull() { - return x.clone() - } - - // Manually inlined setBitmapRange function - bitmap := bc.bitmap - for _, iv := range x.iv { - start := int(iv.start) - end := int(iv.last()) + 1 - if start >= end { - continue - } - firstword := start / 64 - endword := (end - 1) / 64 - if firstword == endword { - 
bitmap[firstword] |= (^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64)) - continue - } - bitmap[firstword] |= ^uint64(0) << uint(start%64) - for i := firstword + 1; i < endword; i++ { - bitmap[i] = ^uint64(0) - } - bitmap[endword] |= ^uint64(0) >> (uint(-end) % 64) - } - bc.cardinality = invalidCardinality - return bc - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) lazyOR(a container) container { - switch x := a.(type) { - case *arrayContainer: - return bc.lazyORArray(x) - case *bitmapContainer: - return bc.lazyORBitmap(x) - case *runContainer16: - if x.isFull() { - return x.clone() - } - // TODO: implement lazy OR - return x.orBitmapContainer(bc) - - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) orArray(value2 *arrayContainer) container { - answer := bc.clone().(*bitmapContainer) - c := value2.getCardinality() - for k := 0; k < c; k++ { - v := value2.content[k] - i := uint(v) >> 6 - bef := answer.bitmap[i] - aft := bef | (uint64(1) << (v % 64)) - answer.bitmap[i] = aft - answer.cardinality += int((bef - aft) >> 63) - } - return answer -} - -func (bc *bitmapContainer) orArrayCardinality(value2 *arrayContainer) int { - answer := 0 - c := value2.getCardinality() - for k := 0; k < c; k++ { - // branchless: - v := value2.content[k] - i := uint(v) >> 6 - bef := bc.bitmap[i] - aft := bef | (uint64(1) << (v % 64)) - answer += int((bef - aft) >> 63) - } - return answer -} - -func (bc *bitmapContainer) orBitmap(value2 *bitmapContainer) container { - answer := newBitmapContainer() - for k := 0; k < len(answer.bitmap); k++ { - answer.bitmap[k] = bc.bitmap[k] | value2.bitmap[k] - } - answer.computeCardinality() - if answer.isFull() { - return newRunContainer16Range(0, MaxUint16) - } - return answer -} - -func (bc *bitmapContainer) orBitmapCardinality(value2 *bitmapContainer) int { - return int(popcntOrSlice(bc.bitmap, value2.bitmap)) -} - -func (bc *bitmapContainer) andBitmapCardinality(value2 *bitmapContainer) int { - return int(popcntAndSlice(bc.bitmap, value2.bitmap)) -} - -func (bc *bitmapContainer) computeCardinality() { - bc.cardinality = int(popcntSlice(bc.bitmap)) -} - -func (bc *bitmapContainer) iorArray(ac *arrayContainer) container { - for k := range ac.content { - vc := ac.content[k] - i := uint(vc) >> 6 - bef := bc.bitmap[i] - aft := bef | (uint64(1) << (vc % 64)) - bc.bitmap[i] = aft - bc.cardinality += int((bef - aft) >> 63) - } - if bc.isFull() { - return newRunContainer16Range(0, MaxUint16) - } - return bc -} - -func (bc *bitmapContainer) iorBitmap(value2 *bitmapContainer) container { - answer := bc - answer.cardinality = 0 - for k := 0; k < len(answer.bitmap); k++ { - answer.bitmap[k] = bc.bitmap[k] | value2.bitmap[k] - } - answer.computeCardinality() - if bc.isFull() { - return newRunContainer16Range(0, MaxUint16) - } - return answer -} - -func (bc *bitmapContainer) lazyIORArray(value2 *arrayContainer) container { - answer := bc - c := value2.getCardinality() - for k := 0; k+3 < c; k += 4 { - content := (*[4]uint16)(unsafe.Pointer(&value2.content[k])) - vc0 := content[0] - i0 := uint(vc0) >> 6 - answer.bitmap[i0] = answer.bitmap[i0] | (uint64(1) << (vc0 % 64)) - - vc1 := content[1] - i1 := uint(vc1) >> 6 - answer.bitmap[i1] = answer.bitmap[i1] | (uint64(1) << (vc1 % 64)) - - vc2 := content[2] - i2 := uint(vc2) >> 6 - answer.bitmap[i2] = answer.bitmap[i2] | (uint64(1) << (vc2 % 64)) - - vc3 := content[3] - i3 := uint(vc3) >> 6 - answer.bitmap[i3] = answer.bitmap[i3] | (uint64(1) << (vc3 % 64)) - } - - for k := c &^ 
3; k < c; k++ { - vc := value2.content[k] - i := uint(vc) >> 6 - answer.bitmap[i] = answer.bitmap[i] | (uint64(1) << (vc % 64)) - } - - answer.cardinality = invalidCardinality - return answer -} - -func (bc *bitmapContainer) lazyORArray(value2 *arrayContainer) container { - answer := bc.clone().(*bitmapContainer) - return answer.lazyIORArray(value2) -} - -func (bc *bitmapContainer) lazyIORBitmap(value2 *bitmapContainer) container { - answer := bc - for k := 0; k < len(answer.bitmap); k++ { - answer.bitmap[k] = bc.bitmap[k] | value2.bitmap[k] - } - bc.cardinality = invalidCardinality - return answer -} - -func (bc *bitmapContainer) lazyORBitmap(value2 *bitmapContainer) container { - answer := bc.clone().(*bitmapContainer) - return answer.lazyIORBitmap(value2) -} - -func (bc *bitmapContainer) xor(a container) container { - switch x := a.(type) { - case *arrayContainer: - return bc.xorArray(x) - case *bitmapContainer: - return bc.xorBitmap(x) - case *runContainer16: - return x.xorBitmap(bc) - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) xorArray(value2 *arrayContainer) container { - answer := bc.clone().(*bitmapContainer) - c := value2.getCardinality() - for k := 0; k < c; k++ { - vc := value2.content[k] - index := uint(vc) >> 6 - abi := answer.bitmap[index] - mask := uint64(1) << (vc % 64) - answer.cardinality += 1 - 2*int((abi&mask)>>(vc%64)) - answer.bitmap[index] = abi ^ mask - } - if answer.cardinality <= arrayDefaultMaxSize { - return answer.toArrayContainer() - } - return answer -} - -func (bc *bitmapContainer) rank(x uint16) int { - // TODO: rewrite in assembly - leftover := (uint(x) + 1) & 63 - if leftover == 0 { - return int(popcntSlice(bc.bitmap[:(uint(x)+1)/64])) - } - return int(popcntSlice(bc.bitmap[:(uint(x)+1)/64]) + popcount(bc.bitmap[(uint(x)+1)/64]<<(64-leftover))) -} - -func (bc *bitmapContainer) selectInt(x uint16) int { - remaining := x - for k := 0; k < len(bc.bitmap); k++ { - w := popcount(bc.bitmap[k]) - if uint16(w) > remaining { - return k*64 + selectBitPosition(bc.bitmap[k], int(remaining)) - } - remaining -= uint16(w) - } - return -1 -} - -func (bc *bitmapContainer) xorBitmap(value2 *bitmapContainer) container { - newCardinality := int(popcntXorSlice(bc.bitmap, value2.bitmap)) - - if newCardinality > arrayDefaultMaxSize { - answer := newBitmapContainer() - for k := 0; k < len(answer.bitmap); k++ { - answer.bitmap[k] = bc.bitmap[k] ^ value2.bitmap[k] - } - answer.cardinality = newCardinality - if answer.isFull() { - return newRunContainer16Range(0, MaxUint16) - } - return answer - } - ac := newArrayContainerSize(newCardinality) - fillArrayXOR(ac.content, bc.bitmap, value2.bitmap) - ac.content = ac.content[:newCardinality] - return ac -} - -func (bc *bitmapContainer) and(a container) container { - switch x := a.(type) { - case *arrayContainer: - return bc.andArray(x) - case *bitmapContainer: - return bc.andBitmap(x) - case *runContainer16: - if x.isFull() { - return bc.clone() - } - return x.andBitmapContainer(bc) - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) andCardinality(a container) int { - switch x := a.(type) { - case *arrayContainer: - return bc.andArrayCardinality(x) - case *bitmapContainer: - return bc.andBitmapCardinality(x) - case *runContainer16: - return x.andBitmapContainerCardinality(bc) - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) intersects(a container) bool { - switch x := a.(type) { - case *arrayContainer: - return bc.intersectsArray(x) - case *bitmapContainer: 
- return bc.intersectsBitmap(x) - case *runContainer16: - return x.intersects(bc) - - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) iand(a container) container { - switch x := a.(type) { - case *arrayContainer: - return bc.iandArray(x) - case *bitmapContainer: - return bc.iandBitmap(x) - case *runContainer16: - if x.isFull() { - return bc.clone() - } - return bc.iandRun16(x) - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) iandRun16(rc *runContainer16) container { - rcb := newBitmapContainerFromRun(rc) - return bc.iandBitmap(rcb) -} - -func (bc *bitmapContainer) iandArray(ac *arrayContainer) container { - acb := ac.toBitmapContainer() - return bc.iandBitmap(acb) -} - -func (bc *bitmapContainer) andArray(value2 *arrayContainer) *arrayContainer { - answer := newArrayContainerCapacity(len(value2.content)) - answer.content = answer.content[:cap(answer.content)] - c := value2.getCardinality() - pos := 0 - for k := 0; k < c; k++ { - v := value2.content[k] - answer.content[pos] = v - pos += int(bc.bitValue(v)) - } - answer.content = answer.content[:pos] - return answer -} - -func (bc *bitmapContainer) andArrayCardinality(value2 *arrayContainer) int { - c := value2.getCardinality() - pos := 0 - for k := 0; k < c; k++ { - v := value2.content[k] - pos += int(bc.bitValue(v)) - } - return pos -} - -func (bc *bitmapContainer) getCardinalityInRange(start, end uint) int { - if start >= end { - return 0 - } - firstword := start / 64 - endword := (end - 1) / 64 - const allones = ^uint64(0) - if firstword == endword { - return int(popcount(bc.bitmap[firstword] & ((allones << (start % 64)) & (allones >> ((64 - end) & 63))))) - } - answer := popcount(bc.bitmap[firstword] & (allones << (start % 64))) - answer += popcntSlice(bc.bitmap[firstword+1 : endword]) - answer += popcount(bc.bitmap[endword] & (allones >> ((64 - end) & 63))) - return int(answer) -} - -func (bc *bitmapContainer) andBitmap(value2 *bitmapContainer) container { - newcardinality := int(popcntAndSlice(bc.bitmap, value2.bitmap)) - if newcardinality > arrayDefaultMaxSize { - answer := newBitmapContainer() - for k := 0; k < len(answer.bitmap); k++ { - answer.bitmap[k] = bc.bitmap[k] & value2.bitmap[k] - } - answer.cardinality = newcardinality - return answer - } - ac := newArrayContainerSize(newcardinality) - fillArrayAND(ac.content, bc.bitmap, value2.bitmap) - ac.content = ac.content[:newcardinality] //not sure why i need this - return ac - -} - -func (bc *bitmapContainer) intersectsArray(value2 *arrayContainer) bool { - c := value2.getCardinality() - for k := 0; k < c; k++ { - v := value2.content[k] - if bc.contains(v) { - return true - } - } - return false -} - -func (bc *bitmapContainer) intersectsBitmap(value2 *bitmapContainer) bool { - for k := 0; k < len(bc.bitmap); k++ { - if (bc.bitmap[k] & value2.bitmap[k]) != 0 { - return true - } - } - return false - -} - -func (bc *bitmapContainer) iandBitmap(value2 *bitmapContainer) container { - newcardinality := int(popcntAndSlice(bc.bitmap, value2.bitmap)) - for k := 0; k < len(bc.bitmap); k++ { - bc.bitmap[k] = bc.bitmap[k] & value2.bitmap[k] - } - bc.cardinality = newcardinality - - if newcardinality <= arrayDefaultMaxSize { - return newArrayContainerFromBitmap(bc) - } - return bc -} - -func (bc *bitmapContainer) andNot(a container) container { - switch x := a.(type) { - case *arrayContainer: - return bc.andNotArray(x) - case *bitmapContainer: - return bc.andNotBitmap(x) - case *runContainer16: - return bc.andNotRun16(x) - } - 
panic("unsupported container type") -} - -func (bc *bitmapContainer) andNotRun16(rc *runContainer16) container { - rcb := rc.toBitmapContainer() - return bc.andNotBitmap(rcb) -} - -func (bc *bitmapContainer) iandNot(a container) container { - switch x := a.(type) { - case *arrayContainer: - return bc.iandNotArray(x) - case *bitmapContainer: - return bc.iandNotBitmapSurely(x) - case *runContainer16: - return bc.iandNotRun16(x) - } - panic("unsupported container type") -} - -func (bc *bitmapContainer) iandNotArray(ac *arrayContainer) container { - acb := ac.toBitmapContainer() - return bc.iandNotBitmapSurely(acb) -} - -func (bc *bitmapContainer) iandNotRun16(rc *runContainer16) container { - rcb := rc.toBitmapContainer() - return bc.iandNotBitmapSurely(rcb) -} - -func (bc *bitmapContainer) andNotArray(value2 *arrayContainer) container { - answer := bc.clone().(*bitmapContainer) - c := value2.getCardinality() - for k := 0; k < c; k++ { - vc := value2.content[k] - i := uint(vc) >> 6 - oldv := answer.bitmap[i] - newv := oldv &^ (uint64(1) << (vc % 64)) - answer.bitmap[i] = newv - answer.cardinality -= int((oldv ^ newv) >> (vc % 64)) - } - if answer.cardinality <= arrayDefaultMaxSize { - return answer.toArrayContainer() - } - return answer -} - -func (bc *bitmapContainer) andNotBitmap(value2 *bitmapContainer) container { - newCardinality := int(popcntMaskSlice(bc.bitmap, value2.bitmap)) - if newCardinality > arrayDefaultMaxSize { - answer := newBitmapContainer() - for k := 0; k < len(answer.bitmap); k++ { - answer.bitmap[k] = bc.bitmap[k] &^ value2.bitmap[k] - } - answer.cardinality = newCardinality - return answer - } - ac := newArrayContainerSize(newCardinality) - fillArrayANDNOT(ac.content, bc.bitmap, value2.bitmap) - return ac -} - -func (bc *bitmapContainer) iandNotBitmapSurely(value2 *bitmapContainer) container { - newCardinality := int(popcntMaskSlice(bc.bitmap, value2.bitmap)) - for k := 0; k < len(bc.bitmap); k++ { - bc.bitmap[k] = bc.bitmap[k] &^ value2.bitmap[k] - } - bc.cardinality = newCardinality - if bc.getCardinality() <= arrayDefaultMaxSize { - return bc.toArrayContainer() - } - return bc -} - -func (bc *bitmapContainer) contains(i uint16) bool { //testbit - x := uint(i) - w := bc.bitmap[x>>6] - mask := uint64(1) << (x & 63) - return (w & mask) != 0 -} - -func (bc *bitmapContainer) bitValue(i uint16) uint64 { - x := uint(i) - w := bc.bitmap[x>>6] - return (w >> (x & 63)) & 1 -} - -func (bc *bitmapContainer) loadData(arrayContainer *arrayContainer) { - bc.cardinality = arrayContainer.getCardinality() - c := arrayContainer.getCardinality() - for k := 0; k < c; k++ { - x := arrayContainer.content[k] - i := int(x) / 64 - bc.bitmap[i] |= (uint64(1) << uint(x%64)) - } -} - -func (bc *bitmapContainer) toArrayContainer() *arrayContainer { - ac := &arrayContainer{} - ac.loadData(bc) - return ac -} - -func (bc *bitmapContainer) fillArray(container []uint16) { - //TODO: rewrite in assembly - pos := 0 - base := 0 - for k := 0; k < len(bc.bitmap); k++ { - bitset := bc.bitmap[k] - for bitset != 0 { - t := bitset & -bitset - container[pos] = uint16((base + int(popcount(t-1)))) - pos = pos + 1 - bitset ^= t - } - base += 64 - } -} - -func (bc *bitmapContainer) NextSetBit(i int) int { - x := i / 64 - if x >= len(bc.bitmap) { - return -1 - } - w := bc.bitmap[x] - w = w >> uint(i%64) - if w != 0 { - return i + countTrailingZeros(w) - } - x++ - for ; x < len(bc.bitmap); x++ { - if bc.bitmap[x] != 0 { - return (x * 64) + countTrailingZeros(bc.bitmap[x]) - } - } - return -1 -} - -func (bc 
*bitmapContainer) PrevSetBit(i int) int { - if i < 0 { - return -1 - } - x := i / 64 - if x >= len(bc.bitmap) { - return -1 - } - - w := bc.bitmap[x] - - b := i % 64 - - w = w << uint(63-b) - if w != 0 { - return i - countLeadingZeros(w) - } - x-- - for ; x >= 0; x-- { - if bc.bitmap[x] != 0 { - return (x * 64) + 63 - countLeadingZeros(bc.bitmap[x]) - } - } - return -1 -} - -// reference the java implementation -// https://github.com/RoaringBitmap/RoaringBitmap/blob/master/src/main/java/org/roaringbitmap/BitmapContainer.java#L875-L892 -// -func (bc *bitmapContainer) numberOfRuns() int { - if bc.cardinality == 0 { - return 0 - } - - var numRuns uint64 - nextWord := bc.bitmap[0] - - for i := 0; i < len(bc.bitmap)-1; i++ { - word := nextWord - nextWord = bc.bitmap[i+1] - numRuns += popcount((^word)&(word<<1)) + ((word >> 63) &^ nextWord) - } - - word := nextWord - numRuns += popcount((^word) & (word << 1)) - if (word & 0x8000000000000000) != 0 { - numRuns++ - } - - return int(numRuns) -} - -// convert to run or array *if needed* -func (bc *bitmapContainer) toEfficientContainer() container { - - numRuns := bc.numberOfRuns() - - sizeAsRunContainer := runContainer16SerializedSizeInBytes(numRuns) - sizeAsBitmapContainer := bitmapContainerSizeInBytes() - card := bc.getCardinality() - sizeAsArrayContainer := arrayContainerSizeInBytes(card) - - if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) { - return newRunContainer16FromBitmapContainer(bc) - } - if card <= arrayDefaultMaxSize { - return bc.toArrayContainer() - } - return bc -} - -func newBitmapContainerFromRun(rc *runContainer16) *bitmapContainer { - - if len(rc.iv) == 1 { - return newBitmapContainerwithRange(int(rc.iv[0].start), int(rc.iv[0].last())) - } - - bc := newBitmapContainer() - for i := range rc.iv { - setBitmapRange(bc.bitmap, int(rc.iv[i].start), int(rc.iv[i].last())+1) - bc.cardinality += int(rc.iv[i].last()) + 1 - int(rc.iv[i].start) - } - //bc.computeCardinality() - return bc -} - -func (bc *bitmapContainer) containerType() contype { - return bitmapContype -} - -func (bc *bitmapContainer) addOffset(x uint16) []container { - low := newBitmapContainer() - high := newBitmapContainer() - b := uint32(x) >> 6 - i := uint32(x) % 64 - end := uint32(1024) - b - if i == 0 { - copy(low.bitmap[b:], bc.bitmap[:end]) - copy(high.bitmap[:b], bc.bitmap[end:]) - } else { - low.bitmap[b] = bc.bitmap[0] << i - for k := uint32(1); k < end; k++ { - newval := bc.bitmap[k] << i - if newval == 0 { - newval = bc.bitmap[k-1] >> (64 - i) - } - low.bitmap[b+k] = newval - } - for k := end; k < 1024; k++ { - newval := bc.bitmap[k] << i - if newval == 0 { - newval = bc.bitmap[k-1] >> (64 - i) - } - high.bitmap[k-end] = newval - } - high.bitmap[b] = bc.bitmap[1023] >> (64 - i) - } - low.computeCardinality() - high.computeCardinality() - return []container{low, high} -} diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go deleted file mode 100644 index 9b5a465f3..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go +++ /dev/null @@ -1,415 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *bitmapContainer) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbzg uint32 - zbzg, err = dc.ReadMapHeader() - if 
err != nil { - return - } - for zbzg > 0 { - zbzg-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.cardinality, err = dc.ReadInt() - if err != nil { - return - } - case "bitmap": - var zbai uint32 - zbai, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.bitmap) >= int(zbai) { - z.bitmap = (z.bitmap)[:zbai] - } else { - z.bitmap = make([]uint64, zbai) - } - for zxvk := range z.bitmap { - z.bitmap[zxvk], err = dc.ReadUint64() - if err != nil { - return - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *bitmapContainer) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "cardinality" - err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - if err != nil { - return err - } - err = en.WriteInt(z.cardinality) - if err != nil { - return - } - // write "bitmap" - err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.bitmap))) - if err != nil { - return - } - for zxvk := range z.bitmap { - err = en.WriteUint64(z.bitmap[zxvk]) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *bitmapContainer) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "cardinality" - o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - o = msgp.AppendInt(o, z.cardinality) - // string "bitmap" - o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - o = msgp.AppendArrayHeader(o, uint32(len(z.bitmap))) - for zxvk := range z.bitmap { - o = msgp.AppendUint64(o, z.bitmap[zxvk]) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *bitmapContainer) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zcmr uint32 - zcmr, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zcmr > 0 { - zcmr-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.cardinality, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - return - } - case "bitmap": - var zajw uint32 - zajw, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.bitmap) >= int(zajw) { - z.bitmap = (z.bitmap)[:zajw] - } else { - z.bitmap = make([]uint64, zajw) - } - for zxvk := range z.bitmap { - z.bitmap[zxvk], bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *bitmapContainer) Msgsize() (s int) { - s = 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.bitmap) * (msgp.Uint64Size)) - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *bitmapContainerShortIterator) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zhct uint32 - zhct, err = dc.ReadMapHeader() - if err != nil { - return - } - for zhct > 0 { - zhct-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "ptr": - if dc.IsNil() { - err = dc.ReadNil() - if err 
!= nil { - return - } - z.ptr = nil - } else { - if z.ptr == nil { - z.ptr = new(bitmapContainer) - } - var zcua uint32 - zcua, err = dc.ReadMapHeader() - if err != nil { - return - } - for zcua > 0 { - zcua-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.ptr.cardinality, err = dc.ReadInt() - if err != nil { - return - } - case "bitmap": - var zxhx uint32 - zxhx, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.ptr.bitmap) >= int(zxhx) { - z.ptr.bitmap = (z.ptr.bitmap)[:zxhx] - } else { - z.ptr.bitmap = make([]uint64, zxhx) - } - for zwht := range z.ptr.bitmap { - z.ptr.bitmap[zwht], err = dc.ReadUint64() - if err != nil { - return - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "i": - z.i, err = dc.ReadInt() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *bitmapContainerShortIterator) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "ptr" - err = en.Append(0x82, 0xa3, 0x70, 0x74, 0x72) - if err != nil { - return err - } - if z.ptr == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - // map header, size 2 - // write "cardinality" - err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - if err != nil { - return err - } - err = en.WriteInt(z.ptr.cardinality) - if err != nil { - return - } - // write "bitmap" - err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.ptr.bitmap))) - if err != nil { - return - } - for zwht := range z.ptr.bitmap { - err = en.WriteUint64(z.ptr.bitmap[zwht]) - if err != nil { - return - } - } - } - // write "i" - err = en.Append(0xa1, 0x69) - if err != nil { - return err - } - err = en.WriteInt(z.i) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *bitmapContainerShortIterator) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "ptr" - o = append(o, 0x82, 0xa3, 0x70, 0x74, 0x72) - if z.ptr == nil { - o = msgp.AppendNil(o) - } else { - // map header, size 2 - // string "cardinality" - o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79) - o = msgp.AppendInt(o, z.ptr.cardinality) - // string "bitmap" - o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70) - o = msgp.AppendArrayHeader(o, uint32(len(z.ptr.bitmap))) - for zwht := range z.ptr.bitmap { - o = msgp.AppendUint64(o, z.ptr.bitmap[zwht]) - } - } - // string "i" - o = append(o, 0xa1, 0x69) - o = msgp.AppendInt(o, z.i) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *bitmapContainerShortIterator) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zlqf uint32 - zlqf, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zlqf > 0 { - zlqf-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "ptr": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.ptr = nil - } else { - if z.ptr == nil { - z.ptr = new(bitmapContainer) - } - var zdaf uint32 - zdaf, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zdaf > 0 
{ - zdaf-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "cardinality": - z.ptr.cardinality, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - return - } - case "bitmap": - var zpks uint32 - zpks, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.ptr.bitmap) >= int(zpks) { - z.ptr.bitmap = (z.ptr.bitmap)[:zpks] - } else { - z.ptr.bitmap = make([]uint64, zpks) - } - for zwht := range z.ptr.bitmap { - z.ptr.bitmap[zwht], bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "i": - z.i, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *bitmapContainerShortIterator) Msgsize() (s int) { - s = 1 + 4 - if z.ptr == nil { - s += msgp.NilSize - } else { - s += 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.ptr.bitmap) * (msgp.Uint64Size)) - } - s += 2 + msgp.IntSize - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/byte_input.go b/vendor/github.com/RoaringBitmap/roaring/byte_input.go deleted file mode 100644 index f7a98a1d4..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/byte_input.go +++ /dev/null @@ -1,161 +0,0 @@ -package roaring - -import ( - "encoding/binary" - "io" -) - -type byteInput interface { - // next returns a slice containing the next n bytes from the buffer, - // advancing the buffer as if the bytes had been returned by Read. - next(n int) ([]byte, error) - // readUInt32 reads uint32 with LittleEndian order - readUInt32() (uint32, error) - // readUInt16 reads uint16 with LittleEndian order - readUInt16() (uint16, error) - // getReadBytes returns read bytes - getReadBytes() int64 - // skipBytes skips exactly n bytes - skipBytes(n int) error -} - -func newByteInputFromReader(reader io.Reader) byteInput { - return &byteInputAdapter{ - r: reader, - readBytes: 0, - } -} - -func newByteInput(buf []byte) byteInput { - return &byteBuffer{ - buf: buf, - off: 0, - } -} - -type byteBuffer struct { - buf []byte - off int -} - -// next returns a slice containing the next n bytes from the reader -// If there are fewer bytes than the given n, io.ErrUnexpectedEOF will be returned -func (b *byteBuffer) next(n int) ([]byte, error) { - m := len(b.buf) - b.off - - if n > m { - return nil, io.ErrUnexpectedEOF - } - - data := b.buf[b.off : b.off+n] - b.off += n - - return data, nil -} - -// readUInt32 reads uint32 with LittleEndian order -func (b *byteBuffer) readUInt32() (uint32, error) { - if len(b.buf)-b.off < 4 { - return 0, io.ErrUnexpectedEOF - } - - v := binary.LittleEndian.Uint32(b.buf[b.off:]) - b.off += 4 - - return v, nil -} - -// readUInt16 reads uint16 with LittleEndian order -func (b *byteBuffer) readUInt16() (uint16, error) { - if len(b.buf)-b.off < 2 { - return 0, io.ErrUnexpectedEOF - } - - v := binary.LittleEndian.Uint16(b.buf[b.off:]) - b.off += 2 - - return v, nil -} - -// getReadBytes returns read bytes -func (b *byteBuffer) getReadBytes() int64 { - return int64(b.off) -} - -// skipBytes skips exactly n bytes -func (b *byteBuffer) skipBytes(n int) error { - m := len(b.buf) - b.off - - if n > m { - return io.ErrUnexpectedEOF - } - - b.off += n - - return nil -} - -// reset resets the given buffer with a 
new byte slice -func (b *byteBuffer) reset(buf []byte) { - b.buf = buf - b.off = 0 -} - -type byteInputAdapter struct { - r io.Reader - readBytes int -} - -// next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -func (b *byteInputAdapter) next(n int) ([]byte, error) { - buf := make([]byte, n) - m, err := io.ReadAtLeast(b.r, buf, n) - b.readBytes += m - - if err != nil { - return nil, err - } - - return buf, nil -} - -// readUInt32 reads uint32 with LittleEndian order -func (b *byteInputAdapter) readUInt32() (uint32, error) { - buf, err := b.next(4) - - if err != nil { - return 0, err - } - - return binary.LittleEndian.Uint32(buf), nil -} - -// readUInt16 reads uint16 with LittleEndian order -func (b *byteInputAdapter) readUInt16() (uint16, error) { - buf, err := b.next(2) - - if err != nil { - return 0, err - } - - return binary.LittleEndian.Uint16(buf), nil -} - -// getReadBytes returns read bytes -func (b *byteInputAdapter) getReadBytes() int64 { - return int64(b.readBytes) -} - -// skipBytes skips exactly n bytes -func (b *byteInputAdapter) skipBytes(n int) error { - _, err := b.next(n) - - return err -} - -// reset resets the given buffer with a new stream -func (b *byteInputAdapter) reset(stream io.Reader) { - b.r = stream - b.readBytes = 0 -} diff --git a/vendor/github.com/RoaringBitmap/roaring/clz.go b/vendor/github.com/RoaringBitmap/roaring/clz.go deleted file mode 100644 index bcd80d32f..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/clz.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.9 -// "go1.9", from Go version 1.9 onward -// See https://golang.org/pkg/go/build/#hdr-Build_Constraints - -package roaring - -import "math/bits" - -func countLeadingZeros(x uint64) int { - return bits.LeadingZeros64(x) -} diff --git a/vendor/github.com/RoaringBitmap/roaring/clz_compat.go b/vendor/github.com/RoaringBitmap/roaring/clz_compat.go deleted file mode 100644 index eeef4de35..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/clz_compat.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !go1.9 - -package roaring - -// LeadingZeroBits returns the number of consecutive most significant zero -// bits of x. -func countLeadingZeros(i uint64) int { - if i == 0 { - return 64 - } - n := 1 - x := uint32(i >> 32) - if x == 0 { - n += 32 - x = uint32(i) - } - if (x >> 16) == 0 { - n += 16 - x <<= 16 - } - if (x >> 24) == 0 { - n += 8 - x <<= 8 - } - if x>>28 == 0 { - n += 4 - x <<= 4 - } - if x>>30 == 0 { - n += 2 - x <<= 2 - - } - n -= int(x >> 31) - return n -} diff --git a/vendor/github.com/RoaringBitmap/roaring/ctz.go b/vendor/github.com/RoaringBitmap/roaring/ctz.go deleted file mode 100644 index e399dddeb..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/ctz.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.9 -// "go1.9", from Go version 1.9 onward -// See https://golang.org/pkg/go/build/#hdr-Build_Constraints - -package roaring - -import "math/bits" - -func countTrailingZeros(x uint64) int { - return bits.TrailingZeros64(x) -} diff --git a/vendor/github.com/RoaringBitmap/roaring/ctz_compat.go b/vendor/github.com/RoaringBitmap/roaring/ctz_compat.go deleted file mode 100644 index 80220e6be..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/ctz_compat.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build !go1.9 - -package roaring - -// Reuse of portions of go/src/math/big standard lib code -// under this license: -/* -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -const deBruijn32 = 0x077CB531 - -var deBruijn32Lookup = []byte{ - 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, - 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9, -} - -const deBruijn64 = 0x03f79d71b4ca8b09 - -var deBruijn64Lookup = []byte{ - 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, - 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, - 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, - 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, -} - -// trailingZeroBits returns the number of consecutive least significant zero -// bits of x. -func countTrailingZeros(x uint64) int { - // x & -x leaves only the right-most bit set in the word. Let k be the - // index of that bit. Since only a single bit is set, the value is two - // to the power of k. Multiplying by a power of two is equivalent to - // left shifting, in this case by k bits. The de Bruijn constant is - // such that all six bit, consecutive substrings are distinct. - // Therefore, if we have a left shifted version of this constant we can - // find by how many bits it was shifted by looking at which six bit - // substring ended up at the top of the word. - // (Knuth, volume 4, section 7.3.1) - if x == 0 { - // We have to special case 0; the fomula - // below doesn't work for 0. 
- return 64 - } - return int(deBruijn64Lookup[((x&-x)*(deBruijn64))>>58]) -} diff --git a/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go b/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go deleted file mode 100644 index 762e500ed..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go +++ /dev/null @@ -1,215 +0,0 @@ -package roaring - -import ( - "container/heap" -) - -// Or function that requires repairAfterLazy -func lazyOR(x1, x2 *Bitmap) *Bitmap { - answer := NewBitmap() - pos1 := 0 - pos2 := 0 - length1 := x1.highlowcontainer.size() - length2 := x2.highlowcontainer.size() -main: - for (pos1 < length1) && (pos2 < length2) { - s1 := x1.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - - for { - if s1 < s2 { - answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1) - pos1++ - if pos1 == length1 { - break main - } - s1 = x1.highlowcontainer.getKeyAtIndex(pos1) - } else if s1 > s2 { - answer.highlowcontainer.appendCopy(x2.highlowcontainer, pos2) - pos2++ - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else { - c1 := x1.highlowcontainer.getContainerAtIndex(pos1) - switch t := c1.(type) { - case *arrayContainer: - c1 = t.toBitmapContainer() - case *runContainer16: - if !t.isFull() { - c1 = t.toBitmapContainer() - } - } - - answer.highlowcontainer.appendContainer(s1, c1.lazyOR(x2.highlowcontainer.getContainerAtIndex(pos2)), false) - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = x1.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } - if pos1 == length1 { - answer.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2) - } else if pos2 == length2 { - answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1) - } - return answer -} - -// In-place Or function that requires repairAfterLazy -func (x1 *Bitmap) lazyOR(x2 *Bitmap) *Bitmap { - pos1 := 0 - pos2 := 0 - length1 := x1.highlowcontainer.size() - length2 := x2.highlowcontainer.size() -main: - for (pos1 < length1) && (pos2 < length2) { - s1 := x1.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - - for { - if s1 < s2 { - pos1++ - if pos1 == length1 { - break main - } - s1 = x1.highlowcontainer.getKeyAtIndex(pos1) - } else if s1 > s2 { - x1.highlowcontainer.insertNewKeyValueAt(pos1, s2, x2.highlowcontainer.getContainerAtIndex(pos2).clone()) - pos2++ - pos1++ - length1++ - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else { - c1 := x1.highlowcontainer.getContainerAtIndex(pos1) - switch t := c1.(type) { - case *arrayContainer: - c1 = t.toBitmapContainer() - case *runContainer16: - if !t.isFull() { - c1 = t.toBitmapContainer() - } - case *bitmapContainer: - c1 = x1.highlowcontainer.getWritableContainerAtIndex(pos1) - } - - x1.highlowcontainer.containers[pos1] = c1.lazyIOR(x2.highlowcontainer.getContainerAtIndex(pos2)) - x1.highlowcontainer.needCopyOnWrite[pos1] = false - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = x1.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } - if pos1 == length1 { - x1.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2) - } - return x1 -} - -// to be called after lazy aggregates -func (x1 *Bitmap) repairAfterLazy() { - for pos := 0; pos < x1.highlowcontainer.size(); pos++ { - c := 
x1.highlowcontainer.getContainerAtIndex(pos) - switch c.(type) { - case *bitmapContainer: - if c.(*bitmapContainer).cardinality == invalidCardinality { - c = x1.highlowcontainer.getWritableContainerAtIndex(pos) - c.(*bitmapContainer).computeCardinality() - if c.(*bitmapContainer).getCardinality() <= arrayDefaultMaxSize { - x1.highlowcontainer.setContainerAtIndex(pos, c.(*bitmapContainer).toArrayContainer()) - } else if c.(*bitmapContainer).isFull() { - x1.highlowcontainer.setContainerAtIndex(pos, newRunContainer16Range(0, MaxUint16)) - } - } - } - } -} - -// FastAnd computes the intersection between many bitmaps quickly -// Compared to the And function, it can take many bitmaps as input, thus saving the trouble -// of manually calling "And" many times. -func FastAnd(bitmaps ...*Bitmap) *Bitmap { - if len(bitmaps) == 0 { - return NewBitmap() - } else if len(bitmaps) == 1 { - return bitmaps[0].Clone() - } - answer := And(bitmaps[0], bitmaps[1]) - for _, bm := range bitmaps[2:] { - answer.And(bm) - } - return answer -} - -// FastOr computes the union between many bitmaps quickly, as opposed to having to call Or repeatedly. -// It might also be faster than calling Or repeatedly. -func FastOr(bitmaps ...*Bitmap) *Bitmap { - if len(bitmaps) == 0 { - return NewBitmap() - } else if len(bitmaps) == 1 { - return bitmaps[0].Clone() - } - answer := lazyOR(bitmaps[0], bitmaps[1]) - for _, bm := range bitmaps[2:] { - answer = answer.lazyOR(bm) - } - // here is where repairAfterLazy is called. - answer.repairAfterLazy() - return answer -} - -// HeapOr computes the union between many bitmaps quickly using a heap. -// It might be faster than calling Or repeatedly. -func HeapOr(bitmaps ...*Bitmap) *Bitmap { - if len(bitmaps) == 0 { - return NewBitmap() - } - // TODO: for better speed, we could do the operation lazily, see Java implementation - pq := make(priorityQueue, len(bitmaps)) - for i, bm := range bitmaps { - pq[i] = &item{bm, i} - } - heap.Init(&pq) - - for pq.Len() > 1 { - x1 := heap.Pop(&pq).(*item) - x2 := heap.Pop(&pq).(*item) - heap.Push(&pq, &item{Or(x1.value, x2.value), 0}) - } - return heap.Pop(&pq).(*item).value -} - -// HeapXor computes the symmetric difference between many bitmaps quickly (as opposed to calling Xor repeated). -// Internally, this function uses a heap. -// It might be faster than calling Xor repeatedly. 
-func HeapXor(bitmaps ...*Bitmap) *Bitmap { - if len(bitmaps) == 0 { - return NewBitmap() - } - - pq := make(priorityQueue, len(bitmaps)) - for i, bm := range bitmaps { - pq[i] = &item{bm, i} - } - heap.Init(&pq) - - for pq.Len() > 1 { - x1 := heap.Pop(&pq).(*item) - x2 := heap.Pop(&pq).(*item) - heap.Push(&pq, &item{Xor(x1.value, x2.value), 0}) - } - return heap.Pop(&pq).(*item).value -} diff --git a/vendor/github.com/RoaringBitmap/roaring/go.mod b/vendor/github.com/RoaringBitmap/roaring/go.mod deleted file mode 100644 index f5aebf396..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/go.mod +++ /dev/null @@ -1,16 +0,0 @@ -module github.com/RoaringBitmap/roaring - -go 1.12 - -require ( - github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 - github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect - github.com/golang/snappy v0.0.1 // indirect - github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 // indirect - github.com/jtolds/gls v4.20.0+incompatible // indirect - github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae - github.com/philhofer/fwd v1.0.0 // indirect - github.com/stretchr/testify v1.4.0 - github.com/tinylib/msgp v1.1.0 - github.com/willf/bitset v1.1.10 -) diff --git a/vendor/github.com/RoaringBitmap/roaring/go.sum b/vendor/github.com/RoaringBitmap/roaring/go.sum deleted file mode 100644 index 2e27dbb6e..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/go.sum +++ /dev/null @@ -1,30 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 
h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/RoaringBitmap/roaring/manyiterator.go b/vendor/github.com/RoaringBitmap/roaring/manyiterator.go deleted file mode 100644 index 300756377..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/manyiterator.go +++ /dev/null @@ -1,18 +0,0 @@ -package roaring - -type manyIterable interface { - nextMany(hs uint32, buf []uint32) int -} - -func (si *shortIterator) nextMany(hs uint32, buf []uint32) int { - n := 0 - l := si.loc - s := si.slice - for n < len(buf) && l < len(s) { - buf[n] = uint32(s[l]) | hs - l++ - n++ - } - si.loc = l - return n -} diff --git a/vendor/github.com/RoaringBitmap/roaring/parallel.go b/vendor/github.com/RoaringBitmap/roaring/parallel.go deleted file mode 100644 index 2af1aed48..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/parallel.go +++ /dev/null @@ -1,613 +0,0 @@ -package roaring - -import ( - "container/heap" - "fmt" - "runtime" - "sync" -) - -var defaultWorkerCount = runtime.NumCPU() - -type bitmapContainerKey struct { - key uint16 - idx int - bitmap *Bitmap -} - -type multipleContainers struct { - key uint16 - containers []container - idx int -} - -type keyedContainer struct { - key uint16 - container container - idx int -} - -type bitmapContainerHeap []bitmapContainerKey - -func (h bitmapContainerHeap) Len() int { return len(h) } -func (h bitmapContainerHeap) Less(i, j int) bool { return h[i].key < h[j].key } -func (h bitmapContainerHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } - -func (h *bitmapContainerHeap) Push(x interface{}) { - // Push and Pop use pointer receivers because they modify the slice's length, - // not just its contents. 
- *h = append(*h, x.(bitmapContainerKey)) -} - -func (h *bitmapContainerHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x -} - -func (h bitmapContainerHeap) Peek() bitmapContainerKey { - return h[0] -} - -func (h *bitmapContainerHeap) popIncrementing() (key uint16, container container) { - k := h.Peek() - key = k.key - container = k.bitmap.highlowcontainer.containers[k.idx] - - newIdx := k.idx + 1 - if newIdx < k.bitmap.highlowcontainer.size() { - k = bitmapContainerKey{ - k.bitmap.highlowcontainer.keys[newIdx], - newIdx, - k.bitmap, - } - (*h)[0] = k - heap.Fix(h, 0) - } else { - heap.Pop(h) - } - - return -} - -func (h *bitmapContainerHeap) Next(containers []container) multipleContainers { - if h.Len() == 0 { - return multipleContainers{} - } - - key, container := h.popIncrementing() - containers = append(containers, container) - - for h.Len() > 0 && key == h.Peek().key { - _, container = h.popIncrementing() - containers = append(containers, container) - } - - return multipleContainers{ - key, - containers, - -1, - } -} - -func newBitmapContainerHeap(bitmaps ...*Bitmap) bitmapContainerHeap { - // Initialize heap - var h bitmapContainerHeap = make([]bitmapContainerKey, 0, len(bitmaps)) - for _, bitmap := range bitmaps { - if !bitmap.IsEmpty() { - key := bitmapContainerKey{ - bitmap.highlowcontainer.keys[0], - 0, - bitmap, - } - h = append(h, key) - } - } - - heap.Init(&h) - - return h -} - -func repairAfterLazy(c container) container { - switch t := c.(type) { - case *bitmapContainer: - if t.cardinality == invalidCardinality { - t.computeCardinality() - } - - if t.getCardinality() <= arrayDefaultMaxSize { - return t.toArrayContainer() - } else if c.(*bitmapContainer).isFull() { - return newRunContainer16Range(0, MaxUint16) - } - } - - return c -} - -func toBitmapContainer(c container) container { - switch t := c.(type) { - case *arrayContainer: - return t.toBitmapContainer() - case *runContainer16: - if !t.isFull() { - return t.toBitmapContainer() - } - } - return c -} - -func appenderRoutine(bitmapChan chan<- *Bitmap, resultChan <-chan keyedContainer, expectedKeysChan <-chan int) { - expectedKeys := -1 - appendedKeys := 0 - var keys []uint16 - var containers []container - for appendedKeys != expectedKeys { - select { - case item := <-resultChan: - if len(keys) <= item.idx { - keys = append(keys, make([]uint16, item.idx-len(keys)+1)...) - containers = append(containers, make([]container, item.idx-len(containers)+1)...) - } - keys[item.idx] = item.key - containers[item.idx] = item.container - - appendedKeys++ - case msg := <-expectedKeysChan: - expectedKeys = msg - } - } - answer := &Bitmap{ - roaringArray{ - make([]uint16, 0, expectedKeys), - make([]container, 0, expectedKeys), - make([]bool, 0, expectedKeys), - false, - nil, - }, - } - for i := range keys { - if containers[i] != nil { // in case a resulting container was empty, see ParAnd function - answer.highlowcontainer.appendContainer(keys[i], containers[i], false) - } - } - - bitmapChan <- answer -} - -// ParHeapOr computes the union (OR) of all provided bitmaps in parallel, -// where the parameter "parallelism" determines how many workers are to be used -// (if it is set to 0, a default number of workers is chosen) -// ParHeapOr uses a heap to compute the union. 
For rare cases it might be faster than ParOr -func ParHeapOr(parallelism int, bitmaps ...*Bitmap) *Bitmap { - - bitmapCount := len(bitmaps) - if bitmapCount == 0 { - return NewBitmap() - } else if bitmapCount == 1 { - return bitmaps[0].Clone() - } - - if parallelism == 0 { - parallelism = defaultWorkerCount - } - - h := newBitmapContainerHeap(bitmaps...) - - bitmapChan := make(chan *Bitmap) - inputChan := make(chan multipleContainers, 128) - resultChan := make(chan keyedContainer, 32) - expectedKeysChan := make(chan int) - - pool := sync.Pool{ - New: func() interface{} { - return make([]container, 0, len(bitmaps)) - }, - } - - orFunc := func() { - // Assumes only structs with >=2 containers are passed - for input := range inputChan { - c := toBitmapContainer(input.containers[0]).lazyOR(input.containers[1]) - for _, next := range input.containers[2:] { - c = c.lazyIOR(next) - } - c = repairAfterLazy(c) - kx := keyedContainer{ - input.key, - c, - input.idx, - } - resultChan <- kx - pool.Put(input.containers[:0]) - } - } - - go appenderRoutine(bitmapChan, resultChan, expectedKeysChan) - - for i := 0; i < parallelism; i++ { - go orFunc() - } - - idx := 0 - for h.Len() > 0 { - ck := h.Next(pool.Get().([]container)) - if len(ck.containers) == 1 { - resultChan <- keyedContainer{ - ck.key, - ck.containers[0], - idx, - } - pool.Put(ck.containers[:0]) - } else { - ck.idx = idx - inputChan <- ck - } - idx++ - } - expectedKeysChan <- idx - - bitmap := <-bitmapChan - - close(inputChan) - close(resultChan) - close(expectedKeysChan) - - return bitmap -} - -// ParAnd computes the intersection (AND) of all provided bitmaps in parallel, -// where the parameter "parallelism" determines how many workers are to be used -// (if it is set to 0, a default number of workers is chosen) -func ParAnd(parallelism int, bitmaps ...*Bitmap) *Bitmap { - bitmapCount := len(bitmaps) - if bitmapCount == 0 { - return NewBitmap() - } else if bitmapCount == 1 { - return bitmaps[0].Clone() - } - - if parallelism == 0 { - parallelism = defaultWorkerCount - } - - h := newBitmapContainerHeap(bitmaps...) 
- - bitmapChan := make(chan *Bitmap) - inputChan := make(chan multipleContainers, 128) - resultChan := make(chan keyedContainer, 32) - expectedKeysChan := make(chan int) - - andFunc := func() { - // Assumes only structs with >=2 containers are passed - for input := range inputChan { - c := input.containers[0].and(input.containers[1]) - for _, next := range input.containers[2:] { - if c.getCardinality() == 0 { - break - } - c = c.iand(next) - } - - // Send a nil explicitly if the result of the intersection is an empty container - if c.getCardinality() == 0 { - c = nil - } - - kx := keyedContainer{ - input.key, - c, - input.idx, - } - resultChan <- kx - } - } - - go appenderRoutine(bitmapChan, resultChan, expectedKeysChan) - - for i := 0; i < parallelism; i++ { - go andFunc() - } - - idx := 0 - for h.Len() > 0 { - ck := h.Next(make([]container, 0, 4)) - if len(ck.containers) == bitmapCount { - ck.idx = idx - inputChan <- ck - idx++ - } - } - expectedKeysChan <- idx - - bitmap := <-bitmapChan - - close(inputChan) - close(resultChan) - close(expectedKeysChan) - - return bitmap -} - -// ParOr computes the union (OR) of all provided bitmaps in parallel, -// where the parameter "parallelism" determines how many workers are to be used -// (if it is set to 0, a default number of workers is chosen) -func ParOr(parallelism int, bitmaps ...*Bitmap) *Bitmap { - var lKey uint16 = MaxUint16 - var hKey uint16 - - bitmapsFiltered := bitmaps[:0] - for _, b := range bitmaps { - if !b.IsEmpty() { - bitmapsFiltered = append(bitmapsFiltered, b) - } - } - bitmaps = bitmapsFiltered - - for _, b := range bitmaps { - lKey = minOfUint16(lKey, b.highlowcontainer.keys[0]) - hKey = maxOfUint16(hKey, b.highlowcontainer.keys[b.highlowcontainer.size()-1]) - } - - if lKey == MaxUint16 && hKey == 0 { - return New() - } else if len(bitmaps) == 1 { - return bitmaps[0] - } - - keyRange := hKey - lKey + 1 - if keyRange == 1 { - // revert to FastOr. Since the key range is 0 - // no container-level aggregation parallelism is achievable - return FastOr(bitmaps...) 
- } - - if parallelism == 0 { - parallelism = defaultWorkerCount - } - - var chunkSize int - var chunkCount int - if parallelism*4 > int(keyRange) { - chunkSize = 1 - chunkCount = int(keyRange) - } else { - chunkCount = parallelism * 4 - chunkSize = (int(keyRange) + chunkCount - 1) / chunkCount - } - - if chunkCount*chunkSize < int(keyRange) { - // it's fine to panic to indicate an implementation error - panic(fmt.Sprintf("invariant check failed: chunkCount * chunkSize < keyRange, %d * %d < %d", chunkCount, chunkSize, keyRange)) - } - - chunks := make([]*roaringArray, chunkCount) - - chunkSpecChan := make(chan parChunkSpec, minOfInt(maxOfInt(64, 2*parallelism), int(chunkCount))) - chunkChan := make(chan parChunk, minOfInt(32, int(chunkCount))) - - orFunc := func() { - for spec := range chunkSpecChan { - ra := lazyOrOnRange(&bitmaps[0].highlowcontainer, &bitmaps[1].highlowcontainer, spec.start, spec.end) - for _, b := range bitmaps[2:] { - ra = lazyIOrOnRange(ra, &b.highlowcontainer, spec.start, spec.end) - } - - for i, c := range ra.containers { - ra.containers[i] = repairAfterLazy(c) - } - - chunkChan <- parChunk{ra, spec.idx} - } - } - - for i := 0; i < parallelism; i++ { - go orFunc() - } - - go func() { - for i := 0; i < chunkCount; i++ { - spec := parChunkSpec{ - start: uint16(int(lKey) + i*chunkSize), - end: uint16(minOfInt(int(lKey)+(i+1)*chunkSize-1, int(hKey))), - idx: int(i), - } - chunkSpecChan <- spec - } - }() - - chunksRemaining := chunkCount - for chunk := range chunkChan { - chunks[chunk.idx] = chunk.ra - chunksRemaining-- - if chunksRemaining == 0 { - break - } - } - close(chunkChan) - close(chunkSpecChan) - - containerCount := 0 - for _, chunk := range chunks { - containerCount += chunk.size() - } - - result := Bitmap{ - roaringArray{ - containers: make([]container, containerCount), - keys: make([]uint16, containerCount), - needCopyOnWrite: make([]bool, containerCount), - }, - } - - resultOffset := 0 - for _, chunk := range chunks { - copy(result.highlowcontainer.containers[resultOffset:], chunk.containers) - copy(result.highlowcontainer.keys[resultOffset:], chunk.keys) - copy(result.highlowcontainer.needCopyOnWrite[resultOffset:], chunk.needCopyOnWrite) - resultOffset += chunk.size() - } - - return &result -} - -type parChunkSpec struct { - start uint16 - end uint16 - idx int -} - -type parChunk struct { - ra *roaringArray - idx int -} - -func (c parChunk) size() int { - return c.ra.size() -} - -func parNaiveStartAt(ra *roaringArray, start uint16, last uint16) int { - for idx, key := range ra.keys { - if key >= start && key <= last { - return idx - } else if key > last { - break - } - } - return ra.size() -} - -func lazyOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray { - answer := newRoaringArray() - length1 := ra1.size() - length2 := ra2.size() - - idx1 := parNaiveStartAt(ra1, start, last) - idx2 := parNaiveStartAt(ra2, start, last) - - var key1 uint16 - var key2 uint16 - if idx1 < length1 && idx2 < length2 { - key1 = ra1.getKeyAtIndex(idx1) - key2 = ra2.getKeyAtIndex(idx2) - - for key1 <= last && key2 <= last { - - if key1 < key2 { - answer.appendCopy(*ra1, idx1) - idx1++ - if idx1 == length1 { - break - } - key1 = ra1.getKeyAtIndex(idx1) - } else if key1 > key2 { - answer.appendCopy(*ra2, idx2) - idx2++ - if idx2 == length2 { - break - } - key2 = ra2.getKeyAtIndex(idx2) - } else { - c1 := ra1.getFastContainerAtIndex(idx1, false) - - answer.appendContainer(key1, c1.lazyOR(ra2.getContainerAtIndex(idx2)), false) - idx1++ - idx2++ - if idx1 == length1 
|| idx2 == length2 { - break - } - - key1 = ra1.getKeyAtIndex(idx1) - key2 = ra2.getKeyAtIndex(idx2) - } - } - } - - if idx2 < length2 { - key2 = ra2.getKeyAtIndex(idx2) - for key2 <= last { - answer.appendCopy(*ra2, idx2) - idx2++ - if idx2 == length2 { - break - } - key2 = ra2.getKeyAtIndex(idx2) - } - } - - if idx1 < length1 { - key1 = ra1.getKeyAtIndex(idx1) - for key1 <= last { - answer.appendCopy(*ra1, idx1) - idx1++ - if idx1 == length1 { - break - } - key1 = ra1.getKeyAtIndex(idx1) - } - } - return answer -} - -func lazyIOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray { - length1 := ra1.size() - length2 := ra2.size() - - idx1 := 0 - idx2 := parNaiveStartAt(ra2, start, last) - - var key1 uint16 - var key2 uint16 - if idx1 < length1 && idx2 < length2 { - key1 = ra1.getKeyAtIndex(idx1) - key2 = ra2.getKeyAtIndex(idx2) - - for key1 <= last && key2 <= last { - if key1 < key2 { - idx1++ - if idx1 >= length1 { - break - } - key1 = ra1.getKeyAtIndex(idx1) - } else if key1 > key2 { - ra1.insertNewKeyValueAt(idx1, key2, ra2.getContainerAtIndex(idx2)) - ra1.needCopyOnWrite[idx1] = true - idx2++ - idx1++ - length1++ - if idx2 >= length2 { - break - } - key2 = ra2.getKeyAtIndex(idx2) - } else { - c1 := ra1.getFastContainerAtIndex(idx1, true) - - ra1.containers[idx1] = c1.lazyIOR(ra2.getContainerAtIndex(idx2)) - ra1.needCopyOnWrite[idx1] = false - idx1++ - idx2++ - if idx1 >= length1 || idx2 >= length2 { - break - } - - key1 = ra1.getKeyAtIndex(idx1) - key2 = ra2.getKeyAtIndex(idx2) - } - } - } - if idx2 < length2 { - key2 = ra2.getKeyAtIndex(idx2) - for key2 <= last { - ra1.appendCopy(*ra2, idx2) - idx2++ - if idx2 >= length2 { - break - } - key2 = ra2.getKeyAtIndex(idx2) - } - } - return ra1 -} diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt.go b/vendor/github.com/RoaringBitmap/roaring/popcnt.go deleted file mode 100644 index 9d99508ce..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/popcnt.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.9 -// "go1.9", from Go version 1.9 onward -// See https://golang.org/pkg/go/build/#hdr-Build_Constraints - -package roaring - -import "math/bits" - -func popcount(x uint64) uint64 { - return uint64(bits.OnesCount64(x)) -} diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s b/vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s deleted file mode 100644 index 1f13fa2ec..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s +++ /dev/null @@ -1,103 +0,0 @@ -// +build amd64,!appengine,!go1.9 - -TEXT ·hasAsm(SB),4,$0-1 -MOVQ $1, AX -CPUID -SHRQ $23, CX -ANDQ $1, CX -MOVB CX, ret+0(FP) -RET - -#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2 - -TEXT ·popcntSliceAsm(SB),4,$0-32 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntSliceEnd -popcntSliceLoop: -BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX -ADDQ DX, AX -ADDQ $8, SI -LOOP popcntSliceLoop -popcntSliceEnd: -MOVQ AX, ret+24(FP) -RET - -TEXT ·popcntMaskSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntMaskSliceEnd -MOVQ m+24(FP), DI -popcntMaskSliceLoop: -MOVQ (DI), DX -NOTQ DX -ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntMaskSliceLoop -popcntMaskSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntAndSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntAndSliceEnd -MOVQ m+24(FP), DI -popcntAndSliceLoop: -MOVQ (DI), DX 
-ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntAndSliceLoop -popcntAndSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntOrSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntOrSliceEnd -MOVQ m+24(FP), DI -popcntOrSliceLoop: -MOVQ (DI), DX -ORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntOrSliceLoop -popcntOrSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntXorSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntXorSliceEnd -MOVQ m+24(FP), DI -popcntXorSliceLoop: -MOVQ (DI), DX -XORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntXorSliceLoop -popcntXorSliceEnd: -MOVQ AX, ret+48(FP) -RET diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go b/vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go deleted file mode 100644 index 882d7f4ec..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build amd64,!appengine,!go1.9 - -package roaring - -// *** the following functions are defined in popcnt_amd64.s - -//go:noescape - -func hasAsm() bool - -// useAsm is a flag used to select the GO or ASM implementation of the popcnt function -var useAsm = hasAsm() - -//go:noescape - -func popcntSliceAsm(s []uint64) uint64 - -//go:noescape - -func popcntMaskSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntAndSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntOrSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntXorSliceAsm(s, m []uint64) uint64 - -func popcntSlice(s []uint64) uint64 { - if useAsm { - return popcntSliceAsm(s) - } - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - if useAsm { - return popcntMaskSliceAsm(s, m) - } - return popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - if useAsm { - return popcntAndSliceAsm(s, m) - } - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - if useAsm { - return popcntOrSliceAsm(s, m) - } - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - if useAsm { - return popcntXorSliceAsm(s, m) - } - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_compat.go b/vendor/github.com/RoaringBitmap/roaring/popcnt_compat.go deleted file mode 100644 index 7ae82d4c8..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/popcnt_compat.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !go1.9 - -package roaring - -// bit population count, take from -// https://code.google.com/p/go/issues/detail?id=4988#c11 -// credit: https://code.google.com/u/arnehormann/ -// credit: https://play.golang.org/p/U7SogJ7psJ -// credit: http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel -func popcount(x uint64) uint64 { - x -= (x >> 1) & 0x5555555555555555 - x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 - x += x >> 4 - x &= 0x0f0f0f0f0f0f0f0f - x *= 0x0101010101010101 - return x >> 56 -} diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_generic.go b/vendor/github.com/RoaringBitmap/roaring/popcnt_generic.go deleted file mode 100644 index edf2083f1..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/popcnt_generic.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !amd64 appengine go1.9 - -package roaring - -func popcntSlice(s []uint64) uint64 { - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - return 
popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_slices.go b/vendor/github.com/RoaringBitmap/roaring/popcnt_slices.go deleted file mode 100644 index d27c5f383..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/popcnt_slices.go +++ /dev/null @@ -1,41 +0,0 @@ -package roaring - -func popcntSliceGo(s []uint64) uint64 { - cnt := uint64(0) - for _, x := range s { - cnt += popcount(x) - } - return cnt -} - -func popcntMaskSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] &^ m[i]) - } - return cnt -} - -func popcntAndSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] & m[i]) - } - return cnt -} - -func popcntOrSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] | m[i]) - } - return cnt -} - -func popcntXorSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] ^ m[i]) - } - return cnt -} diff --git a/vendor/github.com/RoaringBitmap/roaring/priorityqueue.go b/vendor/github.com/RoaringBitmap/roaring/priorityqueue.go deleted file mode 100644 index 9259a6816..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/priorityqueue.go +++ /dev/null @@ -1,101 +0,0 @@ -package roaring - -import "container/heap" - -///////////// -// The priorityQueue is used to keep Bitmaps sorted. -//////////// - -type item struct { - value *Bitmap - index int -} - -type priorityQueue []*item - -func (pq priorityQueue) Len() int { return len(pq) } - -func (pq priorityQueue) Less(i, j int) bool { - return pq[i].value.GetSizeInBytes() < pq[j].value.GetSizeInBytes() -} - -func (pq priorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -func (pq *priorityQueue) Push(x interface{}) { - n := len(*pq) - item := x.(*item) - item.index = n - *pq = append(*pq, item) -} - -func (pq *priorityQueue) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - item.index = -1 // for safety - *pq = old[0 : n-1] - return item -} - -func (pq *priorityQueue) update(item *item, value *Bitmap) { - item.value = value - heap.Fix(pq, item.index) -} - -///////////// -// The containerPriorityQueue is used to keep the containers of various Bitmaps sorted. 
-//////////// - -type containeritem struct { - value *Bitmap - keyindex int - index int -} - -type containerPriorityQueue []*containeritem - -func (pq containerPriorityQueue) Len() int { return len(pq) } - -func (pq containerPriorityQueue) Less(i, j int) bool { - k1 := pq[i].value.highlowcontainer.getKeyAtIndex(pq[i].keyindex) - k2 := pq[j].value.highlowcontainer.getKeyAtIndex(pq[j].keyindex) - if k1 != k2 { - return k1 < k2 - } - c1 := pq[i].value.highlowcontainer.getContainerAtIndex(pq[i].keyindex) - c2 := pq[j].value.highlowcontainer.getContainerAtIndex(pq[j].keyindex) - - return c1.getCardinality() > c2.getCardinality() -} - -func (pq containerPriorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -func (pq *containerPriorityQueue) Push(x interface{}) { - n := len(*pq) - item := x.(*containeritem) - item.index = n - *pq = append(*pq, item) -} - -func (pq *containerPriorityQueue) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - item.index = -1 // for safety - *pq = old[0 : n-1] - return item -} - -//func (pq *containerPriorityQueue) update(item *containeritem, value *Bitmap, keyindex int) { -// item.value = value -// item.keyindex = keyindex -// heap.Fix(pq, item.index) -//} diff --git a/vendor/github.com/RoaringBitmap/roaring/roaring.go b/vendor/github.com/RoaringBitmap/roaring/roaring.go deleted file mode 100644 index df58cc30b..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/roaring.go +++ /dev/null @@ -1,1494 +0,0 @@ -// Package roaring is an implementation of Roaring Bitmaps in Go. -// They provide fast compressed bitmap data structures (also called bitset). -// They are ideally suited to represent sets of integers over -// relatively small ranges. -// See http://roaringbitmap.org for details. -package roaring - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "strconv" - "sync" -) - -// Bitmap represents a compressed bitmap where you can add integers. -type Bitmap struct { - highlowcontainer roaringArray -} - -// ToBase64 serializes a bitmap as Base64 -func (rb *Bitmap) ToBase64() (string, error) { - buf := new(bytes.Buffer) - _, err := rb.WriteTo(buf) - return base64.StdEncoding.EncodeToString(buf.Bytes()), err - -} - -// FromBase64 deserializes a bitmap from Base64 -func (rb *Bitmap) FromBase64(str string) (int64, error) { - data, err := base64.StdEncoding.DecodeString(str) - if err != nil { - return 0, err - } - buf := bytes.NewBuffer(data) - - return rb.ReadFrom(buf) -} - -// WriteTo writes a serialized version of this bitmap to stream. -// The format is compatible with other RoaringBitmap -// implementations (Java, C) and is documented here: -// https://github.com/RoaringBitmap/RoaringFormatSpec -func (rb *Bitmap) WriteTo(stream io.Writer) (int64, error) { - return rb.highlowcontainer.writeTo(stream) -} - -// ToBytes returns an array of bytes corresponding to what is written -// when calling WriteTo -func (rb *Bitmap) ToBytes() ([]byte, error) { - return rb.highlowcontainer.toBytes() -} - -// Deprecated: WriteToMsgpack writes a msgpack2/snappy-streaming compressed serialized -// version of this bitmap to stream. The format is not -// compatible with the WriteTo() format, and is -// experimental: it may produce smaller on disk -// footprint and/or be faster to read, depending -// on your content. Currently only the Go roaring -// implementation supports this format. 
-func (rb *Bitmap) WriteToMsgpack(stream io.Writer) (int64, error) { - return 0, rb.highlowcontainer.writeToMsgpack(stream) -} - -// ReadFrom reads a serialized version of this bitmap from stream. -// The format is compatible with other RoaringBitmap -// implementations (Java, C) and is documented here: -// https://github.com/RoaringBitmap/RoaringFormatSpec -func (rb *Bitmap) ReadFrom(reader io.Reader) (p int64, err error) { - stream := byteInputAdapterPool.Get().(*byteInputAdapter) - stream.reset(reader) - - p, err = rb.highlowcontainer.readFrom(stream) - byteInputAdapterPool.Put(stream) - - return -} - -// FromBuffer creates a bitmap from its serialized version stored in buffer -// -// The format specification is available here: -// https://github.com/RoaringBitmap/RoaringFormatSpec -// -// The provided byte array (buf) is expected to be a constant. -// The function makes the best effort attempt not to copy data. -// You should take care not to modify buff as it will -// likely result in unexpected program behavior. -// -// Resulting bitmaps are effectively immutable in the following sense: -// a copy-on-write marker is used so that when you modify the resulting -// bitmap, copies of selected data (containers) are made. -// You should *not* change the copy-on-write status of the resulting -// bitmaps (SetCopyOnWrite). -// -// If buf becomes unavailable, then a bitmap created with -// FromBuffer would be effectively broken. Furthermore, any -// bitmap derived from this bitmap (e.g., via Or, And) might -// also be broken. Thus, before making buf unavailable, you should -// call CloneCopyOnWriteContainers on all such bitmaps. -// -func (rb *Bitmap) FromBuffer(buf []byte) (p int64, err error) { - stream := byteBufferPool.Get().(*byteBuffer) - stream.reset(buf) - - p, err = rb.highlowcontainer.readFrom(stream) - byteBufferPool.Put(stream) - - return -} - -var ( - byteBufferPool = sync.Pool{ - New: func() interface{} { - return &byteBuffer{} - }, - } - - byteInputAdapterPool = sync.Pool{ - New: func() interface{} { - return &byteInputAdapter{} - }, - } -) - -// RunOptimize attempts to further compress the runs of consecutive values found in the bitmap -func (rb *Bitmap) RunOptimize() { - rb.highlowcontainer.runOptimize() -} - -// HasRunCompression returns true if the bitmap benefits from run compression -func (rb *Bitmap) HasRunCompression() bool { - return rb.highlowcontainer.hasRunCompression() -} - -// Deprecated: ReadFromMsgpack reads a msgpack2/snappy-streaming serialized -// version of this bitmap from stream. The format is -// expected is that written by the WriteToMsgpack() -// call; see additional notes there. 
-func (rb *Bitmap) ReadFromMsgpack(stream io.Reader) (int64, error) { - return 0, rb.highlowcontainer.readFromMsgpack(stream) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface for the bitmap -// (same as ToBytes) -func (rb *Bitmap) MarshalBinary() ([]byte, error) { - return rb.ToBytes() -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface for the bitmap -func (rb *Bitmap) UnmarshalBinary(data []byte) error { - r := bytes.NewReader(data) - _, err := rb.ReadFrom(r) - return err -} - -// NewBitmap creates a new empty Bitmap (see also New) -func NewBitmap() *Bitmap { - return &Bitmap{} -} - -// New creates a new empty Bitmap (same as NewBitmap) -func New() *Bitmap { - return &Bitmap{} -} - -// Clear resets the Bitmap to be logically empty, but may retain -// some memory allocations that may speed up future operations -func (rb *Bitmap) Clear() { - rb.highlowcontainer.clear() -} - -// ToArray creates a new slice containing all of the integers stored in the Bitmap in sorted order -func (rb *Bitmap) ToArray() []uint32 { - array := make([]uint32, rb.GetCardinality()) - pos := 0 - pos2 := 0 - - for pos < rb.highlowcontainer.size() { - hs := uint32(rb.highlowcontainer.getKeyAtIndex(pos)) << 16 - c := rb.highlowcontainer.getContainerAtIndex(pos) - pos++ - c.fillLeastSignificant16bits(array, pos2, hs) - pos2 += c.getCardinality() - } - return array -} - -// GetSizeInBytes estimates the memory usage of the Bitmap. Note that this -// might differ slightly from the amount of bytes required for persistent storage -func (rb *Bitmap) GetSizeInBytes() uint64 { - size := uint64(8) - for _, c := range rb.highlowcontainer.containers { - size += uint64(2) + uint64(c.getSizeInBytes()) - } - return size -} - -// GetSerializedSizeInBytes computes the serialized size in bytes -// of the Bitmap. It should correspond to the -// number of bytes written when invoking WriteTo. You can expect -// that this function is much cheaper computationally than WriteTo. 
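// A minimal sketch of the constructors and accessors documented above
// (New/NewBitmap, Clear, ToArray, GetSizeInBytes, GetSerializedSizeInBytes),
// assuming the upstream github.com/RoaringBitmap/roaring module.
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	rb := roaring.New()
	rb.AddMany([]uint32{10, 20, 30, 1 << 20})

	fmt.Println(rb.ToArray())                  // [10 20 30 1048576], sorted
	fmt.Println(rb.GetCardinality())           // 4
	fmt.Println(rb.GetSizeInBytes())           // rough in-memory estimate
	fmt.Println(rb.GetSerializedSizeInBytes()) // bytes that WriteTo would emit

	rb.Clear()                // logically empty; allocations may be retained
	fmt.Println(rb.IsEmpty()) // true
}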
-func (rb *Bitmap) GetSerializedSizeInBytes() uint64 { - return rb.highlowcontainer.serializedSizeInBytes() -} - -// BoundSerializedSizeInBytes returns an upper bound on the serialized size in bytes -// assuming that one wants to store "cardinality" integers in [0, universe_size) -func BoundSerializedSizeInBytes(cardinality uint64, universeSize uint64) uint64 { - contnbr := (universeSize + uint64(65535)) / uint64(65536) - if contnbr > cardinality { - contnbr = cardinality - // we can't have more containers than we have values - } - headermax := 8*contnbr + 4 - if 4 > (contnbr+7)/8 { - headermax += 4 - } else { - headermax += (contnbr + 7) / 8 - } - valsarray := uint64(arrayContainerSizeInBytes(int(cardinality))) - valsbitmap := contnbr * uint64(bitmapContainerSizeInBytes()) - valsbest := valsarray - if valsbest > valsbitmap { - valsbest = valsbitmap - } - return valsbest + headermax -} - -// IntIterable allows you to iterate over the values in a Bitmap -type IntIterable interface { - HasNext() bool - Next() uint32 -} - -// IntPeekable allows you to look at the next value without advancing and -// advance as long as the next value is smaller than minval -type IntPeekable interface { - IntIterable - // PeekNext peeks the next value without advancing the iterator - PeekNext() uint32 - // AdvanceIfNeeded advances as long as the next value is smaller than minval - AdvanceIfNeeded(minval uint32) -} - -type intIterator struct { - pos int - hs uint32 - iter shortPeekable - highlowcontainer *roaringArray -} - -// HasNext returns true if there are more integers to iterate over -func (ii *intIterator) HasNext() bool { - return ii.pos < ii.highlowcontainer.size() -} - -func (ii *intIterator) init() { - if ii.highlowcontainer.size() > ii.pos { - ii.iter = ii.highlowcontainer.getContainerAtIndex(ii.pos).getShortIterator() - ii.hs = uint32(ii.highlowcontainer.getKeyAtIndex(ii.pos)) << 16 - } -} - -// Next returns the next integer -func (ii *intIterator) Next() uint32 { - x := uint32(ii.iter.next()) | ii.hs - if !ii.iter.hasNext() { - ii.pos = ii.pos + 1 - ii.init() - } - return x -} - -// PeekNext peeks the next value without advancing the iterator -func (ii *intIterator) PeekNext() uint32 { - return uint32(ii.iter.peekNext()&maxLowBit) | ii.hs -} - -// AdvanceIfNeeded advances as long as the next value is smaller than minval -func (ii *intIterator) AdvanceIfNeeded(minval uint32) { - to := minval >> 16 - - for ii.HasNext() && (ii.hs>>16) < to { - ii.pos++ - ii.init() - } - - if ii.HasNext() && (ii.hs>>16) == to { - ii.iter.advanceIfNeeded(lowbits(minval)) - - if !ii.iter.hasNext() { - ii.pos++ - ii.init() - } - } -} - -func newIntIterator(a *Bitmap) *intIterator { - p := new(intIterator) - p.pos = 0 - p.highlowcontainer = &a.highlowcontainer - p.init() - return p -} - -type intReverseIterator struct { - pos int - hs uint32 - iter shortIterable - highlowcontainer *roaringArray -} - -// HasNext returns true if there are more integers to iterate over -func (ii *intReverseIterator) HasNext() bool { - return ii.pos >= 0 -} - -func (ii *intReverseIterator) init() { - if ii.pos >= 0 { - ii.iter = ii.highlowcontainer.getContainerAtIndex(ii.pos).getReverseIterator() - ii.hs = uint32(ii.highlowcontainer.getKeyAtIndex(ii.pos)) << 16 - } else { - ii.iter = nil - } -} - -// Next returns the next integer -func (ii *intReverseIterator) Next() uint32 { - x := uint32(ii.iter.next()) | ii.hs - if !ii.iter.hasNext() { - ii.pos = ii.pos - 1 - ii.init() - } - return x -} - -func newIntReverseIterator(a *Bitmap) 
*intReverseIterator { - p := new(intReverseIterator) - p.highlowcontainer = &a.highlowcontainer - p.pos = a.highlowcontainer.size() - 1 - p.init() - return p -} - -// ManyIntIterable allows you to iterate over the values in a Bitmap -type ManyIntIterable interface { - // pass in a buffer to fill up with values, returns how many values were returned - NextMany([]uint32) int -} - -type manyIntIterator struct { - pos int - hs uint32 - iter manyIterable - highlowcontainer *roaringArray -} - -func (ii *manyIntIterator) init() { - if ii.highlowcontainer.size() > ii.pos { - ii.iter = ii.highlowcontainer.getContainerAtIndex(ii.pos).getManyIterator() - ii.hs = uint32(ii.highlowcontainer.getKeyAtIndex(ii.pos)) << 16 - } else { - ii.iter = nil - } -} - -func (ii *manyIntIterator) NextMany(buf []uint32) int { - n := 0 - for n < len(buf) { - if ii.iter == nil { - break - } - moreN := ii.iter.nextMany(ii.hs, buf[n:]) - n += moreN - if moreN == 0 { - ii.pos = ii.pos + 1 - ii.init() - } - } - - return n -} - -func newManyIntIterator(a *Bitmap) *manyIntIterator { - p := new(manyIntIterator) - p.pos = 0 - p.highlowcontainer = &a.highlowcontainer - p.init() - return p -} - -// String creates a string representation of the Bitmap -func (rb *Bitmap) String() string { - // inspired by https://github.com/fzandona/goroar/ - var buffer bytes.Buffer - start := []byte("{") - buffer.Write(start) - i := rb.Iterator() - counter := 0 - if i.HasNext() { - counter = counter + 1 - buffer.WriteString(strconv.FormatInt(int64(i.Next()), 10)) - } - for i.HasNext() { - buffer.WriteString(",") - counter = counter + 1 - // to avoid exhausting the memory - if counter > 0x40000 { - buffer.WriteString("...") - break - } - buffer.WriteString(strconv.FormatInt(int64(i.Next()), 10)) - } - buffer.WriteString("}") - return buffer.String() -} - -// Iterator creates a new IntPeekable to iterate over the integers contained in the bitmap, in sorted order; -// the iterator becomes invalid if the bitmap is modified (e.g., with Add or Remove). -func (rb *Bitmap) Iterator() IntPeekable { - return newIntIterator(rb) -} - -// ReverseIterator creates a new IntIterable to iterate over the integers contained in the bitmap, in sorted order; -// the iterator becomes invalid if the bitmap is modified (e.g., with Add or Remove). -func (rb *Bitmap) ReverseIterator() IntIterable { - return newIntReverseIterator(rb) -} - -// ManyIterator creates a new ManyIntIterable to iterate over the integers contained in the bitmap, in sorted order; -// the iterator becomes invalid if the bitmap is modified (e.g., with Add or Remove). 
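// A sketch of the three iterator flavours documented above: Iterator (IntPeekable),
// ReverseIterator (IntIterable) and ManyIterator (buffered NextMany). Assumes the
// upstream github.com/RoaringBitmap/roaring module; per the docs, the bitmap must
// not be modified while iterating.
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	rb := roaring.BitmapOf(1, 5, 9, 100000)

	it := rb.Iterator()
	it.AdvanceIfNeeded(5) // skip values smaller than 5
	for it.HasNext() {
		fmt.Println(it.Next()) // 5, 9, 100000 in ascending order
	}

	rev := rb.ReverseIterator()
	for rev.HasNext() {
		fmt.Println(rev.Next()) // descending order
	}

	buf := make([]uint32, 2)
	many := rb.ManyIterator()
	for n := many.NextMany(buf); n > 0; n = many.NextMany(buf) {
		fmt.Println(buf[:n]) // batches of up to len(buf) values
	}
}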
-func (rb *Bitmap) ManyIterator() ManyIntIterable { - return newManyIntIterator(rb) -} - -// Clone creates a copy of the Bitmap -func (rb *Bitmap) Clone() *Bitmap { - ptr := new(Bitmap) - ptr.highlowcontainer = *rb.highlowcontainer.clone() - return ptr -} - -// Minimum get the smallest value stored in this roaring bitmap, assumes that it is not empty -func (rb *Bitmap) Minimum() uint32 { - return uint32(rb.highlowcontainer.containers[0].minimum()) | (uint32(rb.highlowcontainer.keys[0]) << 16) -} - -// Maximum get the largest value stored in this roaring bitmap, assumes that it is not empty -func (rb *Bitmap) Maximum() uint32 { - lastindex := len(rb.highlowcontainer.containers) - 1 - return uint32(rb.highlowcontainer.containers[lastindex].maximum()) | (uint32(rb.highlowcontainer.keys[lastindex]) << 16) -} - -// Contains returns true if the integer is contained in the bitmap -func (rb *Bitmap) Contains(x uint32) bool { - hb := highbits(x) - c := rb.highlowcontainer.getContainer(hb) - return c != nil && c.contains(lowbits(x)) -} - -// ContainsInt returns true if the integer is contained in the bitmap (this is a convenience method, the parameter is casted to uint32 and Contains is called) -func (rb *Bitmap) ContainsInt(x int) bool { - return rb.Contains(uint32(x)) -} - -// Equals returns true if the two bitmaps contain the same integers -func (rb *Bitmap) Equals(o interface{}) bool { - srb, ok := o.(*Bitmap) - if ok { - return srb.highlowcontainer.equals(rb.highlowcontainer) - } - return false -} - -// AddOffset adds the value 'offset' to each and every value in a bitmap, generating a new bitmap in the process -func AddOffset(x *Bitmap, offset uint32) (answer *Bitmap) { - containerOffset := highbits(offset) - inOffset := lowbits(offset) - if inOffset == 0 { - answer = x.Clone() - for pos := 0; pos < answer.highlowcontainer.size(); pos++ { - key := answer.highlowcontainer.getKeyAtIndex(pos) - key += containerOffset - answer.highlowcontainer.keys[pos] = key - } - } else { - answer = New() - for pos := 0; pos < x.highlowcontainer.size(); pos++ { - key := x.highlowcontainer.getKeyAtIndex(pos) - key += containerOffset - c := x.highlowcontainer.getContainerAtIndex(pos) - offsetted := c.addOffset(inOffset) - if offsetted[0].getCardinality() > 0 { - curSize := answer.highlowcontainer.size() - lastkey := uint16(0) - if curSize > 0 { - lastkey = answer.highlowcontainer.getKeyAtIndex(curSize - 1) - } - if curSize > 0 && lastkey == key { - prev := answer.highlowcontainer.getContainerAtIndex(curSize - 1) - orrseult := prev.ior(offsetted[0]) - answer.highlowcontainer.setContainerAtIndex(curSize-1, orrseult) - } else { - answer.highlowcontainer.appendContainer(key, offsetted[0], false) - } - } - if offsetted[1].getCardinality() > 0 { - answer.highlowcontainer.appendContainer(key+1, offsetted[1], false) - } - } - } - return answer -} - -// Add the integer x to the bitmap -func (rb *Bitmap) Add(x uint32) { - hb := highbits(x) - ra := &rb.highlowcontainer - i := ra.getIndex(hb) - if i >= 0 { - var c container - c = ra.getWritableContainerAtIndex(i).iaddReturnMinimized(lowbits(x)) - rb.highlowcontainer.setContainerAtIndex(i, c) - } else { - newac := newArrayContainer() - rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, newac.iaddReturnMinimized(lowbits(x))) - } -} - -// add the integer x to the bitmap, return the container and its index -func (rb *Bitmap) addwithptr(x uint32) (int, container) { - hb := highbits(x) - ra := &rb.highlowcontainer - i := ra.getIndex(hb) - var c container - if i >= 0 { - c = 
ra.getWritableContainerAtIndex(i).iaddReturnMinimized(lowbits(x)) - rb.highlowcontainer.setContainerAtIndex(i, c) - return i, c - } - newac := newArrayContainer() - c = newac.iaddReturnMinimized(lowbits(x)) - rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, c) - return -i - 1, c -} - -// CheckedAdd adds the integer x to the bitmap and return true if it was added (false if the integer was already present) -func (rb *Bitmap) CheckedAdd(x uint32) bool { - // TODO: add unit tests for this method - hb := highbits(x) - i := rb.highlowcontainer.getIndex(hb) - if i >= 0 { - C := rb.highlowcontainer.getWritableContainerAtIndex(i) - oldcard := C.getCardinality() - C = C.iaddReturnMinimized(lowbits(x)) - rb.highlowcontainer.setContainerAtIndex(i, C) - return C.getCardinality() > oldcard - } - newac := newArrayContainer() - rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, newac.iaddReturnMinimized(lowbits(x))) - return true - -} - -// AddInt adds the integer x to the bitmap (convenience method: the parameter is casted to uint32 and we call Add) -func (rb *Bitmap) AddInt(x int) { - rb.Add(uint32(x)) -} - -// Remove the integer x from the bitmap -func (rb *Bitmap) Remove(x uint32) { - hb := highbits(x) - i := rb.highlowcontainer.getIndex(hb) - if i >= 0 { - c := rb.highlowcontainer.getWritableContainerAtIndex(i).iremoveReturnMinimized(lowbits(x)) - rb.highlowcontainer.setContainerAtIndex(i, c) - if rb.highlowcontainer.getContainerAtIndex(i).getCardinality() == 0 { - rb.highlowcontainer.removeAtIndex(i) - } - } -} - -// CheckedRemove removes the integer x from the bitmap and return true if the integer was effectively remove (and false if the integer was not present) -func (rb *Bitmap) CheckedRemove(x uint32) bool { - // TODO: add unit tests for this method - hb := highbits(x) - i := rb.highlowcontainer.getIndex(hb) - if i >= 0 { - C := rb.highlowcontainer.getWritableContainerAtIndex(i) - oldcard := C.getCardinality() - C = C.iremoveReturnMinimized(lowbits(x)) - rb.highlowcontainer.setContainerAtIndex(i, C) - if rb.highlowcontainer.getContainerAtIndex(i).getCardinality() == 0 { - rb.highlowcontainer.removeAtIndex(i) - return true - } - return C.getCardinality() < oldcard - } - return false - -} - -// IsEmpty returns true if the Bitmap is empty (it is faster than doing (GetCardinality() == 0)) -func (rb *Bitmap) IsEmpty() bool { - return rb.highlowcontainer.size() == 0 -} - -// GetCardinality returns the number of integers contained in the bitmap -func (rb *Bitmap) GetCardinality() uint64 { - size := uint64(0) - for _, c := range rb.highlowcontainer.containers { - size += uint64(c.getCardinality()) - } - return size -} - -// Rank returns the number of integers that are smaller or equal to x (Rank(infinity) would be GetCardinality()) -func (rb *Bitmap) Rank(x uint32) uint64 { - size := uint64(0) - for i := 0; i < rb.highlowcontainer.size(); i++ { - key := rb.highlowcontainer.getKeyAtIndex(i) - if key > highbits(x) { - return size - } - if key < highbits(x) { - size += uint64(rb.highlowcontainer.getContainerAtIndex(i).getCardinality()) - } else { - return size + uint64(rb.highlowcontainer.getContainerAtIndex(i).rank(lowbits(x))) - } - } - return size -} - -// Select returns the xth integer in the bitmap -func (rb *Bitmap) Select(x uint32) (uint32, error) { - if rb.GetCardinality() <= uint64(x) { - return 0, fmt.Errorf("can't find %dth integer in a bitmap with only %d items", x, rb.GetCardinality()) - } - - remaining := x - for i := 0; i < rb.highlowcontainer.size(); i++ { - c := 
rb.highlowcontainer.getContainerAtIndex(i) - if remaining >= uint32(c.getCardinality()) { - remaining -= uint32(c.getCardinality()) - } else { - key := rb.highlowcontainer.getKeyAtIndex(i) - return uint32(key)<<16 + uint32(c.selectInt(uint16(remaining))), nil - } - } - return 0, fmt.Errorf("can't find %dth integer in a bitmap with only %d items", x, rb.GetCardinality()) -} - -// And computes the intersection between two bitmaps and stores the result in the current bitmap -func (rb *Bitmap) And(x2 *Bitmap) { - pos1 := 0 - pos2 := 0 - intersectionsize := 0 - length1 := rb.highlowcontainer.size() - length2 := x2.highlowcontainer.size() - -main: - for { - if pos1 < length1 && pos2 < length2 { - s1 := rb.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - for { - if s1 == s2 { - c1 := rb.highlowcontainer.getWritableContainerAtIndex(pos1) - c2 := x2.highlowcontainer.getContainerAtIndex(pos2) - diff := c1.iand(c2) - if diff.getCardinality() > 0 { - rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, diff, false) - intersectionsize++ - } - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else if s1 < s2 { - pos1 = rb.highlowcontainer.advanceUntil(s2, pos1) - if pos1 == length1 { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - } else { //s1 > s2 - pos2 = x2.highlowcontainer.advanceUntil(s1, pos2) - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } else { - break - } - } - rb.highlowcontainer.resize(intersectionsize) -} - -// OrCardinality returns the cardinality of the union between two bitmaps, bitmaps are not modified -func (rb *Bitmap) OrCardinality(x2 *Bitmap) uint64 { - pos1 := 0 - pos2 := 0 - length1 := rb.highlowcontainer.size() - length2 := x2.highlowcontainer.size() - answer := uint64(0) -main: - for { - if (pos1 < length1) && (pos2 < length2) { - s1 := rb.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - - for { - if s1 < s2 { - answer += uint64(rb.highlowcontainer.getContainerAtIndex(pos1).getCardinality()) - pos1++ - if pos1 == length1 { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - } else if s1 > s2 { - answer += uint64(x2.highlowcontainer.getContainerAtIndex(pos2).getCardinality()) - pos2++ - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else { - // TODO: could be faster if we did not have to materialize the container - answer += uint64(rb.highlowcontainer.getContainerAtIndex(pos1).or(x2.highlowcontainer.getContainerAtIndex(pos2)).getCardinality()) - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } else { - break - } - } - for ; pos1 < length1; pos1++ { - answer += uint64(rb.highlowcontainer.getContainerAtIndex(pos1).getCardinality()) - } - for ; pos2 < length2; pos2++ { - answer += uint64(x2.highlowcontainer.getContainerAtIndex(pos2).getCardinality()) - } - return answer -} - -// AndCardinality returns the cardinality of the intersection between two bitmaps, bitmaps are not modified -func (rb *Bitmap) AndCardinality(x2 *Bitmap) uint64 { - pos1 := 0 - pos2 := 0 - answer := uint64(0) - length1 := rb.highlowcontainer.size() - length2 := x2.highlowcontainer.size() - -main: - for { - if pos1 < length1 && pos2 < 
length2 { - s1 := rb.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - for { - if s1 == s2 { - c1 := rb.highlowcontainer.getContainerAtIndex(pos1) - c2 := x2.highlowcontainer.getContainerAtIndex(pos2) - answer += uint64(c1.andCardinality(c2)) - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else if s1 < s2 { - pos1 = rb.highlowcontainer.advanceUntil(s2, pos1) - if pos1 == length1 { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - } else { //s1 > s2 - pos2 = x2.highlowcontainer.advanceUntil(s1, pos2) - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } else { - break - } - } - return answer -} - -// Intersects checks whether two bitmap intersects, bitmaps are not modified -func (rb *Bitmap) Intersects(x2 *Bitmap) bool { - pos1 := 0 - pos2 := 0 - length1 := rb.highlowcontainer.size() - length2 := x2.highlowcontainer.size() - -main: - for { - if pos1 < length1 && pos2 < length2 { - s1 := rb.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - for { - if s1 == s2 { - c1 := rb.highlowcontainer.getContainerAtIndex(pos1) - c2 := x2.highlowcontainer.getContainerAtIndex(pos2) - if c1.intersects(c2) { - return true - } - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else if s1 < s2 { - pos1 = rb.highlowcontainer.advanceUntil(s2, pos1) - if pos1 == length1 { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - } else { //s1 > s2 - pos2 = x2.highlowcontainer.advanceUntil(s1, pos2) - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } else { - break - } - } - return false -} - -// Xor computes the symmetric difference between two bitmaps and stores the result in the current bitmap -func (rb *Bitmap) Xor(x2 *Bitmap) { - pos1 := 0 - pos2 := 0 - length1 := rb.highlowcontainer.size() - length2 := x2.highlowcontainer.size() - for { - if (pos1 < length1) && (pos2 < length2) { - s1 := rb.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - if s1 < s2 { - pos1 = rb.highlowcontainer.advanceUntil(s2, pos1) - if pos1 == length1 { - break - } - } else if s1 > s2 { - c := x2.highlowcontainer.getWritableContainerAtIndex(pos2) - rb.highlowcontainer.insertNewKeyValueAt(pos1, x2.highlowcontainer.getKeyAtIndex(pos2), c) - length1++ - pos1++ - pos2++ - } else { - // TODO: couple be computed in-place for reduced memory usage - c := rb.highlowcontainer.getContainerAtIndex(pos1).xor(x2.highlowcontainer.getContainerAtIndex(pos2)) - if c.getCardinality() > 0 { - rb.highlowcontainer.setContainerAtIndex(pos1, c) - pos1++ - } else { - rb.highlowcontainer.removeAtIndex(pos1) - length1-- - } - pos2++ - } - } else { - break - } - } - if pos1 == length1 { - rb.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2) - } -} - -// Or computes the union between two bitmaps and stores the result in the current bitmap -func (rb *Bitmap) Or(x2 *Bitmap) { - pos1 := 0 - pos2 := 0 - length1 := rb.highlowcontainer.size() - length2 := x2.highlowcontainer.size() -main: - for (pos1 < length1) && (pos2 < length2) { - s1 := rb.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - - for { - if s1 < s2 { - pos1++ - if pos1 == 
length1 { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - } else if s1 > s2 { - rb.highlowcontainer.insertNewKeyValueAt(pos1, s2, x2.highlowcontainer.getContainerAtIndex(pos2).clone()) - pos1++ - length1++ - pos2++ - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else { - rb.highlowcontainer.replaceKeyAndContainerAtIndex(pos1, s1, rb.highlowcontainer.getWritableContainerAtIndex(pos1).ior(x2.highlowcontainer.getContainerAtIndex(pos2)), false) - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } - if pos1 == length1 { - rb.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2) - } -} - -// AndNot computes the difference between two bitmaps and stores the result in the current bitmap -func (rb *Bitmap) AndNot(x2 *Bitmap) { - pos1 := 0 - pos2 := 0 - intersectionsize := 0 - length1 := rb.highlowcontainer.size() - length2 := x2.highlowcontainer.size() - -main: - for { - if pos1 < length1 && pos2 < length2 { - s1 := rb.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - for { - if s1 == s2 { - c1 := rb.highlowcontainer.getWritableContainerAtIndex(pos1) - c2 := x2.highlowcontainer.getContainerAtIndex(pos2) - diff := c1.iandNot(c2) - if diff.getCardinality() > 0 { - rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, diff, false) - intersectionsize++ - } - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else if s1 < s2 { - c1 := rb.highlowcontainer.getContainerAtIndex(pos1) - mustCopyOnWrite := rb.highlowcontainer.needsCopyOnWrite(pos1) - rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, c1, mustCopyOnWrite) - intersectionsize++ - pos1++ - if pos1 == length1 { - break main - } - s1 = rb.highlowcontainer.getKeyAtIndex(pos1) - } else { //s1 > s2 - pos2 = x2.highlowcontainer.advanceUntil(s1, pos2) - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } else { - break - } - } - // TODO:implement as a copy - for pos1 < length1 { - c1 := rb.highlowcontainer.getContainerAtIndex(pos1) - s1 := rb.highlowcontainer.getKeyAtIndex(pos1) - mustCopyOnWrite := rb.highlowcontainer.needsCopyOnWrite(pos1) - rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, c1, mustCopyOnWrite) - intersectionsize++ - pos1++ - } - rb.highlowcontainer.resize(intersectionsize) -} - -// Or computes the union between two bitmaps and returns the result -func Or(x1, x2 *Bitmap) *Bitmap { - answer := NewBitmap() - pos1 := 0 - pos2 := 0 - length1 := x1.highlowcontainer.size() - length2 := x2.highlowcontainer.size() -main: - for (pos1 < length1) && (pos2 < length2) { - s1 := x1.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - - for { - if s1 < s2 { - answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1) - pos1++ - if pos1 == length1 { - break main - } - s1 = x1.highlowcontainer.getKeyAtIndex(pos1) - } else if s1 > s2 { - answer.highlowcontainer.appendCopy(x2.highlowcontainer, pos2) - pos2++ - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else { - - answer.highlowcontainer.appendContainer(s1, x1.highlowcontainer.getContainerAtIndex(pos1).or(x2.highlowcontainer.getContainerAtIndex(pos2)), 
false) - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = x1.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } - if pos1 == length1 { - answer.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2) - } else if pos2 == length2 { - answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1) - } - return answer -} - -// And computes the intersection between two bitmaps and returns the result -func And(x1, x2 *Bitmap) *Bitmap { - answer := NewBitmap() - pos1 := 0 - pos2 := 0 - length1 := x1.highlowcontainer.size() - length2 := x2.highlowcontainer.size() -main: - for pos1 < length1 && pos2 < length2 { - s1 := x1.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - for { - if s1 == s2 { - C := x1.highlowcontainer.getContainerAtIndex(pos1) - C = C.and(x2.highlowcontainer.getContainerAtIndex(pos2)) - - if C.getCardinality() > 0 { - answer.highlowcontainer.appendContainer(s1, C, false) - } - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = x1.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else if s1 < s2 { - pos1 = x1.highlowcontainer.advanceUntil(s2, pos1) - if pos1 == length1 { - break main - } - s1 = x1.highlowcontainer.getKeyAtIndex(pos1) - } else { // s1 > s2 - pos2 = x2.highlowcontainer.advanceUntil(s1, pos2) - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } - return answer -} - -// Xor computes the symmetric difference between two bitmaps and returns the result -func Xor(x1, x2 *Bitmap) *Bitmap { - answer := NewBitmap() - pos1 := 0 - pos2 := 0 - length1 := x1.highlowcontainer.size() - length2 := x2.highlowcontainer.size() - for { - if (pos1 < length1) && (pos2 < length2) { - s1 := x1.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - if s1 < s2 { - answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1) - pos1++ - } else if s1 > s2 { - answer.highlowcontainer.appendCopy(x2.highlowcontainer, pos2) - pos2++ - } else { - c := x1.highlowcontainer.getContainerAtIndex(pos1).xor(x2.highlowcontainer.getContainerAtIndex(pos2)) - if c.getCardinality() > 0 { - answer.highlowcontainer.appendContainer(s1, c, false) - } - pos1++ - pos2++ - } - } else { - break - } - } - if pos1 == length1 { - answer.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2) - } else if pos2 == length2 { - answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1) - } - return answer -} - -// AndNot computes the difference between two bitmaps and returns the result -func AndNot(x1, x2 *Bitmap) *Bitmap { - answer := NewBitmap() - pos1 := 0 - pos2 := 0 - length1 := x1.highlowcontainer.size() - length2 := x2.highlowcontainer.size() - -main: - for { - if pos1 < length1 && pos2 < length2 { - s1 := x1.highlowcontainer.getKeyAtIndex(pos1) - s2 := x2.highlowcontainer.getKeyAtIndex(pos2) - for { - if s1 < s2 { - answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1) - pos1++ - if pos1 == length1 { - break main - } - s1 = x1.highlowcontainer.getKeyAtIndex(pos1) - } else if s1 == s2 { - c1 := x1.highlowcontainer.getContainerAtIndex(pos1) - c2 := x2.highlowcontainer.getContainerAtIndex(pos2) - diff := c1.andNot(c2) - if diff.getCardinality() > 0 { - answer.highlowcontainer.appendContainer(s1, diff, false) - } - pos1++ - pos2++ - if (pos1 == length1) || (pos2 == length2) { - break main - } - s1 = 
x1.highlowcontainer.getKeyAtIndex(pos1) - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } else { //s1 > s2 - pos2 = x2.highlowcontainer.advanceUntil(s1, pos2) - if pos2 == length2 { - break main - } - s2 = x2.highlowcontainer.getKeyAtIndex(pos2) - } - } - } else { - break - } - } - if pos2 == length2 { - answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1) - } - return answer -} - -// AddMany add all of the values in dat -func (rb *Bitmap) AddMany(dat []uint32) { - if len(dat) == 0 { - return - } - prev := dat[0] - idx, c := rb.addwithptr(prev) - for _, i := range dat[1:] { - if highbits(prev) == highbits(i) { - c = c.iaddReturnMinimized(lowbits(i)) - rb.highlowcontainer.setContainerAtIndex(idx, c) - } else { - idx, c = rb.addwithptr(i) - } - prev = i - } -} - -// BitmapOf generates a new bitmap filled with the specified integers -func BitmapOf(dat ...uint32) *Bitmap { - ans := NewBitmap() - ans.AddMany(dat) - return ans -} - -// Flip negates the bits in the given range (i.e., [rangeStart,rangeEnd)), any integer present in this range and in the bitmap is removed, -// and any integer present in the range and not in the bitmap is added. -// The function uses 64-bit parameters even though a Bitmap stores 32-bit values because it is allowed and meaningful to use [0,uint64(0x100000000)) as a range -// while uint64(0x100000000) cannot be represented as a 32-bit value. -func (rb *Bitmap) Flip(rangeStart, rangeEnd uint64) { - - if rangeEnd > MaxUint32+1 { - panic("rangeEnd > MaxUint32+1") - } - if rangeStart > MaxUint32+1 { - panic("rangeStart > MaxUint32+1") - } - - if rangeStart >= rangeEnd { - return - } - - hbStart := uint32(highbits(uint32(rangeStart))) - lbStart := uint32(lowbits(uint32(rangeStart))) - hbLast := uint32(highbits(uint32(rangeEnd - 1))) - lbLast := uint32(lowbits(uint32(rangeEnd - 1))) - - var max uint32 = maxLowBit - for hb := hbStart; hb <= hbLast; hb++ { - var containerStart uint32 - if hb == hbStart { - containerStart = uint32(lbStart) - } - containerLast := max - if hb == hbLast { - containerLast = uint32(lbLast) - } - - i := rb.highlowcontainer.getIndex(uint16(hb)) - - if i >= 0 { - c := rb.highlowcontainer.getWritableContainerAtIndex(i).inot(int(containerStart), int(containerLast)+1) - if c.getCardinality() > 0 { - rb.highlowcontainer.setContainerAtIndex(i, c) - } else { - rb.highlowcontainer.removeAtIndex(i) - } - } else { // *think* the range of ones must never be - // empty. - rb.highlowcontainer.insertNewKeyValueAt(-i-1, uint16(hb), rangeOfOnes(int(containerStart), int(containerLast))) - } - } -} - -// FlipInt calls Flip after casting the parameters (convenience method) -func (rb *Bitmap) FlipInt(rangeStart, rangeEnd int) { - rb.Flip(uint64(rangeStart), uint64(rangeEnd)) -} - -// AddRange adds the integers in [rangeStart, rangeEnd) to the bitmap. -// The function uses 64-bit parameters even though a Bitmap stores 32-bit values because it is allowed and meaningful to use [0,uint64(0x100000000)) as a range -// while uint64(0x100000000) cannot be represented as a 32-bit value. 
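// A sketch of the point operations and set algebra documented above. The
// package-level Or/And/Xor/AndNot return a new bitmap; the methods of the same
// names store the result in the receiver. Assumes the upstream
// github.com/RoaringBitmap/roaring module.
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	a := roaring.BitmapOf(1, 2, 3, 4)
	b := roaring.BitmapOf(3, 4, 5, 6)

	fmt.Println(roaring.Or(a, b).ToArray())     // [1 2 3 4 5 6]
	fmt.Println(roaring.And(a, b).ToArray())    // [3 4]
	fmt.Println(roaring.AndNot(a, b).ToArray()) // [1 2]
	fmt.Println(roaring.Xor(a, b).ToArray())    // [1 2 5 6]
	fmt.Println(a.AndCardinality(b))            // 2, counted without building the intersection

	a.Or(b)                               // in place: a is now {1,2,3,4,5,6}
	fmt.Println(a.CheckedAdd(7))          // true, 7 was new
	fmt.Println(a.CheckedRemove(9))       // false, 9 was absent
	fmt.Println(a.Contains(5), a.Rank(4)) // true 4 (count of values <= 4)

	v, err := a.Select(0) // 0th (smallest) value
	fmt.Println(v, err)   // 1 <nil>
}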
-func (rb *Bitmap) AddRange(rangeStart, rangeEnd uint64) { - if rangeStart >= rangeEnd { - return - } - if rangeEnd-1 > MaxUint32 { - panic("rangeEnd-1 > MaxUint32") - } - hbStart := uint32(highbits(uint32(rangeStart))) - lbStart := uint32(lowbits(uint32(rangeStart))) - hbLast := uint32(highbits(uint32(rangeEnd - 1))) - lbLast := uint32(lowbits(uint32(rangeEnd - 1))) - - var max uint32 = maxLowBit - for hb := hbStart; hb <= hbLast; hb++ { - containerStart := uint32(0) - if hb == hbStart { - containerStart = lbStart - } - containerLast := max - if hb == hbLast { - containerLast = lbLast - } - - i := rb.highlowcontainer.getIndex(uint16(hb)) - - if i >= 0 { - c := rb.highlowcontainer.getWritableContainerAtIndex(i).iaddRange(int(containerStart), int(containerLast)+1) - rb.highlowcontainer.setContainerAtIndex(i, c) - } else { // *think* the range of ones must never be - // empty. - rb.highlowcontainer.insertNewKeyValueAt(-i-1, uint16(hb), rangeOfOnes(int(containerStart), int(containerLast))) - } - } -} - -// RemoveRange removes the integers in [rangeStart, rangeEnd) from the bitmap. -// The function uses 64-bit parameters even though a Bitmap stores 32-bit values because it is allowed and meaningful to use [0,uint64(0x100000000)) as a range -// while uint64(0x100000000) cannot be represented as a 32-bit value. -func (rb *Bitmap) RemoveRange(rangeStart, rangeEnd uint64) { - if rangeStart >= rangeEnd { - return - } - if rangeEnd-1 > MaxUint32 { - // logically, we should assume that the user wants to - // remove all values from rangeStart to infinity - // see https://github.com/RoaringBitmap/roaring/issues/141 - rangeEnd = uint64(0x100000000) - } - hbStart := uint32(highbits(uint32(rangeStart))) - lbStart := uint32(lowbits(uint32(rangeStart))) - hbLast := uint32(highbits(uint32(rangeEnd - 1))) - lbLast := uint32(lowbits(uint32(rangeEnd - 1))) - - var max uint32 = maxLowBit - - if hbStart == hbLast { - i := rb.highlowcontainer.getIndex(uint16(hbStart)) - if i < 0 { - return - } - c := rb.highlowcontainer.getWritableContainerAtIndex(i).iremoveRange(int(lbStart), int(lbLast+1)) - if c.getCardinality() > 0 { - rb.highlowcontainer.setContainerAtIndex(i, c) - } else { - rb.highlowcontainer.removeAtIndex(i) - } - return - } - ifirst := rb.highlowcontainer.getIndex(uint16(hbStart)) - ilast := rb.highlowcontainer.getIndex(uint16(hbLast)) - - if ifirst >= 0 { - if lbStart != 0 { - c := rb.highlowcontainer.getWritableContainerAtIndex(ifirst).iremoveRange(int(lbStart), int(max+1)) - if c.getCardinality() > 0 { - rb.highlowcontainer.setContainerAtIndex(ifirst, c) - ifirst++ - } - } - } else { - ifirst = -ifirst - 1 - } - if ilast >= 0 { - if lbLast != max { - c := rb.highlowcontainer.getWritableContainerAtIndex(ilast).iremoveRange(int(0), int(lbLast+1)) - if c.getCardinality() > 0 { - rb.highlowcontainer.setContainerAtIndex(ilast, c) - } else { - ilast++ - } - } else { - ilast++ - } - } else { - ilast = -ilast - 1 - } - rb.highlowcontainer.removeIndexRange(ifirst, ilast) -} - -// Flip negates the bits in the given range (i.e., [rangeStart,rangeEnd)), any integer present in this range and in the bitmap is removed, -// and any integer present in the range and not in the bitmap is added, a new bitmap is returned leaving -// the current bitmap unchanged. -// The function uses 64-bit parameters even though a Bitmap stores 32-bit values because it is allowed and meaningful to use [0,uint64(0x100000000)) as a range -// while uint64(0x100000000) cannot be represented as a 32-bit value. 
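// A sketch of the range operations documented above. Ranges are half-open
// [rangeStart, rangeEnd) and take uint64 parameters so the full [0, 1<<32)
// universe can be expressed. Assumes the upstream
// github.com/RoaringBitmap/roaring module.
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	rb := roaring.New()
	rb.AddRange(0, 100)              // adds 0..99
	rb.RemoveRange(10, 20)           // removes 10..19
	rb.Flip(95, 105)                 // 95..99 were set so they are cleared; 100..104 are added
	fmt.Println(rb.GetCardinality()) // 100 - 10 - 5 + 5 = 90

	flipped := roaring.Flip(rb, 0, 10) // non-destructive variant returns a new bitmap
	fmt.Println(flipped.Contains(5))   // false: 5 was present, so the flip removed it
	fmt.Println(rb.Contains(5))        // true: rb itself is unchanged
}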
-func Flip(bm *Bitmap, rangeStart, rangeEnd uint64) *Bitmap { - if rangeStart >= rangeEnd { - return bm.Clone() - } - - if rangeStart > MaxUint32 { - panic("rangeStart > MaxUint32") - } - if rangeEnd-1 > MaxUint32 { - panic("rangeEnd-1 > MaxUint32") - } - - answer := NewBitmap() - hbStart := uint32(highbits(uint32(rangeStart))) - lbStart := uint32(lowbits(uint32(rangeStart))) - hbLast := uint32(highbits(uint32(rangeEnd - 1))) - lbLast := uint32(lowbits(uint32(rangeEnd - 1))) - - // copy the containers before the active area - answer.highlowcontainer.appendCopiesUntil(bm.highlowcontainer, uint16(hbStart)) - - var max uint32 = maxLowBit - for hb := hbStart; hb <= hbLast; hb++ { - var containerStart uint32 - if hb == hbStart { - containerStart = uint32(lbStart) - } - containerLast := max - if hb == hbLast { - containerLast = uint32(lbLast) - } - - i := bm.highlowcontainer.getIndex(uint16(hb)) - j := answer.highlowcontainer.getIndex(uint16(hb)) - - if i >= 0 { - c := bm.highlowcontainer.getContainerAtIndex(i).not(int(containerStart), int(containerLast)+1) - if c.getCardinality() > 0 { - answer.highlowcontainer.insertNewKeyValueAt(-j-1, uint16(hb), c) - } - - } else { // *think* the range of ones must never be - // empty. - answer.highlowcontainer.insertNewKeyValueAt(-j-1, uint16(hb), - rangeOfOnes(int(containerStart), int(containerLast))) - } - } - // copy the containers after the active area. - answer.highlowcontainer.appendCopiesAfter(bm.highlowcontainer, uint16(hbLast)) - - return answer -} - -// SetCopyOnWrite sets this bitmap to use copy-on-write so that copies are fast and memory conscious -// if the parameter is true, otherwise we leave the default where hard copies are made -// (copy-on-write requires extra care in a threaded context). -// Calling SetCopyOnWrite(true) on a bitmap created with FromBuffer is unsafe. -func (rb *Bitmap) SetCopyOnWrite(val bool) { - rb.highlowcontainer.copyOnWrite = val -} - -// GetCopyOnWrite gets this bitmap's copy-on-write property -func (rb *Bitmap) GetCopyOnWrite() (val bool) { - return rb.highlowcontainer.copyOnWrite -} - -// CloneCopyOnWriteContainers clones all containers which have -// needCopyOnWrite set to true. -// This can be used to make sure it is safe to munmap a []byte -// that the roaring array may still have a reference to, after -// calling FromBuffer. -// More generally this function is useful if you call FromBuffer -// to construct a bitmap with a backing array buf -// and then later discard the buf array. Note that you should call -// CloneCopyOnWriteContainers on all bitmaps that were derived -// from the 'FromBuffer' bitmap since they map have dependencies -// on the buf array as well. -func (rb *Bitmap) CloneCopyOnWriteContainers() { - rb.highlowcontainer.cloneCopyOnWriteContainers() -} - -// FlipInt calls Flip after casting the parameters (convenience method) -func FlipInt(bm *Bitmap, rangeStart, rangeEnd int) *Bitmap { - return Flip(bm, uint64(rangeStart), uint64(rangeEnd)) -} - -// Statistics provides details on the container types in use. -type Statistics struct { - Cardinality uint64 - Containers uint64 - - ArrayContainers uint64 - ArrayContainerBytes uint64 - ArrayContainerValues uint64 - - BitmapContainers uint64 - BitmapContainerBytes uint64 - BitmapContainerValues uint64 - - RunContainers uint64 - RunContainerBytes uint64 - RunContainerValues uint64 -} - -// Stats returns details on container type usage in a Statistics struct. 
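// A sketch of the copy-on-write knob and the Stats accessor documented above.
// SetCopyOnWrite(true) makes Clone cheap by sharing containers until one side
// mutates; Stats reports which container kinds back the bitmap. Assumes the
// upstream github.com/RoaringBitmap/roaring module.
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	rb := roaring.New()
	rb.AddRange(0, 1000000)
	rb.SetCopyOnWrite(true)

	cp := rb.Clone() // shares containers until either bitmap is written to
	cp.Add(2000000)
	fmt.Println(rb.GetCardinality(), cp.GetCardinality()) // 1000000 1000001

	st := rb.Stats()
	fmt.Printf("containers=%d run=%d array=%d bitmap=%d cardinality=%d\n",
		st.Containers, st.RunContainers, st.ArrayContainers,
		st.BitmapContainers, st.Cardinality)
}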
-func (rb *Bitmap) Stats() Statistics { - stats := Statistics{} - stats.Containers = uint64(len(rb.highlowcontainer.containers)) - for _, c := range rb.highlowcontainer.containers { - stats.Cardinality += uint64(c.getCardinality()) - - switch c.(type) { - case *arrayContainer: - stats.ArrayContainers++ - stats.ArrayContainerBytes += uint64(c.getSizeInBytes()) - stats.ArrayContainerValues += uint64(c.getCardinality()) - case *bitmapContainer: - stats.BitmapContainers++ - stats.BitmapContainerBytes += uint64(c.getSizeInBytes()) - stats.BitmapContainerValues += uint64(c.getCardinality()) - case *runContainer16: - stats.RunContainers++ - stats.RunContainerBytes += uint64(c.getSizeInBytes()) - stats.RunContainerValues += uint64(c.getCardinality()) - } - } - return stats -} diff --git a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go deleted file mode 100644 index d9d5edda7..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go +++ /dev/null @@ -1,837 +0,0 @@ -package roaring - -import ( - "bytes" - "encoding/binary" - "fmt" - snappy "github.com/glycerine/go-unsnap-stream" - "github.com/tinylib/msgp/msgp" - "io" -) - -//go:generate msgp -unexported - -type container interface { - addOffset(uint16) []container - - clone() container - and(container) container - andCardinality(container) int - iand(container) container // i stands for inplace - andNot(container) container - iandNot(container) container // i stands for inplace - getCardinality() int - // rank returns the number of integers that are - // smaller or equal to x. rank(infinity) would be getCardinality(). - rank(uint16) int - - iadd(x uint16) bool // inplace, returns true if x was new. - iaddReturnMinimized(uint16) container // may change return type to minimize storage. - - //addRange(start, final int) container // range is [firstOfRange,lastOfRange) (unused) - iaddRange(start, endx int) container // i stands for inplace, range is [firstOfRange,endx) - - iremove(x uint16) bool // inplace, returns true if x was present. - iremoveReturnMinimized(uint16) container // may change return type to minimize storage. - - not(start, final int) container // range is [firstOfRange,lastOfRange) - inot(firstOfRange, endx int) container // i stands for inplace, range is [firstOfRange,endx) - xor(r container) container - getShortIterator() shortPeekable - getReverseIterator() shortIterable - getManyIterator() manyIterable - contains(i uint16) bool - maximum() uint16 - minimum() uint16 - - // equals is now logical equals; it does not require the - // same underlying container types, but compares across - // any of the implementations. 
- equals(r container) bool - - fillLeastSignificant16bits(array []uint32, i int, mask uint32) - or(r container) container - orCardinality(r container) int - isFull() bool - ior(r container) container // i stands for inplace - intersects(r container) bool // whether the two containers intersect - lazyOR(r container) container - lazyIOR(r container) container - getSizeInBytes() int - //removeRange(start, final int) container // range is [firstOfRange,lastOfRange) (unused) - iremoveRange(start, final int) container // i stands for inplace, range is [firstOfRange,lastOfRange) - selectInt(x uint16) int // selectInt returns the xth integer in the container - serializedSizeInBytes() int - writeTo(io.Writer) (int, error) - - numberOfRuns() int - toEfficientContainer() container - String() string - containerType() contype -} - -type contype uint8 - -const ( - bitmapContype contype = iota - arrayContype - run16Contype - run32Contype -) - -// careful: range is [firstOfRange,lastOfRange] -func rangeOfOnes(start, last int) container { - if start > MaxUint16 { - panic("rangeOfOnes called with start > MaxUint16") - } - if last > MaxUint16 { - panic("rangeOfOnes called with last > MaxUint16") - } - if start < 0 { - panic("rangeOfOnes called with start < 0") - } - if last < 0 { - panic("rangeOfOnes called with last < 0") - } - return newRunContainer16Range(uint16(start), uint16(last)) -} - -type roaringArray struct { - keys []uint16 - containers []container `msg:"-"` // don't try to serialize directly. - needCopyOnWrite []bool - copyOnWrite bool - - // conserz is used at serialization time - // to serialize containers. Otherwise empty. - conserz []containerSerz -} - -// containerSerz facilitates serializing container (tricky to -// serialize because it is an interface) by providing a -// light wrapper with a type identifier. -type containerSerz struct { - t contype `msg:"t"` // type - r msgp.Raw `msg:"r"` // Raw msgpack of the actual container type -} - -func newRoaringArray() *roaringArray { - return &roaringArray{} -} - -// runOptimize compresses the element containers to minimize space consumed. -// Q: how does this interact with copyOnWrite and needCopyOnWrite? -// A: since we aren't changing the logical content, just the representation, -// we don't bother to check the needCopyOnWrite bits. We replace -// (possibly all) elements of ra.containers in-place with space -// optimized versions. 
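// A dependency-free sketch of the two-level layout used throughout the deleted
// code above: roaringArray keeps sorted 16-bit keys (the high half of each 32-bit
// value) and one container per key holding the 16-bit low halves. highBits and
// lowBits here are local stand-ins for the package's unexported highbits/lowbits
// helpers.
package main

import "fmt"

func highBits(x uint32) uint16 { return uint16(x >> 16) }
func lowBits(x uint32) uint16  { return uint16(x & 0xFFFF) }

func main() {
	x := uint32(1<<20 | 42) // 1048618
	key, low := highBits(x), lowBits(x)
	fmt.Println(key, low)                      // 16 42
	fmt.Println(uint32(key)<<16 | uint32(low)) // 1048618, reassembled as in ToArray and the iterators
}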
-func (ra *roaringArray) runOptimize() { - for i := range ra.containers { - ra.containers[i] = ra.containers[i].toEfficientContainer() - } -} - -func (ra *roaringArray) appendContainer(key uint16, value container, mustCopyOnWrite bool) { - ra.keys = append(ra.keys, key) - ra.containers = append(ra.containers, value) - ra.needCopyOnWrite = append(ra.needCopyOnWrite, mustCopyOnWrite) -} - -func (ra *roaringArray) appendWithoutCopy(sa roaringArray, startingindex int) { - mustCopyOnWrite := sa.needCopyOnWrite[startingindex] - ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex], mustCopyOnWrite) -} - -func (ra *roaringArray) appendCopy(sa roaringArray, startingindex int) { - // cow only if the two request it, or if we already have a lightweight copy - copyonwrite := (ra.copyOnWrite && sa.copyOnWrite) || sa.needsCopyOnWrite(startingindex) - if !copyonwrite { - // since there is no copy-on-write, we need to clone the container (this is important) - ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex].clone(), copyonwrite) - } else { - ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex], copyonwrite) - if !sa.needsCopyOnWrite(startingindex) { - sa.setNeedsCopyOnWrite(startingindex) - } - } -} - -func (ra *roaringArray) appendWithoutCopyMany(sa roaringArray, startingindex, end int) { - for i := startingindex; i < end; i++ { - ra.appendWithoutCopy(sa, i) - } -} - -func (ra *roaringArray) appendCopyMany(sa roaringArray, startingindex, end int) { - for i := startingindex; i < end; i++ { - ra.appendCopy(sa, i) - } -} - -func (ra *roaringArray) appendCopiesUntil(sa roaringArray, stoppingKey uint16) { - // cow only if the two request it, or if we already have a lightweight copy - copyonwrite := ra.copyOnWrite && sa.copyOnWrite - - for i := 0; i < sa.size(); i++ { - if sa.keys[i] >= stoppingKey { - break - } - thiscopyonewrite := copyonwrite || sa.needsCopyOnWrite(i) - if thiscopyonewrite { - ra.appendContainer(sa.keys[i], sa.containers[i], thiscopyonewrite) - if !sa.needsCopyOnWrite(i) { - sa.setNeedsCopyOnWrite(i) - } - - } else { - // since there is no copy-on-write, we need to clone the container (this is important) - ra.appendContainer(sa.keys[i], sa.containers[i].clone(), thiscopyonewrite) - - } - } -} - -func (ra *roaringArray) appendCopiesAfter(sa roaringArray, beforeStart uint16) { - // cow only if the two request it, or if we already have a lightweight copy - copyonwrite := ra.copyOnWrite && sa.copyOnWrite - - startLocation := sa.getIndex(beforeStart) - if startLocation >= 0 { - startLocation++ - } else { - startLocation = -startLocation - 1 - } - - for i := startLocation; i < sa.size(); i++ { - thiscopyonewrite := copyonwrite || sa.needsCopyOnWrite(i) - if thiscopyonewrite { - ra.appendContainer(sa.keys[i], sa.containers[i], thiscopyonewrite) - if !sa.needsCopyOnWrite(i) { - sa.setNeedsCopyOnWrite(i) - } - } else { - // since there is no copy-on-write, we need to clone the container (this is important) - ra.appendContainer(sa.keys[i], sa.containers[i].clone(), thiscopyonewrite) - - } - } -} - -func (ra *roaringArray) removeIndexRange(begin, end int) { - if end <= begin { - return - } - - r := end - begin - - copy(ra.keys[begin:], ra.keys[end:]) - copy(ra.containers[begin:], ra.containers[end:]) - copy(ra.needCopyOnWrite[begin:], ra.needCopyOnWrite[end:]) - - ra.resize(len(ra.keys) - r) -} - -func (ra *roaringArray) resize(newsize int) { - for k := newsize; k < len(ra.containers); k++ { - ra.containers[k] = nil - } - - ra.keys = 
ra.keys[:newsize] - ra.containers = ra.containers[:newsize] - ra.needCopyOnWrite = ra.needCopyOnWrite[:newsize] -} - -func (ra *roaringArray) clear() { - ra.resize(0) - ra.copyOnWrite = false - ra.conserz = nil -} - -func (ra *roaringArray) clone() *roaringArray { - - sa := roaringArray{} - sa.copyOnWrite = ra.copyOnWrite - - // this is where copyOnWrite is used. - if ra.copyOnWrite { - sa.keys = make([]uint16, len(ra.keys)) - copy(sa.keys, ra.keys) - sa.containers = make([]container, len(ra.containers)) - copy(sa.containers, ra.containers) - sa.needCopyOnWrite = make([]bool, len(ra.needCopyOnWrite)) - - ra.markAllAsNeedingCopyOnWrite() - sa.markAllAsNeedingCopyOnWrite() - - // sa.needCopyOnWrite is shared - } else { - // make a full copy - - sa.keys = make([]uint16, len(ra.keys)) - copy(sa.keys, ra.keys) - - sa.containers = make([]container, len(ra.containers)) - for i := range sa.containers { - sa.containers[i] = ra.containers[i].clone() - } - - sa.needCopyOnWrite = make([]bool, len(ra.needCopyOnWrite)) - } - return &sa -} - -// clone all containers which have needCopyOnWrite set to true -// This can be used to make sure it is safe to munmap a []byte -// that the roaring array may still have a reference to. -func (ra *roaringArray) cloneCopyOnWriteContainers() { - for i, needCopyOnWrite := range ra.needCopyOnWrite { - if needCopyOnWrite { - ra.containers[i] = ra.containers[i].clone() - ra.needCopyOnWrite[i] = false - } - } -} - -// unused function: -//func (ra *roaringArray) containsKey(x uint16) bool { -// return (ra.binarySearch(0, int64(len(ra.keys)), x) >= 0) -//} - -func (ra *roaringArray) getContainer(x uint16) container { - i := ra.binarySearch(0, int64(len(ra.keys)), x) - if i < 0 { - return nil - } - return ra.containers[i] -} - -func (ra *roaringArray) getContainerAtIndex(i int) container { - return ra.containers[i] -} - -func (ra *roaringArray) getFastContainerAtIndex(i int, needsWriteable bool) container { - c := ra.getContainerAtIndex(i) - switch t := c.(type) { - case *arrayContainer: - c = t.toBitmapContainer() - case *runContainer16: - if !t.isFull() { - c = t.toBitmapContainer() - } - case *bitmapContainer: - if needsWriteable && ra.needCopyOnWrite[i] { - c = ra.containers[i].clone() - } - } - return c -} - -func (ra *roaringArray) getWritableContainerAtIndex(i int) container { - if ra.needCopyOnWrite[i] { - ra.containers[i] = ra.containers[i].clone() - ra.needCopyOnWrite[i] = false - } - return ra.containers[i] -} - -func (ra *roaringArray) getIndex(x uint16) int { - // before the binary search, we optimize for frequent cases - size := len(ra.keys) - if (size == 0) || (ra.keys[size-1] == x) { - return size - 1 - } - return ra.binarySearch(0, int64(size), x) -} - -func (ra *roaringArray) getKeyAtIndex(i int) uint16 { - return ra.keys[i] -} - -func (ra *roaringArray) insertNewKeyValueAt(i int, key uint16, value container) { - ra.keys = append(ra.keys, 0) - ra.containers = append(ra.containers, nil) - - copy(ra.keys[i+1:], ra.keys[i:]) - copy(ra.containers[i+1:], ra.containers[i:]) - - ra.keys[i] = key - ra.containers[i] = value - - ra.needCopyOnWrite = append(ra.needCopyOnWrite, false) - copy(ra.needCopyOnWrite[i+1:], ra.needCopyOnWrite[i:]) - ra.needCopyOnWrite[i] = false -} - -func (ra *roaringArray) remove(key uint16) bool { - i := ra.binarySearch(0, int64(len(ra.keys)), key) - if i >= 0 { // if a new key - ra.removeAtIndex(i) - return true - } - return false -} - -func (ra *roaringArray) removeAtIndex(i int) { - copy(ra.keys[i:], ra.keys[i+1:]) - 
copy(ra.containers[i:], ra.containers[i+1:]) - - copy(ra.needCopyOnWrite[i:], ra.needCopyOnWrite[i+1:]) - - ra.resize(len(ra.keys) - 1) -} - -func (ra *roaringArray) setContainerAtIndex(i int, c container) { - ra.containers[i] = c -} - -func (ra *roaringArray) replaceKeyAndContainerAtIndex(i int, key uint16, c container, mustCopyOnWrite bool) { - ra.keys[i] = key - ra.containers[i] = c - ra.needCopyOnWrite[i] = mustCopyOnWrite -} - -func (ra *roaringArray) size() int { - return len(ra.keys) -} - -func (ra *roaringArray) binarySearch(begin, end int64, ikey uint16) int { - low := begin - high := end - 1 - for low+16 <= high { - middleIndex := low + (high-low)/2 // avoid overflow - middleValue := ra.keys[middleIndex] - - if middleValue < ikey { - low = middleIndex + 1 - } else if middleValue > ikey { - high = middleIndex - 1 - } else { - return int(middleIndex) - } - } - for ; low <= high; low++ { - val := ra.keys[low] - if val >= ikey { - if val == ikey { - return int(low) - } - break - } - } - return -int(low + 1) -} - -func (ra *roaringArray) equals(o interface{}) bool { - srb, ok := o.(roaringArray) - if ok { - - if srb.size() != ra.size() { - return false - } - for i, k := range ra.keys { - if k != srb.keys[i] { - return false - } - } - - for i, c := range ra.containers { - if !c.equals(srb.containers[i]) { - return false - } - } - return true - } - return false -} - -func (ra *roaringArray) headerSize() uint64 { - size := uint64(len(ra.keys)) - if ra.hasRunCompression() { - if size < noOffsetThreshold { // for small bitmaps, we omit the offsets - return 4 + (size+7)/8 + 4*size - } - return 4 + (size+7)/8 + 8*size // - 4 because we pack the size with the cookie - } - return 4 + 4 + 8*size - -} - -// should be dirt cheap -func (ra *roaringArray) serializedSizeInBytes() uint64 { - answer := ra.headerSize() - for _, c := range ra.containers { - answer += uint64(c.serializedSizeInBytes()) - } - return answer -} - -// -// spec: https://github.com/RoaringBitmap/RoaringFormatSpec -// -func (ra *roaringArray) writeTo(w io.Writer) (n int64, err error) { - hasRun := ra.hasRunCompression() - isRunSizeInBytes := 0 - cookieSize := 8 - if hasRun { - cookieSize = 4 - isRunSizeInBytes = (len(ra.keys) + 7) / 8 - } - descriptiveHeaderSize := 4 * len(ra.keys) - preambleSize := cookieSize + isRunSizeInBytes + descriptiveHeaderSize - - buf := make([]byte, preambleSize+4*len(ra.keys)) - - nw := 0 - - if hasRun { - binary.LittleEndian.PutUint16(buf[0:], uint16(serialCookie)) - nw += 2 - binary.LittleEndian.PutUint16(buf[2:], uint16(len(ra.keys)-1)) - nw += 2 - - // compute isRun bitmap - var ir []byte - - isRun := newBitmapContainer() - for i, c := range ra.containers { - switch c.(type) { - case *runContainer16: - isRun.iadd(uint16(i)) - } - } - // convert to little endian - ir = isRun.asLittleEndianByteSlice()[:isRunSizeInBytes] - nw += copy(buf[nw:], ir) - } else { - binary.LittleEndian.PutUint32(buf[0:], uint32(serialCookieNoRunContainer)) - nw += 4 - binary.LittleEndian.PutUint32(buf[4:], uint32(len(ra.keys))) - nw += 4 - } - - // descriptive header - for i, key := range ra.keys { - binary.LittleEndian.PutUint16(buf[nw:], key) - nw += 2 - c := ra.containers[i] - binary.LittleEndian.PutUint16(buf[nw:], uint16(c.getCardinality()-1)) - nw += 2 - } - - startOffset := int64(preambleSize + 4*len(ra.keys)) - if !hasRun || (len(ra.keys) >= noOffsetThreshold) { - // offset header - for _, c := range ra.containers { - binary.LittleEndian.PutUint32(buf[nw:], uint32(startOffset)) - nw += 4 - switch rc := c.(type) { - 
case *runContainer16: - startOffset += 2 + int64(len(rc.iv))*4 - default: - startOffset += int64(getSizeInBytesFromCardinality(c.getCardinality())) - } - } - } - - written, err := w.Write(buf[:nw]) - if err != nil { - return n, err - } - n += int64(written) - - for _, c := range ra.containers { - written, err := c.writeTo(w) - if err != nil { - return n, err - } - n += int64(written) - } - return n, nil -} - -// -// spec: https://github.com/RoaringBitmap/RoaringFormatSpec -// -func (ra *roaringArray) toBytes() ([]byte, error) { - var buf bytes.Buffer - _, err := ra.writeTo(&buf) - return buf.Bytes(), err -} - -func (ra *roaringArray) readFrom(stream byteInput) (int64, error) { - cookie, err := stream.readUInt32() - - if err != nil { - return stream.getReadBytes(), fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: %s", err) - } - - var size uint32 - var isRunBitmap []byte - - if cookie&0x0000FFFF == serialCookie { - size = uint32(uint16(cookie>>16) + 1) - // create is-run-container bitmap - isRunBitmapSize := (int(size) + 7) / 8 - isRunBitmap, err = stream.next(isRunBitmapSize) - - if err != nil { - return stream.getReadBytes(), fmt.Errorf("malformed bitmap, failed to read is-run bitmap, got: %s", err) - } - } else if cookie == serialCookieNoRunContainer { - size, err = stream.readUInt32() - - if err != nil { - return stream.getReadBytes(), fmt.Errorf("malformed bitmap, failed to read a bitmap size: %s", err) - } - } else { - return stream.getReadBytes(), fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header") - } - - if size > (1 << 16) { - return stream.getReadBytes(), fmt.Errorf("it is logically impossible to have more than (1<<16) containers") - } - - // descriptive header - buf, err := stream.next(2 * 2 * int(size)) - - if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read descriptive header: %s", err) - } - - keycard := byteSliceAsUint16Slice(buf) - - if isRunBitmap == nil || size >= noOffsetThreshold { - if err := stream.skipBytes(int(size) * 4); err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to skip bytes: %s", err) - } - } - - // Allocate slices upfront as number of containers is known - if cap(ra.containers) >= int(size) { - ra.containers = ra.containers[:size] - } else { - ra.containers = make([]container, size) - } - - if cap(ra.keys) >= int(size) { - ra.keys = ra.keys[:size] - } else { - ra.keys = make([]uint16, size) - } - - if cap(ra.needCopyOnWrite) >= int(size) { - ra.needCopyOnWrite = ra.needCopyOnWrite[:size] - } else { - ra.needCopyOnWrite = make([]bool, size) - } - - for i := uint32(0); i < size; i++ { - key := keycard[2*i] - card := int(keycard[2*i+1]) + 1 - ra.keys[i] = key - ra.needCopyOnWrite[i] = true - - if isRunBitmap != nil && isRunBitmap[i/8]&(1<<(i%8)) != 0 { - // run container - nr, err := stream.readUInt16() - - if err != nil { - return 0, fmt.Errorf("failed to read runtime container size: %s", err) - } - - buf, err := stream.next(int(nr) * 4) - - if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read runtime container content: %s", err) - } - - nb := runContainer16{ - iv: byteSliceAsInterval16Slice(buf), - card: int64(card), - } - - ra.containers[i] = &nb - } else if card > arrayDefaultMaxSize { - // bitmap container - buf, err := stream.next(arrayDefaultMaxSize * 2) - - if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read bitmap container: %s", err) - } - - nb := bitmapContainer{ - cardinality: card, - bitmap: 
byteSliceAsUint64Slice(buf), - } - - ra.containers[i] = &nb - } else { - // array container - buf, err := stream.next(card * 2) - - if err != nil { - return stream.getReadBytes(), fmt.Errorf("failed to read array container: %s", err) - } - - nb := arrayContainer{ - byteSliceAsUint16Slice(buf), - } - - ra.containers[i] = &nb - } - } - - return stream.getReadBytes(), nil -} - -func (ra *roaringArray) hasRunCompression() bool { - for _, c := range ra.containers { - switch c.(type) { - case *runContainer16: - return true - } - } - return false -} - -func (ra *roaringArray) writeToMsgpack(stream io.Writer) error { - - ra.conserz = make([]containerSerz, len(ra.containers)) - for i, v := range ra.containers { - switch cn := v.(type) { - case *bitmapContainer: - bts, err := cn.MarshalMsg(nil) - if err != nil { - return err - } - ra.conserz[i].t = bitmapContype - ra.conserz[i].r = bts - case *arrayContainer: - bts, err := cn.MarshalMsg(nil) - if err != nil { - return err - } - ra.conserz[i].t = arrayContype - ra.conserz[i].r = bts - case *runContainer16: - bts, err := cn.MarshalMsg(nil) - if err != nil { - return err - } - ra.conserz[i].t = run16Contype - ra.conserz[i].r = bts - default: - panic(fmt.Errorf("Unrecognized container implementation: %T", cn)) - } - } - w := snappy.NewWriter(stream) - err := msgp.Encode(w, ra) - ra.conserz = nil - return err -} - -func (ra *roaringArray) readFromMsgpack(stream io.Reader) error { - r := snappy.NewReader(stream) - err := msgp.Decode(r, ra) - if err != nil { - return err - } - - if len(ra.containers) != len(ra.keys) { - ra.containers = make([]container, len(ra.keys)) - } - - for i, v := range ra.conserz { - switch v.t { - case bitmapContype: - c := &bitmapContainer{} - _, err = c.UnmarshalMsg(v.r) - if err != nil { - return err - } - ra.containers[i] = c - case arrayContype: - c := &arrayContainer{} - _, err = c.UnmarshalMsg(v.r) - if err != nil { - return err - } - ra.containers[i] = c - case run16Contype: - c := &runContainer16{} - _, err = c.UnmarshalMsg(v.r) - if err != nil { - return err - } - ra.containers[i] = c - default: - return fmt.Errorf("unrecognized contype serialization code: '%v'", v.t) - } - } - ra.conserz = nil - return nil -} - -func (ra *roaringArray) advanceUntil(min uint16, pos int) int { - lower := pos + 1 - - if lower >= len(ra.keys) || ra.keys[lower] >= min { - return lower - } - - spansize := 1 - - for lower+spansize < len(ra.keys) && ra.keys[lower+spansize] < min { - spansize *= 2 - } - var upper int - if lower+spansize < len(ra.keys) { - upper = lower + spansize - } else { - upper = len(ra.keys) - 1 - } - - if ra.keys[upper] == min { - return upper - } - - if ra.keys[upper] < min { - // means - // array - // has no - // item - // >= min - // pos = array.length; - return len(ra.keys) - } - - // we know that the next-smallest span was too small - lower += (spansize >> 1) - - mid := 0 - for lower+1 != upper { - mid = (lower + upper) >> 1 - if ra.keys[mid] == min { - return mid - } else if ra.keys[mid] < min { - lower = mid - } else { - upper = mid - } - } - return upper -} - -func (ra *roaringArray) markAllAsNeedingCopyOnWrite() { - for i := range ra.needCopyOnWrite { - ra.needCopyOnWrite[i] = true - } -} - -func (ra *roaringArray) needsCopyOnWrite(i int) bool { - return ra.needCopyOnWrite[i] -} - -func (ra *roaringArray) setNeedsCopyOnWrite(i int) { - ra.needCopyOnWrite[i] = true -} diff --git a/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go deleted file 
mode 100644 index dcd718756..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go +++ /dev/null @@ -1,529 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import ( - "github.com/tinylib/msgp/msgp" -) - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *containerSerz) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zxvk uint32 - zxvk, err = dc.ReadMapHeader() - if err != nil { - return - } - for zxvk > 0 { - zxvk-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zbzg uint8 - zbzg, err = dc.ReadUint8() - z.t = contype(zbzg) - } - if err != nil { - return - } - case "r": - err = z.r.DecodeMsg(dc) - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *containerSerz) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "t" - err = en.Append(0x82, 0xa1, 0x74) - if err != nil { - return err - } - err = en.WriteUint8(uint8(z.t)) - if err != nil { - return - } - // write "r" - err = en.Append(0xa1, 0x72) - if err != nil { - return err - } - err = z.r.EncodeMsg(en) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *containerSerz) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "t" - o = append(o, 0x82, 0xa1, 0x74) - o = msgp.AppendUint8(o, uint8(z.t)) - // string "r" - o = append(o, 0xa1, 0x72) - o, err = z.r.MarshalMsg(o) - if err != nil { - return - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *containerSerz) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zbai uint32 - zbai, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zbai > 0 { - zbai-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zcmr uint8 - zcmr, bts, err = msgp.ReadUint8Bytes(bts) - z.t = contype(zcmr) - } - if err != nil { - return - } - case "r": - bts, err = z.r.UnmarshalMsg(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *containerSerz) Msgsize() (s int) { - s = 1 + 2 + msgp.Uint8Size + 2 + z.r.Msgsize() - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *contype) DecodeMsg(dc *msgp.Reader) (err error) { - { - var zajw uint8 - zajw, err = dc.ReadUint8() - (*z) = contype(zajw) - } - if err != nil { - return - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z contype) EncodeMsg(en *msgp.Writer) (err error) { - err = en.WriteUint8(uint8(z)) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z contype) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - o = msgp.AppendUint8(o, uint8(z)) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *contype) UnmarshalMsg(bts []byte) (o []byte, err error) { - { - var zwht uint8 - zwht, bts, err = msgp.ReadUint8Bytes(bts) - (*z) = contype(zwht) - } - if 
err != nil { - return - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z contype) Msgsize() (s int) { - s = msgp.Uint8Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *roaringArray) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zlqf uint32 - zlqf, err = dc.ReadMapHeader() - if err != nil { - return - } - for zlqf > 0 { - zlqf-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "keys": - var zdaf uint32 - zdaf, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.keys) >= int(zdaf) { - z.keys = (z.keys)[:zdaf] - } else { - z.keys = make([]uint16, zdaf) - } - for zhct := range z.keys { - z.keys[zhct], err = dc.ReadUint16() - if err != nil { - return - } - } - case "needCopyOnWrite": - var zpks uint32 - zpks, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.needCopyOnWrite) >= int(zpks) { - z.needCopyOnWrite = (z.needCopyOnWrite)[:zpks] - } else { - z.needCopyOnWrite = make([]bool, zpks) - } - for zcua := range z.needCopyOnWrite { - z.needCopyOnWrite[zcua], err = dc.ReadBool() - if err != nil { - return - } - } - case "copyOnWrite": - z.copyOnWrite, err = dc.ReadBool() - if err != nil { - return - } - case "conserz": - var zjfb uint32 - zjfb, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.conserz) >= int(zjfb) { - z.conserz = (z.conserz)[:zjfb] - } else { - z.conserz = make([]containerSerz, zjfb) - } - for zxhx := range z.conserz { - var zcxo uint32 - zcxo, err = dc.ReadMapHeader() - if err != nil { - return - } - for zcxo > 0 { - zcxo-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zeff uint8 - zeff, err = dc.ReadUint8() - z.conserz[zxhx].t = contype(zeff) - } - if err != nil { - return - } - case "r": - err = z.conserz[zxhx].r.DecodeMsg(dc) - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *roaringArray) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 4 - // write "keys" - err = en.Append(0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.keys))) - if err != nil { - return - } - for zhct := range z.keys { - err = en.WriteUint16(z.keys[zhct]) - if err != nil { - return - } - } - // write "needCopyOnWrite" - err = en.Append(0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.needCopyOnWrite))) - if err != nil { - return - } - for zcua := range z.needCopyOnWrite { - err = en.WriteBool(z.needCopyOnWrite[zcua]) - if err != nil { - return - } - } - // write "copyOnWrite" - err = en.Append(0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - if err != nil { - return err - } - err = en.WriteBool(z.copyOnWrite) - if err != nil { - return - } - // write "conserz" - err = en.Append(0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.conserz))) - if err != nil { - return - } - for zxhx := range z.conserz { - // map header, size 2 - // write "t" - err = en.Append(0x82, 0xa1, 
0x74) - if err != nil { - return err - } - err = en.WriteUint8(uint8(z.conserz[zxhx].t)) - if err != nil { - return - } - // write "r" - err = en.Append(0xa1, 0x72) - if err != nil { - return err - } - err = z.conserz[zxhx].r.EncodeMsg(en) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *roaringArray) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 4 - // string "keys" - o = append(o, 0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.keys))) - for zhct := range z.keys { - o = msgp.AppendUint16(o, z.keys[zhct]) - } - // string "needCopyOnWrite" - o = append(o, 0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - o = msgp.AppendArrayHeader(o, uint32(len(z.needCopyOnWrite))) - for zcua := range z.needCopyOnWrite { - o = msgp.AppendBool(o, z.needCopyOnWrite[zcua]) - } - // string "copyOnWrite" - o = append(o, 0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65) - o = msgp.AppendBool(o, z.copyOnWrite) - // string "conserz" - o = append(o, 0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a) - o = msgp.AppendArrayHeader(o, uint32(len(z.conserz))) - for zxhx := range z.conserz { - // map header, size 2 - // string "t" - o = append(o, 0x82, 0xa1, 0x74) - o = msgp.AppendUint8(o, uint8(z.conserz[zxhx].t)) - // string "r" - o = append(o, 0xa1, 0x72) - o, err = z.conserz[zxhx].r.MarshalMsg(o) - if err != nil { - return - } - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *roaringArray) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zrsw uint32 - zrsw, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zrsw > 0 { - zrsw-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "keys": - var zxpk uint32 - zxpk, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.keys) >= int(zxpk) { - z.keys = (z.keys)[:zxpk] - } else { - z.keys = make([]uint16, zxpk) - } - for zhct := range z.keys { - z.keys[zhct], bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - } - case "needCopyOnWrite": - var zdnj uint32 - zdnj, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.needCopyOnWrite) >= int(zdnj) { - z.needCopyOnWrite = (z.needCopyOnWrite)[:zdnj] - } else { - z.needCopyOnWrite = make([]bool, zdnj) - } - for zcua := range z.needCopyOnWrite { - z.needCopyOnWrite[zcua], bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - return - } - } - case "copyOnWrite": - z.copyOnWrite, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - return - } - case "conserz": - var zobc uint32 - zobc, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.conserz) >= int(zobc) { - z.conserz = (z.conserz)[:zobc] - } else { - z.conserz = make([]containerSerz, zobc) - } - for zxhx := range z.conserz { - var zsnv uint32 - zsnv, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zsnv > 0 { - zsnv-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "t": - { - var zkgt uint8 - zkgt, bts, err = msgp.ReadUint8Bytes(bts) - z.conserz[zxhx].t = contype(zkgt) - } - if err != nil { - return - } - case "r": - bts, err = z.conserz[zxhx].r.UnmarshalMsg(bts) - if err != nil { - return - } - 
default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *roaringArray) Msgsize() (s int) { - s = 1 + 5 + msgp.ArrayHeaderSize + (len(z.keys) * (msgp.Uint16Size)) + 16 + msgp.ArrayHeaderSize + (len(z.needCopyOnWrite) * (msgp.BoolSize)) + 12 + msgp.BoolSize + 8 + msgp.ArrayHeaderSize - for zxhx := range z.conserz { - s += 1 + 2 + msgp.Uint8Size + 2 + z.conserz[zxhx].r.Msgsize() - } - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go b/vendor/github.com/RoaringBitmap/roaring/runcontainer.go deleted file mode 100644 index cbffdaf24..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go +++ /dev/null @@ -1,2514 +0,0 @@ -package roaring - -// -// Copyright (c) 2016 by the roaring authors. -// Licensed under the Apache License, Version 2.0. -// -// We derive a few lines of code from the sort.Search -// function in the golang standard library. That function -// is Copyright 2009 The Go Authors, and licensed -// under the following BSD-style license. -/* -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -import ( - "fmt" - "sort" - "unsafe" -) - -//go:generate msgp -unexported - -// runContainer16 does run-length encoding of sets of -// uint16 integers. -type runContainer16 struct { - iv []interval16 - card int64 - - // avoid allocation during search - myOpts searchOptions `msg:"-"` -} - -// interval16 is the internal to runContainer16 -// structure that maintains the individual [start, last] -// closed intervals. -type interval16 struct { - start uint16 - length uint16 // length minus 1 -} - -func newInterval16Range(start, last uint16) interval16 { - if last < start { - panic(fmt.Sprintf("last (%d) cannot be smaller than start (%d)", last, start)) - } - - return interval16{ - start, - last - start, - } -} - -// runlen returns the count of integers in the interval. 
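The interval16 type removed just above is the heart of the run container: a closed run [start, start+length] whose length field stores the run length minus one, so a run covering all 65536 values still fits in a uint16. A small self-contained sketch of that encoding idea (the interval struct and encodeRuns helper below are illustrative names, not part of the vendored code):

package main

import "fmt"

// interval mirrors the closed-run idea of the removed interval16:
// length stores the run length minus one.
type interval struct {
	start  uint16
	length uint16
}

// encodeRuns collapses a sorted slice of uint16 values with no duplicates
// into maximal runs of consecutive integers.
func encodeRuns(vals []uint16) []interval {
	if len(vals) == 0 {
		return nil
	}
	runs := []interval{{start: vals[0]}}
	for i := 1; i < len(vals); i++ {
		last := &runs[len(runs)-1]
		if vals[i] == last.start+last.length+1 {
			last.length++ // value extends the current run
		} else {
			runs = append(runs, interval{start: vals[i]})
		}
	}
	return runs
}

func main() {
	// values 1..4, 7, 10..11 -> runs {1 3} {7 0} {10 1} (length = run length minus one)
	fmt.Println(encodeRuns([]uint16{1, 2, 3, 4, 7, 10, 11}))
}

Sorted input collapses to three runs here, which is exactly the compression a run container trades against array and bitmap containers.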
-func (iv interval16) runlen() int64 { - return int64(iv.length) + 1 -} - -func (iv interval16) last() uint16 { - return iv.start + iv.length -} - -// String produces a human viewable string of the contents. -func (iv interval16) String() string { - return fmt.Sprintf("[%d, %d]", iv.start, iv.length) -} - -func ivalString16(iv []interval16) string { - var s string - var j int - var p interval16 - for j, p = range iv { - s += fmt.Sprintf("%v:[%d, %d], ", j, p.start, p.last()) - } - return s -} - -// String produces a human viewable string of the contents. -func (rc *runContainer16) String() string { - if len(rc.iv) == 0 { - return "runContainer16{}" - } - is := ivalString16(rc.iv) - return `runContainer16{` + is + `}` -} - -// uint16Slice is a sort.Sort convenience method -type uint16Slice []uint16 - -// Len returns the length of p. -func (p uint16Slice) Len() int { return len(p) } - -// Less returns p[i] < p[j] -func (p uint16Slice) Less(i, j int) bool { return p[i] < p[j] } - -// Swap swaps elements i and j. -func (p uint16Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -//msgp:ignore addHelper - -// addHelper helps build a runContainer16. -type addHelper16 struct { - runstart uint16 - runlen uint16 - actuallyAdded uint16 - m []interval16 - rc *runContainer16 -} - -func (ah *addHelper16) storeIval(runstart, runlen uint16) { - mi := interval16{start: runstart, length: runlen} - ah.m = append(ah.m, mi) -} - -func (ah *addHelper16) add(cur, prev uint16, i int) { - if cur == prev+1 { - ah.runlen++ - ah.actuallyAdded++ - } else { - if cur < prev { - panic(fmt.Sprintf("newRunContainer16FromVals sees "+ - "unsorted vals; vals[%v]=cur=%v < prev=%v. Sort your vals"+ - " before calling us with alreadySorted == true.", i, cur, prev)) - } - if cur == prev { - // ignore duplicates - } else { - ah.actuallyAdded++ - ah.storeIval(ah.runstart, ah.runlen) - ah.runstart = cur - ah.runlen = 0 - } - } -} - -// newRunContainerRange makes a new container made of just the specified closed interval [rangestart,rangelast] -func newRunContainer16Range(rangestart uint16, rangelast uint16) *runContainer16 { - rc := &runContainer16{} - rc.iv = append(rc.iv, newInterval16Range(rangestart, rangelast)) - return rc -} - -// newRunContainer16FromVals makes a new container from vals. -// -// For efficiency, vals should be sorted in ascending order. -// Ideally vals should not contain duplicates, but we detect and -// ignore them. If vals is already sorted in ascending order, then -// pass alreadySorted = true. Otherwise, for !alreadySorted, -// we will sort vals before creating a runContainer16 of them. -// We sort the original vals, so this will change what the -// caller sees in vals as a side effect. -func newRunContainer16FromVals(alreadySorted bool, vals ...uint16) *runContainer16 { - // keep this in sync with newRunContainer16FromArray below - - rc := &runContainer16{} - ah := addHelper16{rc: rc} - - if !alreadySorted { - sort.Sort(uint16Slice(vals)) - } - n := len(vals) - var cur, prev uint16 - switch { - case n == 0: - // nothing more - case n == 1: - ah.m = append(ah.m, newInterval16Range(vals[0], vals[0])) - ah.actuallyAdded++ - default: - ah.runstart = vals[0] - ah.actuallyAdded++ - for i := 1; i < n; i++ { - prev = vals[i-1] - cur = vals[i] - ah.add(cur, prev, i) - } - ah.storeIval(ah.runstart, ah.runlen) - } - rc.iv = ah.m - rc.card = int64(ah.actuallyAdded) - return rc -} - -// newRunContainer16FromBitmapContainer makes a new run container from bc, -// somewhat efficiently. 
For reference, see the Java -// https://github.com/RoaringBitmap/RoaringBitmap/blob/master/src/main/java/org/roaringbitmap/RunContainer.java#L145-L192 -func newRunContainer16FromBitmapContainer(bc *bitmapContainer) *runContainer16 { - - rc := &runContainer16{} - nbrRuns := bc.numberOfRuns() - if nbrRuns == 0 { - return rc - } - rc.iv = make([]interval16, nbrRuns) - - longCtr := 0 // index of current long in bitmap - curWord := bc.bitmap[0] // its value - runCount := 0 - for { - // potentially multiword advance to first 1 bit - for curWord == 0 && longCtr < len(bc.bitmap)-1 { - longCtr++ - curWord = bc.bitmap[longCtr] - } - - if curWord == 0 { - // wrap up, no more runs - return rc - } - localRunStart := countTrailingZeros(curWord) - runStart := localRunStart + 64*longCtr - // stuff 1s into number's LSBs - curWordWith1s := curWord | (curWord - 1) - - // find the next 0, potentially in a later word - runEnd := 0 - for curWordWith1s == maxWord && longCtr < len(bc.bitmap)-1 { - longCtr++ - curWordWith1s = bc.bitmap[longCtr] - } - - if curWordWith1s == maxWord { - // a final unterminated run of 1s - runEnd = wordSizeInBits + longCtr*64 - rc.iv[runCount].start = uint16(runStart) - rc.iv[runCount].length = uint16(runEnd) - uint16(runStart) - 1 - return rc - } - localRunEnd := countTrailingZeros(^curWordWith1s) - runEnd = localRunEnd + longCtr*64 - rc.iv[runCount].start = uint16(runStart) - rc.iv[runCount].length = uint16(runEnd) - 1 - uint16(runStart) - runCount++ - // now, zero out everything right of runEnd. - curWord = curWordWith1s & (curWordWith1s + 1) - // We've lathered and rinsed, so repeat... - } - -} - -// -// newRunContainer16FromArray populates a new -// runContainer16 from the contents of arr. -// -func newRunContainer16FromArray(arr *arrayContainer) *runContainer16 { - // keep this in sync with newRunContainer16FromVals above - - rc := &runContainer16{} - ah := addHelper16{rc: rc} - - n := arr.getCardinality() - var cur, prev uint16 - switch { - case n == 0: - // nothing more - case n == 1: - ah.m = append(ah.m, newInterval16Range(arr.content[0], arr.content[0])) - ah.actuallyAdded++ - default: - ah.runstart = arr.content[0] - ah.actuallyAdded++ - for i := 1; i < n; i++ { - prev = arr.content[i-1] - cur = arr.content[i] - ah.add(cur, prev, i) - } - ah.storeIval(ah.runstart, ah.runlen) - } - rc.iv = ah.m - rc.card = int64(ah.actuallyAdded) - return rc -} - -// set adds the integers in vals to the set. Vals -// must be sorted in increasing order; if not, you should set -// alreadySorted to false, and we will sort them in place for you. -// (Be aware of this side effect -- it will affect the callers -// view of vals). -// -// If you have a small number of additions to an already -// big runContainer16, calling Add() may be faster. -func (rc *runContainer16) set(alreadySorted bool, vals ...uint16) { - - rc2 := newRunContainer16FromVals(alreadySorted, vals...) - un := rc.union(rc2) - rc.iv = un.iv - rc.card = 0 -} - -// canMerge returns true iff the intervals -// a and b either overlap or they are -// contiguous and so can be merged into -// a single interval. -func canMerge16(a, b interval16) bool { - if int64(a.last())+1 < int64(b.start) { - return false - } - return int64(b.last())+1 >= int64(a.start) -} - -// haveOverlap differs from canMerge in that -// it tells you if the intersection of a -// and b would contain an element (otherwise -// it would be the empty set, and we return -// false). 
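newRunContainer16FromBitmapContainer, deleted above, finds runs inside 64-bit words with two bit tricks: count trailing zeros to jump to the next 1 bit, then OR the word with itself minus one to flood the run before locating its end. A hedged single-word sketch of the same idea (runsInWord is an illustrative helper, not the vendored function):

package main

import (
	"fmt"
	"math/bits"
)

// runsInWord extracts [start, end] runs of set bits from one 64-bit word.
func runsInWord(w uint64) [][2]int {
	var runs [][2]int
	for w != 0 {
		start := bits.TrailingZeros64(w)             // position of the run's first 1 bit
		flooded := w | (w - 1)                       // set every bit below the run's end
		end := bits.TrailingZeros64(^flooded) - 1    // position of the run's last 1 bit
		runs = append(runs, [2]int{start, end})
		w = flooded & (flooded + 1) // clear everything up to and including this run
	}
	return runs
}

func main() {
	// bits 1..3 and 8..9 set -> runs [1 3] and [8 9]
	fmt.Println(runsInWord(0b11_0000_1110))
}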
-func haveOverlap16(a, b interval16) bool { - if int64(a.last())+1 <= int64(b.start) { - return false - } - return int64(b.last())+1 > int64(a.start) -} - -// mergeInterval16s joins a and b into a -// new interval, and panics if it cannot. -func mergeInterval16s(a, b interval16) (res interval16) { - if !canMerge16(a, b) { - panic(fmt.Sprintf("cannot merge %#v and %#v", a, b)) - } - - if b.start < a.start { - res.start = b.start - } else { - res.start = a.start - } - - if b.last() > a.last() { - res.length = b.last() - res.start - } else { - res.length = a.last() - res.start - } - - return -} - -// intersectInterval16s returns the intersection -// of a and b. The isEmpty flag will be true if -// a and b were disjoint. -func intersectInterval16s(a, b interval16) (res interval16, isEmpty bool) { - if !haveOverlap16(a, b) { - isEmpty = true - return - } - if b.start > a.start { - res.start = b.start - } else { - res.start = a.start - } - - bEnd := b.last() - aEnd := a.last() - var resEnd uint16 - - if bEnd < aEnd { - resEnd = bEnd - } else { - resEnd = aEnd - } - res.length = resEnd - res.start - return -} - -// union merges two runContainer16s, producing -// a new runContainer16 with the union of rc and b. -func (rc *runContainer16) union(b *runContainer16) *runContainer16 { - - // rc is also known as 'a' here, but golint insisted we - // call it rc for consistency with the rest of the methods. - - var m []interval16 - - alim := int64(len(rc.iv)) - blim := int64(len(b.iv)) - - var na int64 // next from a - var nb int64 // next from b - - // merged holds the current merge output, which might - // get additional merges before being appended to m. - var merged interval16 - var mergedUsed bool // is merged being used at the moment? - - var cura interval16 // currently considering this interval16 from a - var curb interval16 // currently considering this interval16 from b - - pass := 0 - for na < alim && nb < blim { - pass++ - cura = rc.iv[na] - curb = b.iv[nb] - - if mergedUsed { - mergedUpdated := false - if canMerge16(cura, merged) { - merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - mergedUpdated = true - } - if canMerge16(curb, merged) { - merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) - mergedUpdated = true - } - if !mergedUpdated { - // we know that merged is disjoint from cura and curb - m = append(m, merged) - mergedUsed = false - } - continue - - } else { - // !mergedUsed - if !canMerge16(cura, curb) { - if cura.start < curb.start { - m = append(m, cura) - na++ - } else { - m = append(m, curb) - nb++ - } - } else { - merged = mergeInterval16s(cura, curb) - mergedUsed = true - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) - } - } - } - var aDone, bDone bool - if na >= alim { - aDone = true - } - if nb >= blim { - bDone = true - } - // finish by merging anything remaining into merged we can: - if mergedUsed { - if !aDone { - aAdds: - for na < alim { - cura = rc.iv[na] - if canMerge16(cura, merged) { - merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - } else { - break aAdds - } - } - - } - - if !bDone { - bAdds: - for nb < blim { - curb = b.iv[nb] - if canMerge16(curb, merged) { - merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) - } else { - break bAdds - } - } - - } - - m = append(m, 
merged) - } - if na < alim { - m = append(m, rc.iv[na:]...) - } - if nb < blim { - m = append(m, b.iv[nb:]...) - } - - res := &runContainer16{iv: m} - return res -} - -// unionCardinality returns the cardinality of the merger of two runContainer16s, the union of rc and b. -func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 { - - // rc is also known as 'a' here, but golint insisted we - // call it rc for consistency with the rest of the methods. - answer := uint64(0) - - alim := int64(len(rc.iv)) - blim := int64(len(b.iv)) - - var na int64 // next from a - var nb int64 // next from b - - // merged holds the current merge output, which might - // get additional merges before being appended to m. - var merged interval16 - var mergedUsed bool // is merged being used at the moment? - - var cura interval16 // currently considering this interval16 from a - var curb interval16 // currently considering this interval16 from b - - pass := 0 - for na < alim && nb < blim { - pass++ - cura = rc.iv[na] - curb = b.iv[nb] - - if mergedUsed { - mergedUpdated := false - if canMerge16(cura, merged) { - merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - mergedUpdated = true - } - if canMerge16(curb, merged) { - merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) - mergedUpdated = true - } - if !mergedUpdated { - // we know that merged is disjoint from cura and curb - //m = append(m, merged) - answer += uint64(merged.last()) - uint64(merged.start) + 1 - mergedUsed = false - } - continue - - } else { - // !mergedUsed - if !canMerge16(cura, curb) { - if cura.start < curb.start { - answer += uint64(cura.last()) - uint64(cura.start) + 1 - //m = append(m, cura) - na++ - } else { - answer += uint64(curb.last()) - uint64(curb.start) + 1 - //m = append(m, curb) - nb++ - } - } else { - merged = mergeInterval16s(cura, curb) - mergedUsed = true - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) - } - } - } - var aDone, bDone bool - if na >= alim { - aDone = true - } - if nb >= blim { - bDone = true - } - // finish by merging anything remaining into merged we can: - if mergedUsed { - if !aDone { - aAdds: - for na < alim { - cura = rc.iv[na] - if canMerge16(cura, merged) { - merged = mergeInterval16s(cura, merged) - na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1) - } else { - break aAdds - } - } - - } - - if !bDone { - bAdds: - for nb < blim { - curb = b.iv[nb] - if canMerge16(curb, merged) { - merged = mergeInterval16s(curb, merged) - nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1) - } else { - break bAdds - } - } - - } - - //m = append(m, merged) - answer += uint64(merged.last()) - uint64(merged.start) + 1 - } - for _, r := range rc.iv[na:] { - answer += uint64(r.last()) - uint64(r.start) + 1 - } - for _, r := range b.iv[nb:] { - answer += uint64(r.last()) - uint64(r.start) + 1 - } - return answer -} - -// indexOfIntervalAtOrAfter is a helper for union. -func (rc *runContainer16) indexOfIntervalAtOrAfter(key int64, startIndex int64) int64 { - rc.myOpts.startIndex = startIndex - rc.myOpts.endxIndex = 0 - - w, already, _ := rc.search(key, &rc.myOpts) - if already { - return w - } - return w + 1 -} - -// intersect returns a new runContainer16 holding the -// intersection of rc (also known as 'a') and b. 
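The union method removed above merges two sorted interval lists in a single pass, fusing intervals that overlap or touch. A simpler sort-then-sweep sketch that produces the same result (span and unionSpans are illustrative names; the vendored code uses a two-pointer merge instead of re-sorting):

package main

import (
	"fmt"
	"sort"
)

type span struct{ start, last int } // closed interval [start, last]

// unionSpans merges two lists of disjoint, sorted closed intervals,
// fusing any pair that overlaps or is contiguous.
func unionSpans(a, b []span) []span {
	all := append(append([]span{}, a...), b...)
	sort.Slice(all, func(i, j int) bool { return all[i].start < all[j].start })
	var out []span
	for _, s := range all {
		n := len(out)
		if n > 0 && s.start <= out[n-1].last+1 {
			if s.last > out[n-1].last {
				out[n-1].last = s.last // extend the previous interval
			}
		} else {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	a := []span{{1, 4}, {10, 12}}
	b := []span{{3, 6}, {13, 13}, {20, 20}}
	fmt.Println(unionSpans(a, b)) // [{1 6} {10 13} {20 20}]
}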
-func (rc *runContainer16) intersect(b *runContainer16) *runContainer16 { - - a := rc - numa := int64(len(a.iv)) - numb := int64(len(b.iv)) - res := &runContainer16{} - if numa == 0 || numb == 0 { - return res - } - - if numa == 1 && numb == 1 { - if !haveOverlap16(a.iv[0], b.iv[0]) { - return res - } - } - - var output []interval16 - - var acuri int64 - var bcuri int64 - - astart := int64(a.iv[acuri].start) - bstart := int64(b.iv[bcuri].start) - - var intersection interval16 - var leftoverstart int64 - var isOverlap, isLeftoverA, isLeftoverB bool - var done bool -toploop: - for acuri < numa && bcuri < numb { - - isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection = - intersectWithLeftover16(astart, int64(a.iv[acuri].last()), bstart, int64(b.iv[bcuri].last())) - - if !isOverlap { - switch { - case astart < bstart: - acuri, done = a.findNextIntervalThatIntersectsStartingFrom(acuri+1, bstart) - if done { - break toploop - } - astart = int64(a.iv[acuri].start) - - case astart > bstart: - bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart) - if done { - break toploop - } - bstart = int64(b.iv[bcuri].start) - - //default: - // panic("impossible that astart == bstart, since !isOverlap") - } - - } else { - // isOverlap - output = append(output, intersection) - switch { - case isLeftoverA: - // note that we change astart without advancing acuri, - // since we need to capture any 2ndary intersections with a.iv[acuri] - astart = leftoverstart - bcuri++ - if bcuri >= numb { - break toploop - } - bstart = int64(b.iv[bcuri].start) - case isLeftoverB: - // note that we change bstart without advancing bcuri, - // since we need to capture any 2ndary intersections with b.iv[bcuri] - bstart = leftoverstart - acuri++ - if acuri >= numa { - break toploop - } - astart = int64(a.iv[acuri].start) - default: - // neither had leftover, both completely consumed - // optionally, assert for sanity: - //if a.iv[acuri].endx != b.iv[bcuri].endx { - // panic("huh? should only be possible that endx agree now!") - //} - - // advance to next a interval - acuri++ - if acuri >= numa { - break toploop - } - astart = int64(a.iv[acuri].start) - - // advance to next b interval - bcuri++ - if bcuri >= numb { - break toploop - } - bstart = int64(b.iv[bcuri].start) - } - } - } // end for toploop - - if len(output) == 0 { - return res - } - - res.iv = output - return res -} - -// intersectCardinality returns the cardinality of the -// intersection of rc (also known as 'a') and b. 
-func (rc *runContainer16) intersectCardinality(b *runContainer16) int64 { - answer := int64(0) - - a := rc - numa := int64(len(a.iv)) - numb := int64(len(b.iv)) - if numa == 0 || numb == 0 { - return 0 - } - - if numa == 1 && numb == 1 { - if !haveOverlap16(a.iv[0], b.iv[0]) { - return 0 - } - } - - var acuri int64 - var bcuri int64 - - astart := int64(a.iv[acuri].start) - bstart := int64(b.iv[bcuri].start) - - var intersection interval16 - var leftoverstart int64 - var isOverlap, isLeftoverA, isLeftoverB bool - var done bool - pass := 0 -toploop: - for acuri < numa && bcuri < numb { - pass++ - - isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection = - intersectWithLeftover16(astart, int64(a.iv[acuri].last()), bstart, int64(b.iv[bcuri].last())) - - if !isOverlap { - switch { - case astart < bstart: - acuri, done = a.findNextIntervalThatIntersectsStartingFrom(acuri+1, bstart) - if done { - break toploop - } - astart = int64(a.iv[acuri].start) - - case astart > bstart: - bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart) - if done { - break toploop - } - bstart = int64(b.iv[bcuri].start) - - //default: - // panic("impossible that astart == bstart, since !isOverlap") - } - - } else { - // isOverlap - answer += int64(intersection.last()) - int64(intersection.start) + 1 - switch { - case isLeftoverA: - // note that we change astart without advancing acuri, - // since we need to capture any 2ndary intersections with a.iv[acuri] - astart = leftoverstart - bcuri++ - if bcuri >= numb { - break toploop - } - bstart = int64(b.iv[bcuri].start) - case isLeftoverB: - // note that we change bstart without advancing bcuri, - // since we need to capture any 2ndary intersections with b.iv[bcuri] - bstart = leftoverstart - acuri++ - if acuri >= numa { - break toploop - } - astart = int64(a.iv[acuri].start) - default: - // neither had leftover, both completely consumed - // optionally, assert for sanity: - //if a.iv[acuri].endx != b.iv[bcuri].endx { - // panic("huh? should only be possible that endx agree now!") - //} - - // advance to next a interval - acuri++ - if acuri >= numa { - break toploop - } - astart = int64(a.iv[acuri].start) - - // advance to next b interval - bcuri++ - if bcuri >= numb { - break toploop - } - bstart = int64(b.iv[bcuri].start) - } - } - } // end for toploop - - return answer -} - -// get returns true iff key is in the container. -func (rc *runContainer16) contains(key uint16) bool { - _, in, _ := rc.search(int64(key), nil) - return in -} - -// numIntervals returns the count of intervals in the container. -func (rc *runContainer16) numIntervals() int { - return len(rc.iv) -} - -// searchOptions allows us to accelerate search with -// prior knowledge of (mostly lower) bounds. This is used by Union -// and Intersect. -type searchOptions struct { - // start here instead of at 0 - startIndex int64 - - // upper bound instead of len(rc.iv); - // endxIndex == 0 means ignore the bound and use - // endxIndex == n ==len(rc.iv) which is also - // naturally the default for search() - // when opt = nil. - endxIndex int64 -} - -// search returns alreadyPresent to indicate if the -// key is already in one of our interval16s. -// -// If key is alreadyPresent, then whichInterval16 tells -// you where. 
-// -// If key is not already present, then whichInterval16 is -// set as follows: -// -// a) whichInterval16 == len(rc.iv)-1 if key is beyond our -// last interval16 in rc.iv; -// -// b) whichInterval16 == -1 if key is before our first -// interval16 in rc.iv; -// -// c) whichInterval16 is set to the minimum index of rc.iv -// which comes strictly before the key; -// so rc.iv[whichInterval16].last < key, -// and if whichInterval16+1 exists, then key < rc.iv[whichInterval16+1].start -// (Note that whichInterval16+1 won't exist when -// whichInterval16 is the last interval.) -// -// runContainer16.search always returns whichInterval16 < len(rc.iv). -// -// If not nil, opts can be used to further restrict -// the search space. -// -func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval16 int64, alreadyPresent bool, numCompares int) { - n := int64(len(rc.iv)) - if n == 0 { - return -1, false, 0 - } - - startIndex := int64(0) - endxIndex := n - if opts != nil { - startIndex = opts.startIndex - - // let endxIndex == 0 mean no effect - if opts.endxIndex > 0 { - endxIndex = opts.endxIndex - } - } - - // sort.Search returns the smallest index i - // in [0, n) at which f(i) is true, assuming that on the range [0, n), - // f(i) == true implies f(i+1) == true. - // If there is no such index, Search returns n. - - // For correctness, this began as verbatim snippet from - // sort.Search in the Go standard lib. - // We inline our comparison function for speed, and - // annotate with numCompares - // to observe and test that extra bounds are utilized. - i, j := startIndex, endxIndex - for i < j { - h := i + (j-i)/2 // avoid overflow when computing h as the bisector - // i <= h < j - numCompares++ - if !(key < int64(rc.iv[h].start)) { - i = h + 1 - } else { - j = h - } - } - below := i - // end std lib snippet. - - // The above is a simple in-lining and annotation of: - /* below := sort.Search(n, - func(i int) bool { - return key < rc.iv[i].start - }) - */ - whichInterval16 = below - 1 - - if below == n { - // all falses => key is >= start of all interval16s - // ... so does it belong to the last interval16? - if key < int64(rc.iv[n-1].last())+1 { - // yes, it belongs to the last interval16 - alreadyPresent = true - return - } - // no, it is beyond the last interval16. - // leave alreadyPreset = false - return - } - - // INVAR: key is below rc.iv[below] - if below == 0 { - // key is before the first first interval16. - // leave alreadyPresent = false - return - } - - // INVAR: key is >= rc.iv[below-1].start and - // key is < rc.iv[below].start - - // is key in below-1 interval16? - if key >= int64(rc.iv[below-1].start) && key < int64(rc.iv[below-1].last())+1 { - // yes, it is. key is in below-1 interval16. - alreadyPresent = true - return - } - - // INVAR: key >= rc.iv[below-1].endx && key < rc.iv[below].start - // leave alreadyPresent = false - return -} - -// cardinality returns the count of the integers stored in the -// runContainer16. -func (rc *runContainer16) cardinality() int64 { - if len(rc.iv) == 0 { - rc.card = 0 - return 0 - } - if rc.card > 0 { - return rc.card // already cached - } - // have to compute it - var n int64 - for _, p := range rc.iv { - n += p.runlen() - } - rc.card = n // cache it - return n -} - -// AsSlice decompresses the contents into a []uint16 slice. 
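The search method deleted above inlines sort.Search to find the first interval that starts after the key, then checks whether the preceding interval covers it. A compact sketch of that membership test under the same assumptions of sorted, disjoint intervals (span and contains are illustrative names):

package main

import (
	"fmt"
	"sort"
)

type span struct{ start, last uint16 } // closed interval [start, last]

// contains reports whether key falls inside one of the intervals,
// mirroring the search-then-check-predecessor pattern of the removed code.
func contains(iv []span, key uint16) bool {
	// index of the first interval whose start is strictly greater than key
	i := sort.Search(len(iv), func(i int) bool { return key < iv[i].start })
	if i == 0 {
		return false // key precedes every interval
	}
	prev := iv[i-1]
	return key >= prev.start && key <= prev.last
}

func main() {
	iv := []span{{1, 4}, {10, 12}}
	fmt.Println(contains(iv, 3), contains(iv, 7), contains(iv, 12)) // true false true
}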
-func (rc *runContainer16) AsSlice() []uint16 { - s := make([]uint16, rc.cardinality()) - j := 0 - for _, p := range rc.iv { - for i := p.start; i <= p.last(); i++ { - s[j] = i - j++ - } - } - return s -} - -// newRunContainer16 creates an empty run container. -func newRunContainer16() *runContainer16 { - return &runContainer16{} -} - -// newRunContainer16CopyIv creates a run container, initializing -// with a copy of the supplied iv slice. -// -func newRunContainer16CopyIv(iv []interval16) *runContainer16 { - rc := &runContainer16{ - iv: make([]interval16, len(iv)), - } - copy(rc.iv, iv) - return rc -} - -func (rc *runContainer16) Clone() *runContainer16 { - rc2 := newRunContainer16CopyIv(rc.iv) - return rc2 -} - -// newRunContainer16TakeOwnership returns a new runContainer16 -// backed by the provided iv slice, which we will -// assume exclusive control over from now on. -// -func newRunContainer16TakeOwnership(iv []interval16) *runContainer16 { - rc := &runContainer16{ - iv: iv, - } - return rc -} - -const baseRc16Size = int(unsafe.Sizeof(runContainer16{})) -const perIntervalRc16Size = int(unsafe.Sizeof(interval16{})) - -const baseDiskRc16Size = int(unsafe.Sizeof(uint16(0))) - -// see also runContainer16SerializedSizeInBytes(numRuns int) int - -// getSizeInBytes returns the number of bytes of memory -// required by this runContainer16. -func (rc *runContainer16) getSizeInBytes() int { - return perIntervalRc16Size*len(rc.iv) + baseRc16Size -} - -// runContainer16SerializedSizeInBytes returns the number of bytes of disk -// required to hold numRuns in a runContainer16. -func runContainer16SerializedSizeInBytes(numRuns int) int { - return perIntervalRc16Size*numRuns + baseDiskRc16Size -} - -// Add adds a single value k to the set. -func (rc *runContainer16) Add(k uint16) (wasNew bool) { - // TODO comment from runContainer16.java: - // it might be better and simpler to do return - // toBitmapOrArrayContainer(getCardinality()).add(k) - // but note that some unit tests use this method to build up test - // runcontainers without calling runOptimize - - k64 := int64(k) - - index, present, _ := rc.search(k64, nil) - if present { - return // already there - } - wasNew = true - - // increment card if it is cached already - if rc.card > 0 { - rc.card++ - } - n := int64(len(rc.iv)) - if index == -1 { - // we may need to extend the first run - if n > 0 { - if rc.iv[0].start == k+1 { - rc.iv[0].start = k - rc.iv[0].length++ - return - } - } - // nope, k stands alone, starting the new first interval16. - rc.iv = append([]interval16{newInterval16Range(k, k)}, rc.iv...) - return - } - - // are we off the end? handle both index == n and index == n-1: - if index >= n-1 { - if int64(rc.iv[n-1].last())+1 == k64 { - rc.iv[n-1].length++ - return - } - rc.iv = append(rc.iv, newInterval16Range(k, k)) - return - } - - // INVAR: index and index+1 both exist, and k goes between them. - // - // Now: add k into the middle, - // possibly fusing with index or index+1 interval16 - // and possibly resulting in fusing of two interval16s - // that had a one integer gap. - - left := index - right := index + 1 - - // are we fusing left and right by adding k? - if int64(rc.iv[left].last())+1 == k64 && int64(rc.iv[right].start) == k64+1 { - // fuse into left - rc.iv[left].length = rc.iv[right].last() - rc.iv[left].start - // remove redundant right - rc.iv = append(rc.iv[:left+1], rc.iv[right+1:]...) - return - } - - // are we an addition to left? 
- if int64(rc.iv[left].last())+1 == k64 { - // yes - rc.iv[left].length++ - return - } - - // are we an addition to right? - if int64(rc.iv[right].start) == k64+1 { - // yes - rc.iv[right].start = k - rc.iv[right].length++ - return - } - - // k makes a standalone new interval16, inserted in the middle - tail := append([]interval16{newInterval16Range(k, k)}, rc.iv[right:]...) - rc.iv = append(rc.iv[:left+1], tail...) - return -} - -//msgp:ignore runIterator - -// runIterator16 advice: you must call hasNext() -// before calling next()/peekNext() to insure there are contents. -type runIterator16 struct { - rc *runContainer16 - curIndex int64 - curPosInIndex uint16 -} - -// newRunIterator16 returns a new empty run container. -func (rc *runContainer16) newRunIterator16() *runIterator16 { - return &runIterator16{rc: rc, curIndex: 0, curPosInIndex: 0} -} - -// hasNext returns false if calling next will panic. It -// returns true when there is at least one more value -// available in the iteration sequence. -func (ri *runIterator16) hasNext() bool { - return int64(len(ri.rc.iv)) > ri.curIndex+1 || - (int64(len(ri.rc.iv)) == ri.curIndex+1 && ri.rc.iv[ri.curIndex].length >= ri.curPosInIndex) -} - -// next returns the next value in the iteration sequence. -func (ri *runIterator16) next() uint16 { - next := ri.rc.iv[ri.curIndex].start + ri.curPosInIndex - - if ri.curPosInIndex == ri.rc.iv[ri.curIndex].length { - ri.curPosInIndex = 0 - ri.curIndex++ - } else { - ri.curPosInIndex++ - } - - return next -} - -// peekNext returns the next value in the iteration sequence without advancing the iterator -func (ri *runIterator16) peekNext() uint16 { - return ri.rc.iv[ri.curIndex].start + ri.curPosInIndex -} - -// advanceIfNeeded advances as long as the next value is smaller than minval -func (ri *runIterator16) advanceIfNeeded(minval uint16) { - if !ri.hasNext() || ri.peekNext() >= minval { - return - } - - opt := &searchOptions{ - startIndex: ri.curIndex, - endxIndex: int64(len(ri.rc.iv)), - } - - // interval cannot be -1 because of minval > peekNext - interval, isPresent, _ := ri.rc.search(int64(minval), opt) - - // if the minval is present, set the curPosIndex at the right position - if isPresent { - ri.curIndex = interval - ri.curPosInIndex = minval - ri.rc.iv[ri.curIndex].start - } else { - // otherwise interval is set to to the minimum index of rc.iv - // which comes strictly before the key, that's why we set the next interval - ri.curIndex = interval + 1 - ri.curPosInIndex = 0 - } -} - -// runReverseIterator16 advice: you must call hasNext() -// before calling next() to insure there are contents. -type runReverseIterator16 struct { - rc *runContainer16 - curIndex int64 // index into rc.iv - curPosInIndex uint16 // offset in rc.iv[curIndex] -} - -// newRunReverseIterator16 returns a new empty run iterator. -func (rc *runContainer16) newRunReverseIterator16() *runReverseIterator16 { - index := int64(len(rc.iv)) - 1 - pos := uint16(0) - - if index >= 0 { - pos = rc.iv[index].length - } - - return &runReverseIterator16{ - rc: rc, - curIndex: index, - curPosInIndex: pos, - } -} - -// hasNext returns false if calling next will panic. It -// returns true when there is at least one more value -// available in the iteration sequence. -func (ri *runReverseIterator16) hasNext() bool { - return ri.curIndex > 0 || ri.curIndex == 0 && ri.curPosInIndex >= 0 -} - -// next returns the next value in the iteration sequence. 
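The forward runIterator16 removed above walks the set by tracking an interval index plus an offset inside that interval. A stripped-down sketch of the same walk (iter is an illustrative type, not the vendored iterator):

package main

import "fmt"

type span struct{ start, length uint16 } // length = run length minus one

// iter yields every value of every run in ascending order by tracking
// the current interval index and the offset inside it.
type iter struct {
	iv  []span
	idx int
	pos uint16
}

func (it *iter) hasNext() bool { return it.idx < len(it.iv) }

func (it *iter) next() uint16 {
	v := it.iv[it.idx].start + it.pos
	if it.pos == it.iv[it.idx].length {
		it.idx++ // run exhausted, move to the next interval
		it.pos = 0
	} else {
		it.pos++
	}
	return v
}

func main() {
	it := &iter{iv: []span{{1, 3}, {10, 1}}} // {1,2,3,4} and {10,11}
	for it.hasNext() {
		fmt.Print(it.next(), " ") // 1 2 3 4 10 11
	}
	fmt.Println()
}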
-func (ri *runReverseIterator16) next() uint16 { - next := ri.rc.iv[ri.curIndex].start + ri.curPosInIndex - - if ri.curPosInIndex > 0 { - ri.curPosInIndex-- - } else { - ri.curIndex-- - - if ri.curIndex >= 0 { - ri.curPosInIndex = ri.rc.iv[ri.curIndex].length - } - } - - return next -} - -func (rc *runContainer16) newManyRunIterator16() *runIterator16 { - return rc.newRunIterator16() -} - -// hs are the high bits to include to avoid needing to reiterate over the buffer in NextMany -func (ri *runIterator16) nextMany(hs uint32, buf []uint32) int { - n := 0 - - if !ri.hasNext() { - return n - } - - // start and end are inclusive - for n < len(buf) { - moreVals := 0 - - if ri.rc.iv[ri.curIndex].length >= ri.curPosInIndex { - // add as many as you can from this seq - moreVals = minOfInt(int(ri.rc.iv[ri.curIndex].length-ri.curPosInIndex)+1, len(buf)-n) - base := uint32(ri.rc.iv[ri.curIndex].start+ri.curPosInIndex) | hs - - // allows BCE - buf2 := buf[n : n+moreVals] - for i := range buf2 { - buf2[i] = base + uint32(i) - } - - // update values - n += moreVals - } - - if moreVals+int(ri.curPosInIndex) > int(ri.rc.iv[ri.curIndex].length) { - ri.curPosInIndex = 0 - ri.curIndex++ - - if ri.curIndex == int64(len(ri.rc.iv)) { - break - } - } else { - ri.curPosInIndex += uint16(moreVals) //moreVals always fits in uint16 - } - } - - return n -} - -// remove removes key from the container. -func (rc *runContainer16) removeKey(key uint16) (wasPresent bool) { - - var index int64 - index, wasPresent, _ = rc.search(int64(key), nil) - if !wasPresent { - return // already removed, nothing to do. - } - pos := key - rc.iv[index].start - rc.deleteAt(&index, &pos) - return -} - -// internal helper functions - -func (rc *runContainer16) deleteAt(curIndex *int64, curPosInIndex *uint16) { - rc.card-- - ci := *curIndex - pos := *curPosInIndex - - // are we first, last, or in the middle of our interval16? - switch { - case pos == 0: - if int64(rc.iv[ci].length) == 0 { - // our interval disappears - rc.iv = append(rc.iv[:ci], rc.iv[ci+1:]...) - // curIndex stays the same, since the delete did - // the advance for us. - *curPosInIndex = 0 - } else { - rc.iv[ci].start++ // no longer overflowable - rc.iv[ci].length-- - } - case pos == rc.iv[ci].length: - // length - rc.iv[ci].length-- - // our interval16 cannot disappear, else we would have been pos == 0, case first above. - *curPosInIndex-- - // if we leave *curIndex alone, then Next() will work properly even after the delete. - default: - //middle - // split into two, adding an interval16 - new0 := newInterval16Range(rc.iv[ci].start, rc.iv[ci].start+*curPosInIndex-1) - - new1start := int64(rc.iv[ci].start+*curPosInIndex) + 1 - if new1start > int64(MaxUint16) { - panic("overflow?!?!") - } - new1 := newInterval16Range(uint16(new1start), rc.iv[ci].last()) - tail := append([]interval16{new0, new1}, rc.iv[ci+1:]...) - rc.iv = append(rc.iv[:ci], tail...) 
- // update curIndex and curPosInIndex - *curIndex++ - *curPosInIndex = 0 - } - -} - -func have4Overlap16(astart, alast, bstart, blast int64) bool { - if alast+1 <= bstart { - return false - } - return blast+1 > astart -} - -func intersectWithLeftover16(astart, alast, bstart, blast int64) (isOverlap, isLeftoverA, isLeftoverB bool, leftoverstart int64, intersection interval16) { - if !have4Overlap16(astart, alast, bstart, blast) { - return - } - isOverlap = true - - // do the intersection: - if bstart > astart { - intersection.start = uint16(bstart) - } else { - intersection.start = uint16(astart) - } - - switch { - case blast < alast: - isLeftoverA = true - leftoverstart = blast + 1 - intersection.length = uint16(blast) - intersection.start - case alast < blast: - isLeftoverB = true - leftoverstart = alast + 1 - intersection.length = uint16(alast) - intersection.start - default: - // alast == blast - intersection.length = uint16(alast) - intersection.start - } - - return -} - -func (rc *runContainer16) findNextIntervalThatIntersectsStartingFrom(startIndex int64, key int64) (index int64, done bool) { - - rc.myOpts.startIndex = startIndex - rc.myOpts.endxIndex = 0 - - w, _, _ := rc.search(key, &rc.myOpts) - // rc.search always returns w < len(rc.iv) - if w < startIndex { - // not found and comes before lower bound startIndex, - // so just use the lower bound. - if startIndex == int64(len(rc.iv)) { - // also this bump up means that we are done - return startIndex, true - } - return startIndex, false - } - - return w, false -} - -func sliceToString16(m []interval16) string { - s := "" - for i := range m { - s += fmt.Sprintf("%v: %s, ", i, m[i]) - } - return s -} - -// selectInt16 returns the j-th value in the container. -// We panic of j is out of bounds. -func (rc *runContainer16) selectInt16(j uint16) int { - n := rc.cardinality() - if int64(j) > n { - panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n)) - } - - var offset int64 - for k := range rc.iv { - nextOffset := offset + rc.iv[k].runlen() - if nextOffset > int64(j) { - return int(int64(rc.iv[k].start) + (int64(j) - offset)) - } - offset = nextOffset - } - panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n)) -} - -// helper for invert -func (rc *runContainer16) invertlastInterval(origin uint16, lastIdx int) []interval16 { - cur := rc.iv[lastIdx] - if cur.last() == MaxUint16 { - if cur.start == origin { - return nil // empty container - } - return []interval16{newInterval16Range(origin, cur.start-1)} - } - if cur.start == origin { - return []interval16{newInterval16Range(cur.last()+1, MaxUint16)} - } - // invert splits - return []interval16{ - newInterval16Range(origin, cur.start-1), - newInterval16Range(cur.last()+1, MaxUint16), - } -} - -// invert returns a new container (not inplace), that is -// the inversion of rc. For each bit b in rc, the -// returned value has !b -func (rc *runContainer16) invert() *runContainer16 { - ni := len(rc.iv) - var m []interval16 - switch ni { - case 0: - return &runContainer16{iv: []interval16{newInterval16Range(0, MaxUint16)}} - case 1: - return &runContainer16{iv: rc.invertlastInterval(0, 0)} - } - var invstart int64 - ult := ni - 1 - for i, cur := range rc.iv { - if i == ult { - // invertlastInteval will add both intervals (b) and (c) in - // diagram below. - m = append(m, rc.invertlastInterval(uint16(invstart), i)...) - break - } - // INVAR: i and cur are not the last interval, there is a next at i+1 - // - // ........[cur.start, cur.last] ...... 
[next.start, next.last].... - // ^ ^ ^ - // (a) (b) (c) - // - // Now: we add interval (a); but if (a) is empty, for cur.start==0, we skip it. - if cur.start > 0 { - m = append(m, newInterval16Range(uint16(invstart), cur.start-1)) - } - invstart = int64(cur.last() + 1) - } - return &runContainer16{iv: m} -} - -func (iv interval16) equal(b interval16) bool { - return iv.start == b.start && iv.length == b.length -} - -func (iv interval16) isSuperSetOf(b interval16) bool { - return iv.start <= b.start && b.last() <= iv.last() -} - -func (iv interval16) subtractInterval(del interval16) (left []interval16, delcount int64) { - isect, isEmpty := intersectInterval16s(iv, del) - - if isEmpty { - return nil, 0 - } - if del.isSuperSetOf(iv) { - return nil, iv.runlen() - } - - switch { - case isect.start > iv.start && isect.last() < iv.last(): - new0 := newInterval16Range(iv.start, isect.start-1) - new1 := newInterval16Range(isect.last()+1, iv.last()) - return []interval16{new0, new1}, isect.runlen() - case isect.start == iv.start: - return []interval16{newInterval16Range(isect.last()+1, iv.last())}, isect.runlen() - default: - return []interval16{newInterval16Range(iv.start, isect.start-1)}, isect.runlen() - } -} - -func (rc *runContainer16) isubtract(del interval16) { - origiv := make([]interval16, len(rc.iv)) - copy(origiv, rc.iv) - n := int64(len(rc.iv)) - if n == 0 { - return // already done. - } - - _, isEmpty := intersectInterval16s(newInterval16Range(rc.iv[0].start, rc.iv[n-1].last()), del) - if isEmpty { - return // done - } - - // INVAR there is some intersection between rc and del - istart, startAlready, _ := rc.search(int64(del.start), nil) - ilast, lastAlready, _ := rc.search(int64(del.last()), nil) - rc.card = -1 - if istart == -1 { - if ilast == n-1 && !lastAlready { - rc.iv = nil - return - } - } - // some intervals will remain - switch { - case startAlready && lastAlready: - res0, _ := rc.iv[istart].subtractInterval(del) - - // would overwrite values in iv b/c res0 can have len 2. so - // write to origiv instead. - lost := 1 + ilast - istart - changeSize := int64(len(res0)) - lost - newSize := int64(len(rc.iv)) + changeSize - - // rc.iv = append(pre, caboose...) - // return - - if ilast != istart { - res1, _ := rc.iv[ilast].subtractInterval(del) - res0 = append(res0, res1...) - changeSize = int64(len(res0)) - lost - newSize = int64(len(rc.iv)) + changeSize - } - switch { - case changeSize < 0: - // shrink - copy(rc.iv[istart+int64(len(res0)):], rc.iv[ilast+1:]) - copy(rc.iv[istart:istart+int64(len(res0))], res0) - rc.iv = rc.iv[:newSize] - return - case changeSize == 0: - // stay the same - copy(rc.iv[istart:istart+int64(len(res0))], res0) - return - default: - // changeSize > 0 is only possible when ilast == istart. 
- // Hence we now know: changeSize == 1 and len(res0) == 2 - rc.iv = append(rc.iv, interval16{}) - // len(rc.iv) is correct now, no need to rc.iv = rc.iv[:newSize] - - // copy the tail into place - copy(rc.iv[ilast+2:], rc.iv[ilast+1:]) - // copy the new item(s) into place - copy(rc.iv[istart:istart+2], res0) - return - } - - case !startAlready && !lastAlready: - // we get to discard whole intervals - - // from the search() definition: - - // if del.start is not present, then istart is - // set as follows: - // - // a) istart == n-1 if del.start is beyond our - // last interval16 in rc.iv; - // - // b) istart == -1 if del.start is before our first - // interval16 in rc.iv; - // - // c) istart is set to the minimum index of rc.iv - // which comes strictly before the del.start; - // so del.start > rc.iv[istart].last, - // and if istart+1 exists, then del.start < rc.iv[istart+1].startx - - // if del.last is not present, then ilast is - // set as follows: - // - // a) ilast == n-1 if del.last is beyond our - // last interval16 in rc.iv; - // - // b) ilast == -1 if del.last is before our first - // interval16 in rc.iv; - // - // c) ilast is set to the minimum index of rc.iv - // which comes strictly before the del.last; - // so del.last > rc.iv[ilast].last, - // and if ilast+1 exists, then del.last < rc.iv[ilast+1].start - - // INVAR: istart >= 0 - pre := rc.iv[:istart+1] - if ilast == n-1 { - rc.iv = pre - return - } - // INVAR: ilast < n-1 - lost := ilast - istart - changeSize := -lost - newSize := int64(len(rc.iv)) + changeSize - if changeSize != 0 { - copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:]) - } - rc.iv = rc.iv[:newSize] - return - - case startAlready && !lastAlready: - // we can only shrink or stay the same size - // i.e. we either eliminate the whole interval, - // or just cut off the right side. - res0, _ := rc.iv[istart].subtractInterval(del) - if len(res0) > 0 { - // len(res) must be 1 - rc.iv[istart] = res0[0] - } - lost := 1 + (ilast - istart) - changeSize := int64(len(res0)) - lost - newSize := int64(len(rc.iv)) + changeSize - if changeSize != 0 { - copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:]) - } - rc.iv = rc.iv[:newSize] - return - - case !startAlready && lastAlready: - // we can only shrink or stay the same size - res1, _ := rc.iv[ilast].subtractInterval(del) - lost := ilast - istart - changeSize := int64(len(res1)) - lost - newSize := int64(len(rc.iv)) + changeSize - if changeSize != 0 { - // move the tail first to make room for res1 - copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:]) - } - copy(rc.iv[istart+1:], res1) - rc.iv = rc.iv[:newSize] - return - } -} - -// compute rc minus b, and return the result as a new value (not inplace). -// port of run_container_andnot from CRoaring... 
-// https://github.com/RoaringBitmap/CRoaring/blob/master/src/containers/run.c#L435-L496 -func (rc *runContainer16) AndNotRunContainer16(b *runContainer16) *runContainer16 { - - if len(b.iv) == 0 || len(rc.iv) == 0 { - return rc - } - - dst := newRunContainer16() - apos := 0 - bpos := 0 - - a := rc - - astart := a.iv[apos].start - alast := a.iv[apos].last() - bstart := b.iv[bpos].start - blast := b.iv[bpos].last() - - alen := len(a.iv) - blen := len(b.iv) - - for apos < alen && bpos < blen { - switch { - case alast < bstart: - // output the first run - dst.iv = append(dst.iv, newInterval16Range(astart, alast)) - apos++ - if apos < alen { - astart = a.iv[apos].start - alast = a.iv[apos].last() - } - case blast < astart: - // exit the second run - bpos++ - if bpos < blen { - bstart = b.iv[bpos].start - blast = b.iv[bpos].last() - } - default: - // a: [ ] - // b: [ ] - // alast >= bstart - // blast >= astart - if astart < bstart { - dst.iv = append(dst.iv, newInterval16Range(astart, bstart-1)) - } - if alast > blast { - astart = blast + 1 - } else { - apos++ - if apos < alen { - astart = a.iv[apos].start - alast = a.iv[apos].last() - } - } - } - } - if apos < alen { - dst.iv = append(dst.iv, newInterval16Range(astart, alast)) - apos++ - if apos < alen { - dst.iv = append(dst.iv, a.iv[apos:]...) - } - } - - return dst -} - -func (rc *runContainer16) numberOfRuns() (nr int) { - return len(rc.iv) -} - -func (rc *runContainer16) containerType() contype { - return run16Contype -} - -func (rc *runContainer16) equals16(srb *runContainer16) bool { - // Check if the containers are the same object. - if rc == srb { - return true - } - - if len(srb.iv) != len(rc.iv) { - return false - } - - for i, v := range rc.iv { - if v != srb.iv[i] { - return false - } - } - return true -} - -// compile time verify we meet interface requirements -var _ container = &runContainer16{} - -func (rc *runContainer16) clone() container { - return newRunContainer16CopyIv(rc.iv) -} - -func (rc *runContainer16) minimum() uint16 { - return rc.iv[0].start // assume not empty -} - -func (rc *runContainer16) maximum() uint16 { - return rc.iv[len(rc.iv)-1].last() // assume not empty -} - -func (rc *runContainer16) isFull() bool { - return (len(rc.iv) == 1) && ((rc.iv[0].start == 0) && (rc.iv[0].last() == MaxUint16)) -} - -func (rc *runContainer16) and(a container) container { - if rc.isFull() { - return a.clone() - } - switch c := a.(type) { - case *runContainer16: - return rc.intersect(c) - case *arrayContainer: - return rc.andArray(c) - case *bitmapContainer: - return rc.andBitmapContainer(c) - } - panic("unsupported container type") -} - -func (rc *runContainer16) andCardinality(a container) int { - switch c := a.(type) { - case *runContainer16: - return int(rc.intersectCardinality(c)) - case *arrayContainer: - return rc.andArrayCardinality(c) - case *bitmapContainer: - return rc.andBitmapContainerCardinality(c) - } - panic("unsupported container type") -} - -// andBitmapContainer finds the intersection of rc and b. 
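// Illustrative sketch (not from the vendored file): the run-vs-run difference
// above (AndNotRunContainer16) walks both interval lists with two cursors,
// emitting the part of each A-run that precedes the current B-run and clipping
// the A cursor past overlapping B-runs. A minimal stand-alone version over
// plain int intervals, with hypothetical span/diffSpans names, could look like:
package main

import "fmt"

type span struct{ start, last int } // closed interval [start, last]

// diffSpans returns a \ b for two sorted, non-overlapping interval lists.
func diffSpans(a, b []span) []span {
	var out []span
	if len(a) == 0 {
		return out
	}
	i, j := 0, 0
	cur := a[0] // working copy of the current a-run
	for {
		switch {
		case j == len(b) || cur.last < b[j].start:
			// the a-run ends before the next b-run begins: keep it whole
			out = append(out, cur)
			i++
			if i == len(a) {
				return out
			}
			cur = a[i]
		case b[j].last < cur.start:
			// the b-run lies entirely before the a-run: skip it
			j++
		default:
			// overlap: keep the prefix of cur that precedes b[j], if any
			if cur.start < b[j].start {
				out = append(out, span{cur.start, b[j].start - 1})
			}
			if cur.last > b[j].last {
				cur.start = b[j].last + 1 // clip and retry against the next b-run
			} else {
				i++
				if i == len(a) {
					return out
				}
				cur = a[i]
			}
		}
	}
}

func main() {
	a := []span{{0, 9}, {20, 29}}
	b := []span{{5, 24}}
	fmt.Println(diffSpans(a, b)) // [{0 4} {25 29}]
}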
-func (rc *runContainer16) andBitmapContainer(bc *bitmapContainer) container { - bc2 := newBitmapContainerFromRun(rc) - return bc2.andBitmap(bc) -} - -func (rc *runContainer16) andArrayCardinality(ac *arrayContainer) int { - pos := 0 - answer := 0 - maxpos := ac.getCardinality() - if maxpos == 0 { - return 0 // won't happen in actual code - } - v := ac.content[pos] -mainloop: - for _, p := range rc.iv { - for v < p.start { - pos++ - if pos == maxpos { - break mainloop - } - v = ac.content[pos] - } - for v <= p.last() { - answer++ - pos++ - if pos == maxpos { - break mainloop - } - v = ac.content[pos] - } - } - return answer -} - -func (rc *runContainer16) iand(a container) container { - if rc.isFull() { - return a.clone() - } - switch c := a.(type) { - case *runContainer16: - return rc.inplaceIntersect(c) - case *arrayContainer: - return rc.andArray(c) - case *bitmapContainer: - return rc.iandBitmapContainer(c) - } - panic("unsupported container type") -} - -func (rc *runContainer16) inplaceIntersect(rc2 *runContainer16) container { - // TODO: optimize by doing less allocation, possibly? - // sect will be new - sect := rc.intersect(rc2) - *rc = *sect - return rc -} - -func (rc *runContainer16) iandBitmapContainer(bc *bitmapContainer) container { - isect := rc.andBitmapContainer(bc) - *rc = *newRunContainer16FromContainer(isect) - return rc -} - -func (rc *runContainer16) andArray(ac *arrayContainer) container { - if len(rc.iv) == 0 { - return newArrayContainer() - } - - acCardinality := ac.getCardinality() - c := newArrayContainerCapacity(acCardinality) - - for rlePos, arrayPos := 0, 0; arrayPos < acCardinality; { - iv := rc.iv[rlePos] - arrayVal := ac.content[arrayPos] - - for iv.last() < arrayVal { - rlePos++ - if rlePos == len(rc.iv) { - return c - } - iv = rc.iv[rlePos] - } - - if iv.start > arrayVal { - arrayPos = advanceUntil(ac.content, arrayPos, len(ac.content), iv.start) - } else { - c.content = append(c.content, arrayVal) - arrayPos++ - } - } - return c -} - -func (rc *runContainer16) andNot(a container) container { - switch c := a.(type) { - case *arrayContainer: - return rc.andNotArray(c) - case *bitmapContainer: - return rc.andNotBitmap(c) - case *runContainer16: - return rc.andNotRunContainer16(c) - } - panic("unsupported container type") -} - -func (rc *runContainer16) fillLeastSignificant16bits(x []uint32, i int, mask uint32) { - k := 0 - var val int64 - for _, p := range rc.iv { - n := p.runlen() - for j := int64(0); j < n; j++ { - val = int64(p.start) + j - x[k+i] = uint32(val) | mask - k++ - } - } -} - -func (rc *runContainer16) getShortIterator() shortPeekable { - return rc.newRunIterator16() -} - -func (rc *runContainer16) getReverseIterator() shortIterable { - return rc.newRunReverseIterator16() -} - -func (rc *runContainer16) getManyIterator() manyIterable { - return rc.newManyRunIterator16() -} - -// add the values in the range [firstOfRange, endx). endx -// is still abe to express 2^16 because it is an int not an uint16. 
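// Illustrative sketch (not from the vendored file): andArrayCardinality above
// counts the intersection of a run container and a sorted value list without
// materialising it. The same walk over plain ints, with hypothetical names:
package main

import "fmt"

type run struct{ start, last int } // closed interval

// countInRuns returns how many of the sorted values fall inside one of the
// sorted, non-overlapping runs.
func countInRuns(values []int, runs []run) int {
	count, pos := 0, 0
	for _, r := range runs {
		// skip values strictly below the current run
		for pos < len(values) && values[pos] < r.start {
			pos++
		}
		// count values covered by the current run
		for pos < len(values) && values[pos] <= r.last {
			count++
			pos++
		}
		if pos == len(values) {
			break
		}
	}
	return count
}

func main() {
	values := []int{1, 3, 7, 8, 15, 40}
	runs := []run{{2, 9}, {14, 20}}
	fmt.Println(countInRuns(values, runs)) // 4 (3, 7, 8 and 15)
}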
-func (rc *runContainer16) iaddRange(firstOfRange, endx int) container { - - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange", endx)) - } - addme := newRunContainer16TakeOwnership([]interval16{ - { - start: uint16(firstOfRange), - length: uint16(endx - 1 - firstOfRange), - }, - }) - *rc = *rc.union(addme) - return rc -} - -// remove the values in the range [firstOfRange,endx) -func (rc *runContainer16) iremoveRange(firstOfRange, endx int) container { - if firstOfRange >= endx { - panic(fmt.Sprintf("request to iremove empty set [%v, %v),"+ - " nothing to do.", firstOfRange, endx)) - //return rc - } - x := newInterval16Range(uint16(firstOfRange), uint16(endx-1)) - rc.isubtract(x) - return rc -} - -// not flip the values in the range [firstOfRange,endx) -func (rc *runContainer16) not(firstOfRange, endx int) container { - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange)) - } - - return rc.Not(firstOfRange, endx) -} - -// Not flips the values in the range [firstOfRange,endx). -// This is not inplace. Only the returned value has the flipped bits. -// -// Currently implemented as (!A intersect B) union (A minus B), -// where A is rc, and B is the supplied [firstOfRange, endx) interval. -// -// TODO(time optimization): convert this to a single pass -// algorithm by copying AndNotRunContainer16() and modifying it. -// Current routine is correct but -// makes 2 more passes through the arrays than should be -// strictly necessary. Measure both ways though--this may not matter. -// -func (rc *runContainer16) Not(firstOfRange, endx int) *runContainer16 { - - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange == %v", endx, firstOfRange)) - } - - if firstOfRange >= endx { - return rc.Clone() - } - - a := rc - // algo: - // (!A intersect B) union (A minus B) - - nota := a.invert() - - bs := []interval16{newInterval16Range(uint16(firstOfRange), uint16(endx-1))} - b := newRunContainer16TakeOwnership(bs) - - notAintersectB := nota.intersect(b) - - aMinusB := a.AndNotRunContainer16(b) - - rc2 := notAintersectB.union(aMinusB) - return rc2 -} - -// equals is now logical equals; it does not require the -// same underlying container type. -func (rc *runContainer16) equals(o container) bool { - srb, ok := o.(*runContainer16) - - if !ok { - // maybe value instead of pointer - val, valok := o.(*runContainer16) - if valok { - srb = val - ok = true - } - } - if ok { - // Check if the containers are the same object. 
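// Illustrative sketch (not from the vendored file): Not above flips the bits of
// A inside [firstOfRange, endx) by computing (!A ∩ B) ∪ (A \ B), with B being
// that interval. A brute-force check of the identity over a tiny boolean
// universe, using made-up values:
package main

import "fmt"

func main() {
	const n = 16
	a := [n]bool{2: true, 3: true, 7: true, 12: true}
	first, endx := 3, 10 // flip the half-open range [3, 10)

	var direct, viaIdentity [n]bool
	for i := 0; i < n; i++ {
		inB := i >= first && i < endx
		// direct definition: flip inside B, keep outside B
		if inB {
			direct[i] = !a[i]
		} else {
			direct[i] = a[i]
		}
		// (!A ∩ B) ∪ (A \ B)
		viaIdentity[i] = (!a[i] && inB) || (a[i] && !inB)
	}
	fmt.Println(direct == viaIdentity) // true
}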
- if rc == srb { - return true - } - - if len(srb.iv) != len(rc.iv) { - return false - } - - for i, v := range rc.iv { - if v != srb.iv[i] { - return false - } - } - return true - } - - // use generic comparison - if o.getCardinality() != rc.getCardinality() { - return false - } - rit := rc.getShortIterator() - bit := o.getShortIterator() - - //k := 0 - for rit.hasNext() { - if bit.next() != rit.next() { - return false - } - //k++ - } - return true -} - -func (rc *runContainer16) iaddReturnMinimized(x uint16) container { - rc.Add(x) - return rc -} - -func (rc *runContainer16) iadd(x uint16) (wasNew bool) { - return rc.Add(x) -} - -func (rc *runContainer16) iremoveReturnMinimized(x uint16) container { - rc.removeKey(x) - return rc -} - -func (rc *runContainer16) iremove(x uint16) bool { - return rc.removeKey(x) -} - -func (rc *runContainer16) or(a container) container { - if rc.isFull() { - return rc.clone() - } - switch c := a.(type) { - case *runContainer16: - return rc.union(c) - case *arrayContainer: - return rc.orArray(c) - case *bitmapContainer: - return rc.orBitmapContainer(c) - } - panic("unsupported container type") -} - -func (rc *runContainer16) orCardinality(a container) int { - switch c := a.(type) { - case *runContainer16: - return int(rc.unionCardinality(c)) - case *arrayContainer: - return rc.orArrayCardinality(c) - case *bitmapContainer: - return rc.orBitmapContainerCardinality(c) - } - panic("unsupported container type") -} - -// orBitmapContainer finds the union of rc and bc. -func (rc *runContainer16) orBitmapContainer(bc *bitmapContainer) container { - bc2 := newBitmapContainerFromRun(rc) - return bc2.iorBitmap(bc) -} - -func (rc *runContainer16) andBitmapContainerCardinality(bc *bitmapContainer) int { - answer := 0 - for i := range rc.iv { - answer += bc.getCardinalityInRange(uint(rc.iv[i].start), uint(rc.iv[i].last())+1) - } - //bc.computeCardinality() - return answer -} - -func (rc *runContainer16) orBitmapContainerCardinality(bc *bitmapContainer) int { - return rc.getCardinality() + bc.getCardinality() - rc.andBitmapContainerCardinality(bc) -} - -// orArray finds the union of rc and ac. -func (rc *runContainer16) orArray(ac *arrayContainer) container { - bc1 := newBitmapContainerFromRun(rc) - bc2 := ac.toBitmapContainer() - return bc1.orBitmap(bc2) -} - -// orArray finds the union of rc and ac. 
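// Illustrative sketch (not from the vendored file): the union-cardinality
// helpers above (orBitmapContainerCardinality and friends) never build the
// union; they rely on inclusion-exclusion, |A ∪ B| = |A| + |B| - |A ∩ B|,
// so only the intersection count is computed. Made concrete with plain maps:
package main

import "fmt"

func unionCardinality(a, b map[int]bool) int {
	inter := 0
	for v := range a {
		if b[v] {
			inter++
		}
	}
	return len(a) + len(b) - inter
}

func main() {
	a := map[int]bool{1: true, 2: true, 3: true}
	b := map[int]bool{3: true, 4: true}
	fmt.Println(unionCardinality(a, b)) // 4
}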
-func (rc *runContainer16) orArrayCardinality(ac *arrayContainer) int { - return ac.getCardinality() + rc.getCardinality() - rc.andArrayCardinality(ac) -} - -func (rc *runContainer16) ior(a container) container { - if rc.isFull() { - return rc - } - switch c := a.(type) { - case *runContainer16: - return rc.inplaceUnion(c) - case *arrayContainer: - return rc.iorArray(c) - case *bitmapContainer: - return rc.iorBitmapContainer(c) - } - panic("unsupported container type") -} - -func (rc *runContainer16) inplaceUnion(rc2 *runContainer16) container { - for _, p := range rc2.iv { - last := int64(p.last()) - for i := int64(p.start); i <= last; i++ { - rc.Add(uint16(i)) - } - } - return rc -} - -func (rc *runContainer16) iorBitmapContainer(bc *bitmapContainer) container { - - it := bc.getShortIterator() - for it.hasNext() { - rc.Add(it.next()) - } - return rc -} - -func (rc *runContainer16) iorArray(ac *arrayContainer) container { - it := ac.getShortIterator() - for it.hasNext() { - rc.Add(it.next()) - } - return rc -} - -// lazyIOR is described (not yet implemented) in -// this nice note from @lemire on -// https://github.com/RoaringBitmap/roaring/pull/70#issuecomment-263613737 -// -// Description of lazyOR and lazyIOR from @lemire: -// -// Lazy functions are optional and can be simply -// wrapper around non-lazy functions. -// -// The idea of "laziness" is as follows. It is -// inspired by the concept of lazy evaluation -// you might be familiar with (functional programming -// and all that). So a roaring bitmap is -// such that all its containers are, in some -// sense, chosen to use as little memory as -// possible. This is nice. Also, all bitsets -// are "cardinality aware" so that you can do -// fast rank/select queries, or query the -// cardinality of the whole bitmap... very fast, -// without latency. -// -// However, imagine that you are aggregating 100 -// bitmaps together. So you OR the first two, then OR -// that with the third one and so forth. Clearly, -// intermediate bitmaps don't need to be as -// compressed as possible, right? They can be -// in a "dirty state". You only need the end -// result to be in a nice state... which you -// can achieve by calling repairAfterLazy at the end. -// -// The Java/C code does something special for -// the in-place lazy OR runs. The idea is that -// instead of taking two run containers and -// generating a new one, we actually try to -// do the computation in-place through a -// technique invented by @gssiyankai (pinging him!). -// What you do is you check whether the host -// run container has lots of extra capacity. -// If it does, you move its data at the end of -// the backing array, and then you write -// the answer at the beginning. What this -// trick does is minimize memory allocations. -// -func (rc *runContainer16) lazyIOR(a container) container { - // not lazy at the moment - return rc.ior(a) -} - -// lazyOR is described above in lazyIOR. -func (rc *runContainer16) lazyOR(a container) container { - // not lazy at the moment - return rc.or(a) -} - -func (rc *runContainer16) intersects(a container) bool { - // TODO: optimize by doing inplace/less allocation, possibly? 
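// Illustrative sketch (not from the vendored file): the long note above
// explains why a "dirty", in-place OR is attractive when folding many bitmaps
// together, with a single repair at the end. From the public roaring API the
// usual way to aggregate is a helper such as roaring.FastOr; whether it takes
// the lazy path internally is assumed here, not asserted.
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	// Many small bitmaps to fold together.
	parts := []*roaring.Bitmap{
		roaring.BitmapOf(1, 2, 3),
		roaring.BitmapOf(3, 4, 5),
		roaring.BitmapOf(100, 200),
	}

	// Aggregate in one call instead of OR-ing pairwise; intermediate results
	// do not need to be kept in their most compact form along the way.
	union := roaring.FastOr(parts...)
	fmt.Println(union.GetCardinality()) // 7
}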
- isect := rc.and(a) - return isect.getCardinality() > 0 -} - -func (rc *runContainer16) xor(a container) container { - switch c := a.(type) { - case *arrayContainer: - return rc.xorArray(c) - case *bitmapContainer: - return rc.xorBitmap(c) - case *runContainer16: - return rc.xorRunContainer16(c) - } - panic("unsupported container type") -} - -func (rc *runContainer16) iandNot(a container) container { - switch c := a.(type) { - case *arrayContainer: - return rc.iandNotArray(c) - case *bitmapContainer: - return rc.iandNotBitmap(c) - case *runContainer16: - return rc.iandNotRunContainer16(c) - } - panic("unsupported container type") -} - -// flip the values in the range [firstOfRange,endx) -func (rc *runContainer16) inot(firstOfRange, endx int) container { - if firstOfRange >= endx { - panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange)) - } - // TODO: minimize copies, do it all inplace; not() makes a copy. - rc = rc.Not(firstOfRange, endx) - return rc -} - -func (rc *runContainer16) getCardinality() int { - return int(rc.cardinality()) -} - -func (rc *runContainer16) rank(x uint16) int { - n := int64(len(rc.iv)) - xx := int64(x) - w, already, _ := rc.search(xx, nil) - if w < 0 { - return 0 - } - if !already && w == n-1 { - return rc.getCardinality() - } - var rnk int64 - if !already { - for i := int64(0); i <= w; i++ { - rnk += rc.iv[i].runlen() - } - return int(rnk) - } - for i := int64(0); i < w; i++ { - rnk += rc.iv[i].runlen() - } - rnk += int64(x-rc.iv[w].start) + 1 - return int(rnk) -} - -func (rc *runContainer16) selectInt(x uint16) int { - return rc.selectInt16(x) -} - -func (rc *runContainer16) andNotRunContainer16(b *runContainer16) container { - return rc.AndNotRunContainer16(b) -} - -func (rc *runContainer16) andNotArray(ac *arrayContainer) container { - rcb := rc.toBitmapContainer() - acb := ac.toBitmapContainer() - return rcb.andNotBitmap(acb) -} - -func (rc *runContainer16) andNotBitmap(bc *bitmapContainer) container { - rcb := rc.toBitmapContainer() - return rcb.andNotBitmap(bc) -} - -func (rc *runContainer16) toBitmapContainer() *bitmapContainer { - bc := newBitmapContainer() - for i := range rc.iv { - bc.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1) - } - bc.computeCardinality() - return bc -} - -func (rc *runContainer16) iandNotRunContainer16(x2 *runContainer16) container { - rcb := rc.toBitmapContainer() - x2b := x2.toBitmapContainer() - rcb.iandNotBitmapSurely(x2b) - // TODO: check size and optimize the return value - // TODO: is inplace modification really required? If not, elide the copy. - rc2 := newRunContainer16FromBitmapContainer(rcb) - *rc = *rc2 - return rc -} - -func (rc *runContainer16) iandNotArray(ac *arrayContainer) container { - rcb := rc.toBitmapContainer() - acb := ac.toBitmapContainer() - rcb.iandNotBitmapSurely(acb) - // TODO: check size and optimize the return value - // TODO: is inplace modification really required? If not, elide the copy. - rc2 := newRunContainer16FromBitmapContainer(rcb) - *rc = *rc2 - return rc -} - -func (rc *runContainer16) iandNotBitmap(bc *bitmapContainer) container { - rcb := rc.toBitmapContainer() - rcb.iandNotBitmapSurely(bc) - // TODO: check size and optimize the return value - // TODO: is inplace modification really required? If not, elide the copy. 
- rc2 := newRunContainer16FromBitmapContainer(rcb) - *rc = *rc2 - return rc -} - -func (rc *runContainer16) xorRunContainer16(x2 *runContainer16) container { - rcb := rc.toBitmapContainer() - x2b := x2.toBitmapContainer() - return rcb.xorBitmap(x2b) -} - -func (rc *runContainer16) xorArray(ac *arrayContainer) container { - rcb := rc.toBitmapContainer() - acb := ac.toBitmapContainer() - return rcb.xorBitmap(acb) -} - -func (rc *runContainer16) xorBitmap(bc *bitmapContainer) container { - rcb := rc.toBitmapContainer() - return rcb.xorBitmap(bc) -} - -// convert to bitmap or array *if needed* -func (rc *runContainer16) toEfficientContainer() container { - - // runContainer16SerializedSizeInBytes(numRuns) - sizeAsRunContainer := rc.getSizeInBytes() - sizeAsBitmapContainer := bitmapContainerSizeInBytes() - card := int(rc.cardinality()) - sizeAsArrayContainer := arrayContainerSizeInBytes(card) - if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) { - return rc - } - if card <= arrayDefaultMaxSize { - return rc.toArrayContainer() - } - bc := newBitmapContainerFromRun(rc) - return bc -} - -func (rc *runContainer16) toArrayContainer() *arrayContainer { - ac := newArrayContainer() - for i := range rc.iv { - ac.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1) - } - return ac -} - -func newRunContainer16FromContainer(c container) *runContainer16 { - - switch x := c.(type) { - case *runContainer16: - return x.Clone() - case *arrayContainer: - return newRunContainer16FromArray(x) - case *bitmapContainer: - return newRunContainer16FromBitmapContainer(x) - } - panic("unsupported container type") -} - -// And finds the intersection of rc and b. -func (rc *runContainer16) And(b *Bitmap) *Bitmap { - out := NewBitmap() - for _, p := range rc.iv { - plast := p.last() - for i := p.start; i <= plast; i++ { - if b.Contains(uint32(i)) { - out.Add(uint32(i)) - } - } - } - return out -} - -// Xor returns the exclusive-or of rc and b. -func (rc *runContainer16) Xor(b *Bitmap) *Bitmap { - out := b.Clone() - for _, p := range rc.iv { - plast := p.last() - for v := p.start; v <= plast; v++ { - w := uint32(v) - if out.Contains(w) { - out.RemoveRange(uint64(w), uint64(w+1)) - } else { - out.Add(w) - } - } - } - return out -} - -// Or returns the union of rc and b. -func (rc *runContainer16) Or(b *Bitmap) *Bitmap { - out := b.Clone() - for _, p := range rc.iv { - plast := p.last() - for v := p.start; v <= plast; v++ { - out.Add(uint32(v)) - } - } - return out -} - -// serializedSizeInBytes returns the number of bytes of memory -// required by this runContainer16. 
This is for the -// Roaring format, as specified https://github.com/RoaringBitmap/RoaringFormatSpec/ -func (rc *runContainer16) serializedSizeInBytes() int { - // number of runs in one uint16, then each run - // needs two more uint16 - return 2 + len(rc.iv)*4 -} - -func (rc *runContainer16) addOffset(x uint16) []container { - low := newRunContainer16() - high := newRunContainer16() - - for _, iv := range rc.iv { - val := int(iv.start) + int(x) - finalVal := int(val) + int(iv.length) - if val <= 0xffff { - if finalVal <= 0xffff { - low.iv = append(low.iv, interval16{uint16(val), iv.length}) - } else { - low.iv = append(low.iv, interval16{uint16(val), uint16(0xffff - val)}) - high.iv = append(high.iv, interval16{uint16(0), uint16(finalVal & 0xffff)}) - } - } else { - high.iv = append(high.iv, interval16{uint16(val & 0xffff), iv.length}) - } - } - return []container{low, high} -} diff --git a/vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go deleted file mode 100644 index 84537d087..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/runcontainer_gen.go +++ /dev/null @@ -1,1104 +0,0 @@ -package roaring - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *addHelper16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbai uint32 - zbai, err = dc.ReadMapHeader() - if err != nil { - return - } - for zbai > 0 { - zbai-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "runstart": - z.runstart, err = dc.ReadUint16() - if err != nil { - return - } - case "runlen": - z.runlen, err = dc.ReadUint16() - if err != nil { - return - } - case "actuallyAdded": - z.actuallyAdded, err = dc.ReadUint16() - if err != nil { - return - } - case "m": - var zcmr uint32 - zcmr, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.m) >= int(zcmr) { - z.m = (z.m)[:zcmr] - } else { - z.m = make([]interval16, zcmr) - } - for zxvk := range z.m { - var zajw uint32 - zajw, err = dc.ReadMapHeader() - if err != nil { - return - } - for zajw > 0 { - zajw-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.m[zxvk].start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.m[zxvk].length, err = dc.ReadUint16() - z.m[zxvk].length -= z.m[zxvk].start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "rc": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - var zwht uint32 - zwht, err = dc.ReadMapHeader() - if err != nil { - return - } - for zwht > 0 { - zwht-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zhct uint32 - zhct, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.rc.iv) >= int(zhct) { - z.rc.iv = (z.rc.iv)[:zhct] - } else { - z.rc.iv = make([]interval16, zhct) - } - for zbzg := range z.rc.iv { - var zcua uint32 - zcua, err = dc.ReadMapHeader() - if err != nil { - return - } - for zcua > 0 { - zcua-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.rc.iv[zbzg].start, err = 
dc.ReadUint16() - if err != nil { - return - } - case "last": - z.rc.iv[zbzg].length, err = dc.ReadUint16() - z.rc.iv[zbzg].length -= z.rc.iv[zbzg].start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "card": - z.rc.card, err = dc.ReadInt64() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *addHelper16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 5 - // write "runstart" - err = en.Append(0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.runstart) - if err != nil { - return - } - // write "runlen" - err = en.Append(0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e) - if err != nil { - return err - } - err = en.WriteUint16(z.runlen) - if err != nil { - return - } - // write "actuallyAdded" - err = en.Append(0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64) - if err != nil { - return err - } - err = en.WriteUint16(z.actuallyAdded) - if err != nil { - return - } - // write "m" - err = en.Append(0xa1, 0x6d) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.m))) - if err != nil { - return - } - for zxvk := range z.m { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.m[zxvk].start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.m[zxvk].last()) - if err != nil { - return - } - } - // write "rc" - err = en.Append(0xa2, 0x72, 0x63) - if err != nil { - return err - } - if z.rc == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - // map header, size 2 - // write "iv" - err = en.Append(0x82, 0xa2, 0x69, 0x76) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.rc.iv))) - if err != nil { - return - } - for zbzg := range z.rc.iv { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.rc.iv[zbzg].start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.rc.iv[zbzg].last()) - if err != nil { - return - } - } - // write "card" - err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64) - if err != nil { - return err - } - err = en.WriteInt64(z.rc.card) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *addHelper16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 5 - // string "runstart" - o = append(o, 0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.runstart) - // string "runlen" - o = append(o, 0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e) - o = msgp.AppendUint16(o, z.runlen) - // string "actuallyAdded" - o = append(o, 0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64) - o = msgp.AppendUint16(o, z.actuallyAdded) - // string "m" - o = append(o, 0xa1, 0x6d) - o = msgp.AppendArrayHeader(o, uint32(len(z.m))) - for zxvk := range z.m { - // map header, size 2 - // string 
"start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.m[zxvk].start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.m[zxvk].last()) - } - // string "rc" - o = append(o, 0xa2, 0x72, 0x63) - if z.rc == nil { - o = msgp.AppendNil(o) - } else { - // map header, size 2 - // string "iv" - o = append(o, 0x82, 0xa2, 0x69, 0x76) - o = msgp.AppendArrayHeader(o, uint32(len(z.rc.iv))) - for zbzg := range z.rc.iv { - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.rc.iv[zbzg].start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.rc.iv[zbzg].last()) - } - // string "card" - o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64) - o = msgp.AppendInt64(o, z.rc.card) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *addHelper16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zxhx uint32 - zxhx, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zxhx > 0 { - zxhx-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "runstart": - z.runstart, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "runlen": - z.runlen, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "actuallyAdded": - z.actuallyAdded, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "m": - var zlqf uint32 - zlqf, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.m) >= int(zlqf) { - z.m = (z.m)[:zlqf] - } else { - z.m = make([]interval16, zlqf) - } - for zxvk := range z.m { - var zdaf uint32 - zdaf, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zdaf > 0 { - zdaf-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.m[zxvk].start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.m[zxvk].length, bts, err = msgp.ReadUint16Bytes(bts) - z.m[zxvk].length -= z.m[zxvk].start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "rc": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - var zpks uint32 - zpks, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zpks > 0 { - zpks-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zjfb uint32 - zjfb, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.rc.iv) >= int(zjfb) { - z.rc.iv = (z.rc.iv)[:zjfb] - } else { - z.rc.iv = make([]interval16, zjfb) - } - for zbzg := range z.rc.iv { - var zcxo uint32 - zcxo, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zcxo > 0 { - zcxo-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.rc.iv[zbzg].start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.rc.iv[zbzg].length, bts, err = msgp.ReadUint16Bytes(bts) - z.rc.iv[zbzg].length -= z.rc.iv[zbzg].start - if err != nil { - return - } - default: - 
bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "card": - z.rc.card, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *addHelper16) Msgsize() (s int) { - s = 1 + 9 + msgp.Uint16Size + 7 + msgp.Uint16Size + 14 + msgp.Uint16Size + 2 + msgp.ArrayHeaderSize + (len(z.m) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 3 - if z.rc == nil { - s += msgp.NilSize - } else { - s += 1 + 3 + msgp.ArrayHeaderSize + (len(z.rc.iv) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 5 + msgp.Int64Size - } - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *interval16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zeff uint32 - zeff, err = dc.ReadMapHeader() - if err != nil { - return - } - for zeff > 0 { - zeff-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.length, err = dc.ReadUint16() - z.length = -z.start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z interval16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.last()) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z interval16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.last()) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *interval16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zrsw uint32 - zrsw, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zrsw > 0 { - zrsw-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - case "last": - z.length, bts, err = msgp.ReadUint16Bytes(bts) - z.length -= z.start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z interval16) Msgsize() (s int) { - s = 1 + 6 + msgp.Uint16Size + 5 + msgp.Uint16Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *runContainer16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zdnj uint32 - zdnj, err = dc.ReadMapHeader() - if err != nil { - return - } - for zdnj > 0 { - 
zdnj-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zobc uint32 - zobc, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap(z.iv) >= int(zobc) { - z.iv = (z.iv)[:zobc] - } else { - z.iv = make([]interval16, zobc) - } - for zxpk := range z.iv { - var zsnv uint32 - zsnv, err = dc.ReadMapHeader() - if err != nil { - return - } - for zsnv > 0 { - zsnv-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.iv[zxpk].start, err = dc.ReadUint16() - if err != nil { - return - } - case "last": - z.iv[zxpk].length, err = dc.ReadUint16() - z.iv[zxpk].length -= z.iv[zxpk].start - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - } - case "card": - z.card, err = dc.ReadInt64() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *runContainer16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "iv" - err = en.Append(0x82, 0xa2, 0x69, 0x76) - if err != nil { - return err - } - err = en.WriteArrayHeader(uint32(len(z.iv))) - if err != nil { - return - } - for zxpk := range z.iv { - // map header, size 2 - // write "start" - err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.iv[zxpk].start) - if err != nil { - return - } - // write "last" - err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74) - if err != nil { - return err - } - err = en.WriteUint16(z.iv[zxpk].last()) - if err != nil { - return - } - } - // write "card" - err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64) - if err != nil { - return err - } - err = en.WriteInt64(z.card) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *runContainer16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "iv" - o = append(o, 0x82, 0xa2, 0x69, 0x76) - o = msgp.AppendArrayHeader(o, uint32(len(z.iv))) - for zxpk := range z.iv { - // map header, size 2 - // string "start" - o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74) - o = msgp.AppendUint16(o, z.iv[zxpk].start) - // string "last" - o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74) - o = msgp.AppendUint16(o, z.iv[zxpk].last()) - } - // string "card" - o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64) - o = msgp.AppendInt64(o, z.card) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *runContainer16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zkgt uint32 - zkgt, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zkgt > 0 { - zkgt-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "iv": - var zema uint32 - zema, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap(z.iv) >= int(zema) { - z.iv = (z.iv)[:zema] - } else { - z.iv = make([]interval16, zema) - } - for zxpk := range z.iv { - var zpez uint32 - zpez, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zpez > 0 { - zpez-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "start": - z.iv[zxpk].start, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - 
return - } - case "last": - z.iv[zxpk].length, bts, err = msgp.ReadUint16Bytes(bts) - z.iv[zxpk].length -= z.iv[zxpk].start - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - } - case "card": - z.card, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *runContainer16) Msgsize() (s int) { - s = 1 + 3 + msgp.ArrayHeaderSize + (len(z.iv) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 5 + msgp.Int64Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *runIterator16) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zqke uint32 - zqke, err = dc.ReadMapHeader() - if err != nil { - return - } - for zqke > 0 { - zqke-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "rc": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - return - } - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - err = z.rc.DecodeMsg(dc) - if err != nil { - return - } - } - case "curIndex": - z.curIndex, err = dc.ReadInt64() - if err != nil { - return - } - case "curPosInIndex": - z.curPosInIndex, err = dc.ReadUint16() - if err != nil { - return - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z *runIterator16) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "rc" - err = en.Append(0x83, 0xa2, 0x72, 0x63) - if err != nil { - return err - } - if z.rc == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = z.rc.EncodeMsg(en) - if err != nil { - return - } - } - // write "curIndex" - err = en.Append(0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78) - if err != nil { - return err - } - err = en.WriteInt64(z.curIndex) - if err != nil { - return - } - // write "curPosInIndex" - err = en.Append(0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78) - if err != nil { - return err - } - err = en.WriteUint16(z.curPosInIndex) - if err != nil { - return - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z *runIterator16) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "rc" - o = append(o, 0x83, 0xa2, 0x72, 0x63) - if z.rc == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.rc.MarshalMsg(o) - if err != nil { - return - } - } - // string "curIndex" - o = append(o, 0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78) - o = msgp.AppendInt64(o, z.curIndex) - // string "curPosInIndex" - o = append(o, 0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78) - o = msgp.AppendUint16(o, z.curPosInIndex) - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *runIterator16) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zqyh uint32 - zqyh, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zqyh > 0 { - zqyh-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "rc": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - 
} - z.rc = nil - } else { - if z.rc == nil { - z.rc = new(runContainer16) - } - bts, err = z.rc.UnmarshalMsg(bts) - if err != nil { - return - } - } - case "curIndex": - z.curIndex, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - case "curPosInIndex": - z.curPosInIndex, bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *runIterator16) Msgsize() (s int) { - s = 1 + 3 - if z.rc == nil { - s += msgp.NilSize - } else { - s += z.rc.Msgsize() - } - s += 9 + msgp.Int64Size + 14 + msgp.Uint16Size - return -} - -// Deprecated: DecodeMsg implements msgp.Decodable -func (z *uint16Slice) DecodeMsg(dc *msgp.Reader) (err error) { - var zjpj uint32 - zjpj, err = dc.ReadArrayHeader() - if err != nil { - return - } - if cap((*z)) >= int(zjpj) { - (*z) = (*z)[:zjpj] - } else { - (*z) = make(uint16Slice, zjpj) - } - for zywj := range *z { - (*z)[zywj], err = dc.ReadUint16() - if err != nil { - return - } - } - return -} - -// Deprecated: EncodeMsg implements msgp.Encodable -func (z uint16Slice) EncodeMsg(en *msgp.Writer) (err error) { - err = en.WriteArrayHeader(uint32(len(z))) - if err != nil { - return - } - for zzpf := range z { - err = en.WriteUint16(z[zzpf]) - if err != nil { - return - } - } - return -} - -// Deprecated: MarshalMsg implements msgp.Marshaler -func (z uint16Slice) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - o = msgp.AppendArrayHeader(o, uint32(len(z))) - for zzpf := range z { - o = msgp.AppendUint16(o, z[zzpf]) - } - return -} - -// Deprecated: UnmarshalMsg implements msgp.Unmarshaler -func (z *uint16Slice) UnmarshalMsg(bts []byte) (o []byte, err error) { - var zgmo uint32 - zgmo, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - return - } - if cap((*z)) >= int(zgmo) { - (*z) = (*z)[:zgmo] - } else { - (*z) = make(uint16Slice, zgmo) - } - for zrfe := range *z { - (*z)[zrfe], bts, err = msgp.ReadUint16Bytes(bts) - if err != nil { - return - } - } - o = bts - return -} - -// Deprecated: Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z uint16Slice) Msgsize() (s int) { - s = msgp.ArrayHeaderSize + (len(z) * (msgp.Uint16Size)) - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization.go b/vendor/github.com/RoaringBitmap/roaring/serialization.go deleted file mode 100644 index 7b7ed29b0..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/serialization.go +++ /dev/null @@ -1,34 +0,0 @@ -package roaring - -import ( - "encoding/binary" - "io" - - "github.com/tinylib/msgp/msgp" -) - -// writeTo for runContainer16 follows this -// spec: https://github.com/RoaringBitmap/RoaringFormatSpec -// -func (b *runContainer16) writeTo(stream io.Writer) (int, error) { - buf := make([]byte, 2+4*len(b.iv)) - binary.LittleEndian.PutUint16(buf[0:], uint16(len(b.iv))) - for i, v := range b.iv { - binary.LittleEndian.PutUint16(buf[2+i*4:], v.start) - binary.LittleEndian.PutUint16(buf[2+2+i*4:], v.length) - } - return stream.Write(buf) -} - -func (b *runContainer16) writeToMsgpack(stream io.Writer) (int, error) { - bts, err := b.MarshalMsg(nil) - if err != nil { - return 0, err - } - return stream.Write(bts) -} - -func (b *runContainer16) readFromMsgpack(stream io.Reader) (int, error) { - err := msgp.Decode(stream, b) - return 
0, err -} diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go b/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go deleted file mode 100644 index 4b9d9e3d4..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go +++ /dev/null @@ -1,133 +0,0 @@ -// +build !amd64,!386 appengine - -package roaring - -import ( - "encoding/binary" - "errors" - "io" -) - -func (b *arrayContainer) writeTo(stream io.Writer) (int, error) { - buf := make([]byte, 2*len(b.content)) - for i, v := range b.content { - base := i * 2 - buf[base] = byte(v) - buf[base+1] = byte(v >> 8) - } - return stream.Write(buf) -} - -func (b *arrayContainer) readFrom(stream io.Reader) (int, error) { - err := binary.Read(stream, binary.LittleEndian, b.content) - if err != nil { - return 0, err - } - return 2 * len(b.content), nil -} - -func (b *bitmapContainer) writeTo(stream io.Writer) (int, error) { - if b.cardinality <= arrayDefaultMaxSize { - return 0, errors.New("refusing to write bitmap container with cardinality of array container") - } - - // Write set - buf := make([]byte, 8*len(b.bitmap)) - for i, v := range b.bitmap { - base := i * 8 - buf[base] = byte(v) - buf[base+1] = byte(v >> 8) - buf[base+2] = byte(v >> 16) - buf[base+3] = byte(v >> 24) - buf[base+4] = byte(v >> 32) - buf[base+5] = byte(v >> 40) - buf[base+6] = byte(v >> 48) - buf[base+7] = byte(v >> 56) - } - return stream.Write(buf) -} - -func (b *bitmapContainer) readFrom(stream io.Reader) (int, error) { - err := binary.Read(stream, binary.LittleEndian, b.bitmap) - if err != nil { - return 0, err - } - b.computeCardinality() - return 8 * len(b.bitmap), nil -} - -func (bc *bitmapContainer) asLittleEndianByteSlice() []byte { - by := make([]byte, len(bc.bitmap)*8) - for i := range bc.bitmap { - binary.LittleEndian.PutUint64(by[i*8:], bc.bitmap[i]) - } - return by -} - -func uint64SliceAsByteSlice(slice []uint64) []byte { - by := make([]byte, len(slice)*8) - - for i, v := range slice { - binary.LittleEndian.PutUint64(by[i*8:], v) - } - - return by -} - -func uint16SliceAsByteSlice(slice []uint16) []byte { - by := make([]byte, len(slice)*2) - - for i, v := range slice { - binary.LittleEndian.PutUint16(by[i*2:], v) - } - - return by -} - -func byteSliceAsUint16Slice(slice []byte) []uint16 { - if len(slice)%2 != 0 { - panic("Slice size should be divisible by 2") - } - - b := make([]uint16, len(slice)/2) - - for i := range b { - b[i] = binary.LittleEndian.Uint16(slice[2*i:]) - } - - return b -} - -func byteSliceAsUint64Slice(slice []byte) []uint64 { - if len(slice)%8 != 0 { - panic("Slice size should be divisible by 8") - } - - b := make([]uint64, len(slice)/8) - - for i := range b { - b[i] = binary.LittleEndian.Uint64(slice[8*i:]) - } - - return b -} - -// Converts a byte slice to a interval16 slice. 
-// The function assumes that the slice byte buffer is run container data -// encoded according to Roaring Format Spec -func byteSliceAsInterval16Slice(byteSlice []byte) []interval16 { - if len(byteSlice)%4 != 0 { - panic("Slice size should be divisible by 4") - } - - intervalSlice := make([]interval16, len(byteSlice)/4) - - for i := range intervalSlice { - intervalSlice[i] = interval16{ - start: binary.LittleEndian.Uint16(byteSlice[i*4:]), - length: binary.LittleEndian.Uint16(byteSlice[i*4+2:]), - } - } - - return intervalSlice -} diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go deleted file mode 100644 index 818a06c80..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go +++ /dev/null @@ -1,134 +0,0 @@ -// +build 386 amd64,!appengine - -package roaring - -import ( - "errors" - "io" - "reflect" - "runtime" - "unsafe" -) - -func (ac *arrayContainer) writeTo(stream io.Writer) (int, error) { - buf := uint16SliceAsByteSlice(ac.content) - return stream.Write(buf) -} - -func (bc *bitmapContainer) writeTo(stream io.Writer) (int, error) { - if bc.cardinality <= arrayDefaultMaxSize { - return 0, errors.New("refusing to write bitmap container with cardinality of array container") - } - buf := uint64SliceAsByteSlice(bc.bitmap) - return stream.Write(buf) -} - -func uint64SliceAsByteSlice(slice []uint64) []byte { - // make a new slice header - header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice)) - - // update its capacity and length - header.Len *= 8 - header.Cap *= 8 - - // instantiate result and use KeepAlive so data isn't unmapped. - result := *(*[]byte)(unsafe.Pointer(&header)) - runtime.KeepAlive(&slice) - - // return it - return result -} - -func uint16SliceAsByteSlice(slice []uint16) []byte { - // make a new slice header - header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice)) - - // update its capacity and length - header.Len *= 2 - header.Cap *= 2 - - // instantiate result and use KeepAlive so data isn't unmapped. - result := *(*[]byte)(unsafe.Pointer(&header)) - runtime.KeepAlive(&slice) - - // return it - return result -} - -func (bc *bitmapContainer) asLittleEndianByteSlice() []byte { - return uint64SliceAsByteSlice(bc.bitmap) -} - -// Deserialization code follows - -//// -// These methods (byteSliceAsUint16Slice,...) do not make copies, -// they are pointer-based (unsafe). The caller is responsible to -// ensure that the input slice does not get garbage collected, deleted -// or modified while you hold the returned slince. -//// -func byteSliceAsUint16Slice(slice []byte) (result []uint16) { // here we create a new slice holder - if len(slice)%2 != 0 { - panic("Slice size should be divisible by 2") - } - // reference: https://go101.org/article/unsafe.html - - // make a new slice header - bHeader := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) - rHeader := (*reflect.SliceHeader)(unsafe.Pointer(&result)) - - // transfer the data from the given slice to a new variable (our result) - rHeader.Data = bHeader.Data - rHeader.Len = bHeader.Len / 2 - rHeader.Cap = bHeader.Cap / 2 - - // instantiate result and use KeepAlive so data isn't unmapped. 
- runtime.KeepAlive(&slice) // it is still crucial, GC can free it) - - // return result - return -} - -func byteSliceAsUint64Slice(slice []byte) (result []uint64) { - if len(slice)%8 != 0 { - panic("Slice size should be divisible by 8") - } - // reference: https://go101.org/article/unsafe.html - - // make a new slice header - bHeader := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) - rHeader := (*reflect.SliceHeader)(unsafe.Pointer(&result)) - - // transfer the data from the given slice to a new variable (our result) - rHeader.Data = bHeader.Data - rHeader.Len = bHeader.Len / 8 - rHeader.Cap = bHeader.Cap / 8 - - // instantiate result and use KeepAlive so data isn't unmapped. - runtime.KeepAlive(&slice) // it is still crucial, GC can free it) - - // return result - return -} - -func byteSliceAsInterval16Slice(slice []byte) (result []interval16) { - if len(slice)%4 != 0 { - panic("Slice size should be divisible by 4") - } - // reference: https://go101.org/article/unsafe.html - - // make a new slice header - bHeader := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) - rHeader := (*reflect.SliceHeader)(unsafe.Pointer(&result)) - - // transfer the data from the given slice to a new variable (our result) - rHeader.Data = bHeader.Data - rHeader.Len = bHeader.Len / 4 - rHeader.Cap = bHeader.Cap / 4 - - // instantiate result and use KeepAlive so data isn't unmapped. - runtime.KeepAlive(&slice) // it is still crucial, GC can free it) - - // return result - return -} diff --git a/vendor/github.com/RoaringBitmap/roaring/serializationfuzz.go b/vendor/github.com/RoaringBitmap/roaring/serializationfuzz.go deleted file mode 100644 index 5eaa22202..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/serializationfuzz.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build gofuzz - -package roaring - -import "bytes" - -func FuzzSerializationStream(data []byte) int { - newrb := NewBitmap() - if _, err := newrb.ReadFrom(bytes.NewReader(data)); err != nil { - return 0 - } - return 1 -} - -func FuzzSerializationBuffer(data []byte) int { - newrb := NewBitmap() - if _, err := newrb.FromBuffer(data); err != nil { - return 0 - } - return 1 -} diff --git a/vendor/github.com/RoaringBitmap/roaring/setutil.go b/vendor/github.com/RoaringBitmap/roaring/setutil.go deleted file mode 100644 index 3e8c01dd1..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/setutil.go +++ /dev/null @@ -1,609 +0,0 @@ -package roaring - -func equal(a, b []uint16) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} - -func difference(set1 []uint16, set2 []uint16, buffer []uint16) int { - if 0 == len(set2) { - for k := 0; k < len(set1); k++ { - buffer[k] = set1[k] - } - return len(set1) - } - if 0 == len(set1) { - return 0 - } - pos := 0 - k1 := 0 - k2 := 0 - buffer = buffer[:cap(buffer)] - s1 := set1[k1] - s2 := set2[k2] - for { - if s1 < s2 { - buffer[pos] = s1 - pos++ - k1++ - if k1 >= len(set1) { - break - } - s1 = set1[k1] - } else if s1 == s2 { - k1++ - k2++ - if k1 >= len(set1) { - break - } - s1 = set1[k1] - if k2 >= len(set2) { - for ; k1 < len(set1); k1++ { - buffer[pos] = set1[k1] - pos++ - } - break - } - s2 = set2[k2] - } else { // if (val1>val2) - k2++ - if k2 >= len(set2) { - for ; k1 < len(set1); k1++ { - buffer[pos] = set1[k1] - pos++ - } - break - } - s2 = set2[k2] - } - } - return pos - -} - -func exclusiveUnion2by2(set1 []uint16, set2 []uint16, buffer []uint16) int { - if 0 == len(set2) { - buffer = buffer[:len(set1)] - copy(buffer, set1[:]) - 
return len(set1) - } - if 0 == len(set1) { - buffer = buffer[:len(set2)] - copy(buffer, set2[:]) - return len(set2) - } - pos := 0 - k1 := 0 - k2 := 0 - s1 := set1[k1] - s2 := set2[k2] - buffer = buffer[:cap(buffer)] - for { - if s1 < s2 { - buffer[pos] = s1 - pos++ - k1++ - if k1 >= len(set1) { - for ; k2 < len(set2); k2++ { - buffer[pos] = set2[k2] - pos++ - } - break - } - s1 = set1[k1] - } else if s1 == s2 { - k1++ - k2++ - if k1 >= len(set1) { - for ; k2 < len(set2); k2++ { - buffer[pos] = set2[k2] - pos++ - } - break - } - if k2 >= len(set2) { - for ; k1 < len(set1); k1++ { - buffer[pos] = set1[k1] - pos++ - } - break - } - s1 = set1[k1] - s2 = set2[k2] - } else { // if (val1>val2) - buffer[pos] = s2 - pos++ - k2++ - if k2 >= len(set2) { - for ; k1 < len(set1); k1++ { - buffer[pos] = set1[k1] - pos++ - } - break - } - s2 = set2[k2] - } - } - return pos -} - -func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) int { - pos := 0 - k1 := 0 - k2 := 0 - if 0 == len(set2) { - buffer = buffer[:len(set1)] - copy(buffer, set1[:]) - return len(set1) - } - if 0 == len(set1) { - buffer = buffer[:len(set2)] - copy(buffer, set2[:]) - return len(set2) - } - s1 := set1[k1] - s2 := set2[k2] - buffer = buffer[:cap(buffer)] - for { - if s1 < s2 { - buffer[pos] = s1 - pos++ - k1++ - if k1 >= len(set1) { - copy(buffer[pos:], set2[k2:]) - pos += len(set2) - k2 - break - } - s1 = set1[k1] - } else if s1 == s2 { - buffer[pos] = s1 - pos++ - k1++ - k2++ - if k1 >= len(set1) { - copy(buffer[pos:], set2[k2:]) - pos += len(set2) - k2 - break - } - if k2 >= len(set2) { - copy(buffer[pos:], set1[k1:]) - pos += len(set1) - k1 - break - } - s1 = set1[k1] - s2 = set2[k2] - } else { // if (set1[k1]>set2[k2]) - buffer[pos] = s2 - pos++ - k2++ - if k2 >= len(set2) { - copy(buffer[pos:], set1[k1:]) - pos += len(set1) - k1 - break - } - s2 = set2[k2] - } - } - return pos -} - -func union2by2Cardinality(set1 []uint16, set2 []uint16) int { - pos := 0 - k1 := 0 - k2 := 0 - if 0 == len(set2) { - return len(set1) - } - if 0 == len(set1) { - return len(set2) - } - s1 := set1[k1] - s2 := set2[k2] - for { - if s1 < s2 { - pos++ - k1++ - if k1 >= len(set1) { - pos += len(set2) - k2 - break - } - s1 = set1[k1] - } else if s1 == s2 { - pos++ - k1++ - k2++ - if k1 >= len(set1) { - pos += len(set2) - k2 - break - } - if k2 >= len(set2) { - pos += len(set1) - k1 - break - } - s1 = set1[k1] - s2 = set2[k2] - } else { // if (set1[k1]>set2[k2]) - pos++ - k2++ - if k2 >= len(set2) { - pos += len(set1) - k1 - break - } - s2 = set2[k2] - } - } - return pos -} - -func intersection2by2( - set1 []uint16, - set2 []uint16, - buffer []uint16) int { - - if len(set1)*64 < len(set2) { - return onesidedgallopingintersect2by2(set1, set2, buffer) - } else if len(set2)*64 < len(set1) { - return onesidedgallopingintersect2by2(set2, set1, buffer) - } else { - return localintersect2by2(set1, set2, buffer) - } -} - -func intersection2by2Cardinality( - set1 []uint16, - set2 []uint16) int { - - if len(set1)*64 < len(set2) { - return onesidedgallopingintersect2by2Cardinality(set1, set2) - } else if len(set2)*64 < len(set1) { - return onesidedgallopingintersect2by2Cardinality(set2, set1) - } else { - return localintersect2by2Cardinality(set1, set2) - } -} - -func intersects2by2( - set1 []uint16, - set2 []uint16) bool { - // could be optimized if one set is much larger than the other one - if (0 == len(set1)) || (0 == len(set2)) { - return false - } - k1 := 0 - k2 := 0 - s1 := set1[k1] - s2 := set2[k2] -mainwhile: - for { - - if s2 < s1 { - for { - k2++ 
- if k2 == len(set2) { - break mainwhile - } - s2 = set2[k2] - if s2 >= s1 { - break - } - } - } - if s1 < s2 { - for { - k1++ - if k1 == len(set1) { - break mainwhile - } - s1 = set1[k1] - if s1 >= s2 { - break - } - } - - } else { - // (set2[k2] == set1[k1]) - return true - } - } - return false -} - -func localintersect2by2( - set1 []uint16, - set2 []uint16, - buffer []uint16) int { - - if (0 == len(set1)) || (0 == len(set2)) { - return 0 - } - k1 := 0 - k2 := 0 - pos := 0 - buffer = buffer[:cap(buffer)] - s1 := set1[k1] - s2 := set2[k2] -mainwhile: - for { - if s2 < s1 { - for { - k2++ - if k2 == len(set2) { - break mainwhile - } - s2 = set2[k2] - if s2 >= s1 { - break - } - } - } - if s1 < s2 { - for { - k1++ - if k1 == len(set1) { - break mainwhile - } - s1 = set1[k1] - if s1 >= s2 { - break - } - } - - } else { - // (set2[k2] == set1[k1]) - buffer[pos] = s1 - pos++ - k1++ - if k1 == len(set1) { - break - } - s1 = set1[k1] - k2++ - if k2 == len(set2) { - break - } - s2 = set2[k2] - } - } - return pos -} - -func localintersect2by2Cardinality( - set1 []uint16, - set2 []uint16) int { - - if (0 == len(set1)) || (0 == len(set2)) { - return 0 - } - k1 := 0 - k2 := 0 - pos := 0 - s1 := set1[k1] - s2 := set2[k2] -mainwhile: - for { - if s2 < s1 { - for { - k2++ - if k2 == len(set2) { - break mainwhile - } - s2 = set2[k2] - if s2 >= s1 { - break - } - } - } - if s1 < s2 { - for { - k1++ - if k1 == len(set1) { - break mainwhile - } - s1 = set1[k1] - if s1 >= s2 { - break - } - } - - } else { - // (set2[k2] == set1[k1]) - pos++ - k1++ - if k1 == len(set1) { - break - } - s1 = set1[k1] - k2++ - if k2 == len(set2) { - break - } - s2 = set2[k2] - } - } - return pos -} - -func advanceUntil( - array []uint16, - pos int, - length int, - min uint16) int { - lower := pos + 1 - - if lower >= length || array[lower] >= min { - return lower - } - - spansize := 1 - - for lower+spansize < length && array[lower+spansize] < min { - spansize *= 2 - } - var upper int - if lower+spansize < length { - upper = lower + spansize - } else { - upper = length - 1 - } - - if array[upper] == min { - return upper - } - - if array[upper] < min { - // means - // array - // has no - // item - // >= min - // pos = array.length; - return length - } - - // we know that the next-smallest span was too small - lower += (spansize >> 1) - - mid := 0 - for lower+1 != upper { - mid = (lower + upper) >> 1 - if array[mid] == min { - return mid - } else if array[mid] < min { - lower = mid - } else { - upper = mid - } - } - return upper - -} - -func onesidedgallopingintersect2by2( - smallset []uint16, - largeset []uint16, - buffer []uint16) int { - - if 0 == len(smallset) { - return 0 - } - buffer = buffer[:cap(buffer)] - k1 := 0 - k2 := 0 - pos := 0 - s1 := largeset[k1] - s2 := smallset[k2] -mainwhile: - - for { - if s1 < s2 { - k1 = advanceUntil(largeset, k1, len(largeset), s2) - if k1 == len(largeset) { - break mainwhile - } - s1 = largeset[k1] - } - if s2 < s1 { - k2++ - if k2 == len(smallset) { - break mainwhile - } - s2 = smallset[k2] - } else { - - buffer[pos] = s2 - pos++ - k2++ - if k2 == len(smallset) { - break - } - s2 = smallset[k2] - k1 = advanceUntil(largeset, k1, len(largeset), s2) - if k1 == len(largeset) { - break mainwhile - } - s1 = largeset[k1] - } - - } - return pos -} - -func onesidedgallopingintersect2by2Cardinality( - smallset []uint16, - largeset []uint16) int { - - if 0 == len(smallset) { - return 0 - } - k1 := 0 - k2 := 0 - pos := 0 - s1 := largeset[k1] - s2 := smallset[k2] -mainwhile: - - for { - if s1 < s2 { - 
k1 = advanceUntil(largeset, k1, len(largeset), s2) - if k1 == len(largeset) { - break mainwhile - } - s1 = largeset[k1] - } - if s2 < s1 { - k2++ - if k2 == len(smallset) { - break mainwhile - } - s2 = smallset[k2] - } else { - - pos++ - k2++ - if k2 == len(smallset) { - break - } - s2 = smallset[k2] - k1 = advanceUntil(largeset, k1, len(largeset), s2) - if k1 == len(largeset) { - break mainwhile - } - s1 = largeset[k1] - } - - } - return pos -} - -func binarySearch(array []uint16, ikey uint16) int { - low := 0 - high := len(array) - 1 - for low+16 <= high { - middleIndex := int(uint32(low+high) >> 1) - middleValue := array[middleIndex] - if middleValue < ikey { - low = middleIndex + 1 - } else if middleValue > ikey { - high = middleIndex - 1 - } else { - return middleIndex - } - } - for ; low <= high; low++ { - val := array[low] - if val >= ikey { - if val == ikey { - return low - } - break - } - } - return -(low + 1) -} diff --git a/vendor/github.com/RoaringBitmap/roaring/shortiterator.go b/vendor/github.com/RoaringBitmap/roaring/shortiterator.go deleted file mode 100644 index 15b78bd0c..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/shortiterator.go +++ /dev/null @@ -1,52 +0,0 @@ -package roaring - -type shortIterable interface { - hasNext() bool - next() uint16 -} - -type shortPeekable interface { - shortIterable - peekNext() uint16 - advanceIfNeeded(minval uint16) -} - -type shortIterator struct { - slice []uint16 - loc int -} - -func (si *shortIterator) hasNext() bool { - return si.loc < len(si.slice) -} - -func (si *shortIterator) next() uint16 { - a := si.slice[si.loc] - si.loc++ - return a -} - -func (si *shortIterator) peekNext() uint16 { - return si.slice[si.loc] -} - -func (si *shortIterator) advanceIfNeeded(minval uint16) { - if si.hasNext() && si.peekNext() < minval { - si.loc = advanceUntil(si.slice, si.loc, len(si.slice), minval) - } -} - -type reverseIterator struct { - slice []uint16 - loc int -} - -func (si *reverseIterator) hasNext() bool { - return si.loc >= 0 -} - -func (si *reverseIterator) next() uint16 { - a := si.slice[si.loc] - si.loc-- - return a -} diff --git a/vendor/github.com/RoaringBitmap/roaring/smat.go b/vendor/github.com/RoaringBitmap/roaring/smat.go deleted file mode 100644 index 9da475634..000000000 --- a/vendor/github.com/RoaringBitmap/roaring/smat.go +++ /dev/null @@ -1,383 +0,0 @@ -// +build gofuzz - -/* -# Instructions for smat testing for roaring - -[smat](https://github.com/mschoch/smat) is a framework that provides -state machine assisted fuzz testing. - -To run the smat tests for roaring... - -## Prerequisites - - $ go get github.com/dvyukov/go-fuzz/go-fuzz - $ go get github.com/dvyukov/go-fuzz/go-fuzz-build - -## Steps - -1. Generate initial smat corpus: -``` - go test -tags=gofuzz -run=TestGenerateSmatCorpus -``` - -2. Build go-fuzz test program with instrumentation: -``` - go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring -``` - -3. Run go-fuzz: -``` - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 -``` - -You should see output like... 
-``` -2016/09/16 13:58:35 slaves: 8, corpus: 1 (3s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 0, uptime: 3s -2016/09/16 13:58:38 slaves: 8, corpus: 1 (6s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 0, uptime: 6s -2016/09/16 13:58:41 slaves: 8, corpus: 1 (9s ago), crashers: 0, restarts: 1/44, execs: 44 (5/sec), cover: 0, uptime: 9s -2016/09/16 13:58:44 slaves: 8, corpus: 1 (12s ago), crashers: 0, restarts: 1/45, execs: 45 (4/sec), cover: 0, uptime: 12s -2016/09/16 13:58:47 slaves: 8, corpus: 1 (15s ago), crashers: 0, restarts: 1/46, execs: 46 (3/sec), cover: 0, uptime: 15s -2016/09/16 13:58:50 slaves: 8, corpus: 1 (18s ago), crashers: 0, restarts: 1/47, execs: 47 (3/sec), cover: 0, uptime: 18s -2016/09/16 13:58:53 slaves: 8, corpus: 1 (21s ago), crashers: 0, restarts: 1/63, execs: 63 (3/sec), cover: 0, uptime: 21s -2016/09/16 13:58:56 slaves: 8, corpus: 1 (24s ago), crashers: 0, restarts: 1/65, execs: 65 (3/sec), cover: 0, uptime: 24s -2016/09/16 13:58:59 slaves: 8, corpus: 1 (27s ago), crashers: 0, restarts: 1/66, execs: 66 (2/sec), cover: 0, uptime: 27s -2016/09/16 13:59:02 slaves: 8, corpus: 1 (30s ago), crashers: 0, restarts: 1/67, execs: 67 (2/sec), cover: 0, uptime: 30s -2016/09/16 13:59:05 slaves: 8, corpus: 1 (33s ago), crashers: 0, restarts: 1/83, execs: 83 (3/sec), cover: 0, uptime: 33s -2016/09/16 13:59:08 slaves: 8, corpus: 1 (36s ago), crashers: 0, restarts: 1/84, execs: 84 (2/sec), cover: 0, uptime: 36s -2016/09/16 13:59:11 slaves: 8, corpus: 2 (0s ago), crashers: 0, restarts: 1/85, execs: 85 (2/sec), cover: 0, uptime: 39s -2016/09/16 13:59:14 slaves: 8, corpus: 17 (2s ago), crashers: 0, restarts: 1/86, execs: 86 (2/sec), cover: 480, uptime: 42s -2016/09/16 13:59:17 slaves: 8, corpus: 17 (5s ago), crashers: 0, restarts: 1/66, execs: 132 (3/sec), cover: 487, uptime: 45s -2016/09/16 13:59:20 slaves: 8, corpus: 17 (8s ago), crashers: 0, restarts: 1/440, execs: 2645 (55/sec), cover: 487, uptime: 48s - -``` - -Let it run, and if the # of crashers is > 0, check out the reports in -the workdir where you should be able to find the panic goroutine stack -traces. -*/ - -package roaring - -import ( - "fmt" - "sort" - - "github.com/mschoch/smat" - "github.com/willf/bitset" -) - -// fuzz test using state machine driven by byte stream. -func FuzzSmat(data []byte) int { - return smat.Fuzz(&smatContext{}, smat.ActionID('S'), smat.ActionID('T'), - smatActionMap, data) -} - -var smatDebug = false - -func smatLog(prefix, format string, args ...interface{}) { - if smatDebug { - fmt.Print(prefix) - fmt.Printf(format, args...) - } -} - -type smatContext struct { - pairs []*smatPair - - // Two registers, x & y. 
- x int - y int - - actions int -} - -type smatPair struct { - bm *Bitmap - bs *bitset.BitSet -} - -// ------------------------------------------------------------------ - -var smatActionMap = smat.ActionMap{ - smat.ActionID('X'): smatAction("x++", smatWrap(func(c *smatContext) { c.x++ })), - smat.ActionID('x'): smatAction("x--", smatWrap(func(c *smatContext) { c.x-- })), - smat.ActionID('Y'): smatAction("y++", smatWrap(func(c *smatContext) { c.y++ })), - smat.ActionID('y'): smatAction("y--", smatWrap(func(c *smatContext) { c.y-- })), - smat.ActionID('*'): smatAction("x*y", smatWrap(func(c *smatContext) { c.x = c.x * c.y })), - smat.ActionID('<'): smatAction("x<<", smatWrap(func(c *smatContext) { c.x = c.x << 1 })), - - smat.ActionID('^'): smatAction("swap", smatWrap(func(c *smatContext) { c.x, c.y = c.y, c.x })), - - smat.ActionID('['): smatAction(" pushPair", smatWrap(smatPushPair)), - smat.ActionID(']'): smatAction(" popPair", smatWrap(smatPopPair)), - - smat.ActionID('B'): smatAction(" setBit", smatWrap(smatSetBit)), - smat.ActionID('b'): smatAction(" removeBit", smatWrap(smatRemoveBit)), - - smat.ActionID('o'): smatAction(" or", smatWrap(smatOr)), - smat.ActionID('a'): smatAction(" and", smatWrap(smatAnd)), - - smat.ActionID('#'): smatAction(" cardinality", smatWrap(smatCardinality)), - - smat.ActionID('O'): smatAction(" orCardinality", smatWrap(smatOrCardinality)), - smat.ActionID('A'): smatAction(" andCardinality", smatWrap(smatAndCardinality)), - - smat.ActionID('c'): smatAction(" clear", smatWrap(smatClear)), - smat.ActionID('r'): smatAction(" runOptimize", smatWrap(smatRunOptimize)), - - smat.ActionID('e'): smatAction(" isEmpty", smatWrap(smatIsEmpty)), - - smat.ActionID('i'): smatAction(" intersects", smatWrap(smatIntersects)), - - smat.ActionID('f'): smatAction(" flip", smatWrap(smatFlip)), - - smat.ActionID('-'): smatAction(" difference", smatWrap(smatDifference)), -} - -var smatRunningPercentActions []smat.PercentAction - -func init() { - var ids []int - for actionId := range smatActionMap { - ids = append(ids, int(actionId)) - } - sort.Ints(ids) - - pct := 100 / len(smatActionMap) - for _, actionId := range ids { - smatRunningPercentActions = append(smatRunningPercentActions, - smat.PercentAction{pct, smat.ActionID(actionId)}) - } - - smatActionMap[smat.ActionID('S')] = smatAction("SETUP", smatSetupFunc) - smatActionMap[smat.ActionID('T')] = smatAction("TEARDOWN", smatTeardownFunc) -} - -// We only have one smat state: running. -func smatRunning(next byte) smat.ActionID { - return smat.PercentExecute(next, smatRunningPercentActions...) -} - -func smatAction(name string, f func(ctx smat.Context) (smat.State, error)) func(smat.Context) (smat.State, error) { - return func(ctx smat.Context) (smat.State, error) { - c := ctx.(*smatContext) - c.actions++ - - smatLog(" ", "%s\n", name) - - return f(ctx) - } -} - -// Creates an smat action func based on a simple callback. -func smatWrap(cb func(c *smatContext)) func(smat.Context) (next smat.State, err error) { - return func(ctx smat.Context) (next smat.State, err error) { - c := ctx.(*smatContext) - cb(c) - return smatRunning, nil - } -} - -// Invokes a callback function with the input v bounded to len(c.pairs). 
-func (c *smatContext) withPair(v int, cb func(*smatPair)) { - if len(c.pairs) > 0 { - if v < 0 { - v = -v - } - v = v % len(c.pairs) - cb(c.pairs[v]) - } -} - -// ------------------------------------------------------------------ - -func smatSetupFunc(ctx smat.Context) (next smat.State, err error) { - return smatRunning, nil -} - -func smatTeardownFunc(ctx smat.Context) (next smat.State, err error) { - return nil, err -} - -// ------------------------------------------------------------------ - -func smatPushPair(c *smatContext) { - c.pairs = append(c.pairs, &smatPair{ - bm: NewBitmap(), - bs: bitset.New(100), - }) -} - -func smatPopPair(c *smatContext) { - if len(c.pairs) > 0 { - c.pairs = c.pairs[0 : len(c.pairs)-1] - } -} - -func smatSetBit(c *smatContext) { - c.withPair(c.x, func(p *smatPair) { - y := uint32(c.y) - p.bm.AddInt(int(y)) - p.bs.Set(uint(y)) - p.checkEquals() - }) -} - -func smatRemoveBit(c *smatContext) { - c.withPair(c.x, func(p *smatPair) { - y := uint32(c.y) - p.bm.Remove(y) - p.bs.Clear(uint(y)) - p.checkEquals() - }) -} - -func smatAnd(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - c.withPair(c.y, func(py *smatPair) { - px.bm.And(py.bm) - px.bs = px.bs.Intersection(py.bs) - px.checkEquals() - py.checkEquals() - }) - }) -} - -func smatOr(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - c.withPair(c.y, func(py *smatPair) { - px.bm.Or(py.bm) - px.bs = px.bs.Union(py.bs) - px.checkEquals() - py.checkEquals() - }) - }) -} - -func smatAndCardinality(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - c.withPair(c.y, func(py *smatPair) { - c0 := px.bm.AndCardinality(py.bm) - c1 := px.bs.IntersectionCardinality(py.bs) - if c0 != uint64(c1) { - panic("expected same add cardinality") - } - px.checkEquals() - py.checkEquals() - }) - }) -} - -func smatOrCardinality(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - c.withPair(c.y, func(py *smatPair) { - c0 := px.bm.OrCardinality(py.bm) - c1 := px.bs.UnionCardinality(py.bs) - if c0 != uint64(c1) { - panic("expected same or cardinality") - } - px.checkEquals() - py.checkEquals() - }) - }) -} - -func smatRunOptimize(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - px.bm.RunOptimize() - px.checkEquals() - }) -} - -func smatClear(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - px.bm.Clear() - px.bs = px.bs.ClearAll() - px.checkEquals() - }) -} - -func smatCardinality(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - c0 := px.bm.GetCardinality() - c1 := px.bs.Count() - if c0 != uint64(c1) { - panic("expected same cardinality") - } - }) -} - -func smatIsEmpty(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - c0 := px.bm.IsEmpty() - c1 := px.bs.None() - if c0 != c1 { - panic("expected same is empty") - } - }) -} - -func smatIntersects(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - c.withPair(c.y, func(py *smatPair) { - v0 := px.bm.Intersects(py.bm) - v1 := px.bs.IntersectionCardinality(py.bs) > 0 - if v0 != v1 { - panic("intersects not equal") - } - - px.checkEquals() - py.checkEquals() - }) - }) -} - -func smatFlip(c *smatContext) { - c.withPair(c.x, func(p *smatPair) { - y := uint32(c.y) - p.bm.Flip(uint64(y), uint64(y)+1) - p.bs = p.bs.Flip(uint(y)) - p.checkEquals() - }) -} - -func smatDifference(c *smatContext) { - c.withPair(c.x, func(px *smatPair) { - c.withPair(c.y, func(py *smatPair) { - px.bm.AndNot(py.bm) - px.bs = px.bs.Difference(py.bs) - px.checkEquals() - py.checkEquals() - }) - }) -} - -func (p *smatPair) checkEquals() { - if 
!p.equalsBitSet(p.bs, p.bm) {
- panic("bitset mismatch")
- }
-}
-
-func (p *smatPair) equalsBitSet(a *bitset.BitSet, b *Bitmap) bool {
- for i, e := a.NextSet(0); e; i, e = a.NextSet(i + 1) {
- if !b.ContainsInt(int(i)) {
- fmt.Printf("in a bitset, not b bitmap, i: %d\n", i)
- fmt.Printf(" a bitset: %s\n b bitmap: %s\n",
- a.String(), b.String())
- return false
- }
- }
-
- i := b.Iterator()
- for i.HasNext() {
- v := i.Next()
- if !a.Test(uint(v)) {
- fmt.Printf("in b bitmap, not a bitset, v: %d\n", v)
- fmt.Printf(" a bitset: %s\n b bitmap: %s\n",
- a.String(), b.String())
- return false
- }
- }
-
- return true
-}
diff --git a/vendor/github.com/RoaringBitmap/roaring/util.go b/vendor/github.com/RoaringBitmap/roaring/util.go
deleted file mode 100644
index 3a9a47236..000000000
--- a/vendor/github.com/RoaringBitmap/roaring/util.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package roaring
-
-import (
- "math/rand"
- "sort"
-)
-
-const (
- arrayDefaultMaxSize = 4096 // containers with 4096 or fewer integers should be array containers.
- arrayLazyLowerBound = 1024
- maxCapacity = 1 << 16
- serialCookieNoRunContainer = 12346 // only arrays and bitmaps
- invalidCardinality = -1
- serialCookie = 12347 // runs, arrays, and bitmaps
- noOffsetThreshold = 4
-
- // MaxUint32 is the largest uint32 value.
- MaxUint32 = 4294967295
-
- // MaxRange is One more than the maximum allowed bitmap bit index. For use as an upper
- // bound for ranges.
- MaxRange uint64 = MaxUint32 + 1
-
- // MaxUint16 is the largest 16 bit unsigned int.
- // This is the largest value an interval16 can store.
- MaxUint16 = 65535
-
- // Compute wordSizeInBytes, the size of a word in bytes.
- _m = ^uint64(0)
- _logS = _m>>8&1 + _m>>16&1 + _m>>32&1
- wordSizeInBytes = 1 << _logS
-
- // other constants used in ctz_generic.go
- wordSizeInBits = wordSizeInBytes << 3 // word size in bits
-)
-
-const maxWord = 1<<wordSizeInBits - 1
-
-func getSizeInBytesFromCardinality(card int) int {
- if card > arrayDefaultMaxSize {
- // bitmapContainer
- return maxCapacity / 8
- }
- // arrayContainer
- return 2 * card
-}
-
-func fill(arr []uint64, val uint64) {
- for i := range arr {
- arr[i] = val
- }
-}
-func fillRange(arr []uint64, start, end int, val uint64) {
- for i := start; i < end; i++ {
- arr[i] = val
- }
-}
-
-func fillArrayAND(container []uint16, bitmap1, bitmap2 []uint64) {
- if len(bitmap1) != len(bitmap2) {
- panic("array lengths don't match")
- }
- // TODO: rewrite in assembly
- pos := 0
- for k := range bitmap1 {
- bitset := bitmap1[k] & bitmap2[k]
- for bitset != 0 {
- t := bitset & -bitset
- container[pos] = uint16((k*64 + int(popcount(t-1))))
- pos = pos + 1
- bitset ^= t
- }
- }
-}
-
-func fillArrayANDNOT(container []uint16, bitmap1, bitmap2 []uint64) {
- if len(bitmap1) != len(bitmap2) {
- panic("array lengths don't match")
- }
- // TODO: rewrite in assembly
- pos := 0
- for k := range bitmap1 {
- bitset := bitmap1[k] &^ bitmap2[k]
- for bitset != 0 {
- t := bitset & -bitset
- container[pos] = uint16((k*64 + int(popcount(t-1))))
- pos = pos + 1
- bitset ^= t
- }
- }
-}
-
-func fillArrayXOR(container []uint16, bitmap1, bitmap2 []uint64) {
- if len(bitmap1) != len(bitmap2) {
- panic("array lengths don't match")
- }
- // TODO: rewrite in assembly
- pos := 0
- for k := 0; k < len(bitmap1); k++ {
- bitset := bitmap1[k] ^ bitmap2[k]
- for bitset != 0 {
- t := bitset & -bitset
- container[pos] = uint16((k*64 + int(popcount(t-1))))
- pos = pos + 1
- bitset ^= t
- }
- }
-}
-
-func highbits(x uint32) uint16 {
- return uint16(x >> 16)
-}
-func lowbits(x uint32) uint16 {
- return uint16(x & 0xFFFF)
-}
-
-const maxLowBit = 0xFFFF
-
-func 
flipBitmapRange(bitmap []uint64, start int, end int) { - if start >= end { - return - } - firstword := start / 64 - endword := (end - 1) / 64 - bitmap[firstword] ^= ^(^uint64(0) << uint(start%64)) - for i := firstword; i < endword; i++ { - bitmap[i] = ^bitmap[i] - } - bitmap[endword] ^= ^uint64(0) >> (uint(-end) % 64) -} - -func resetBitmapRange(bitmap []uint64, start int, end int) { - if start >= end { - return - } - firstword := start / 64 - endword := (end - 1) / 64 - if firstword == endword { - bitmap[firstword] &= ^((^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64))) - return - } - bitmap[firstword] &= ^(^uint64(0) << uint(start%64)) - for i := firstword + 1; i < endword; i++ { - bitmap[i] = 0 - } - bitmap[endword] &= ^(^uint64(0) >> (uint(-end) % 64)) - -} - -func setBitmapRange(bitmap []uint64, start int, end int) { - if start >= end { - return - } - firstword := start / 64 - endword := (end - 1) / 64 - if firstword == endword { - bitmap[firstword] |= (^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64)) - return - } - bitmap[firstword] |= ^uint64(0) << uint(start%64) - for i := firstword + 1; i < endword; i++ { - bitmap[i] = ^uint64(0) - } - bitmap[endword] |= ^uint64(0) >> (uint(-end) % 64) -} - -func flipBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int { - before := wordCardinalityForBitmapRange(bitmap, start, end) - flipBitmapRange(bitmap, start, end) - after := wordCardinalityForBitmapRange(bitmap, start, end) - return int(after - before) -} - -func resetBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int { - before := wordCardinalityForBitmapRange(bitmap, start, end) - resetBitmapRange(bitmap, start, end) - after := wordCardinalityForBitmapRange(bitmap, start, end) - return int(after - before) -} - -func setBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int { - before := wordCardinalityForBitmapRange(bitmap, start, end) - setBitmapRange(bitmap, start, end) - after := wordCardinalityForBitmapRange(bitmap, start, end) - return int(after - before) -} - -func wordCardinalityForBitmapRange(bitmap []uint64, start int, end int) uint64 { - answer := uint64(0) - if start >= end { - return answer - } - firstword := start / 64 - endword := (end - 1) / 64 - for i := firstword; i <= endword; i++ { - answer += popcount(bitmap[i]) - } - return answer -} - -func selectBitPosition(w uint64, j int) int { - seen := 0 - - // Divide 64bit - part := w & 0xFFFFFFFF - n := popcount(part) - if n <= uint64(j) { - part = w >> 32 - seen += 32 - j -= int(n) - } - w = part - - // Divide 32bit - part = w & 0xFFFF - n = popcount(part) - if n <= uint64(j) { - part = w >> 16 - seen += 16 - j -= int(n) - } - w = part - - // Divide 16bit - part = w & 0xFF - n = popcount(part) - if n <= uint64(j) { - part = w >> 8 - seen += 8 - j -= int(n) - } - w = part - - // Lookup in final byte - var counter uint - for counter = 0; counter < 8; counter++ { - j -= int((w >> counter) & 1) - if j < 0 { - break - } - } - return seen + int(counter) - -} - -func panicOn(err error) { - if err != nil { - panic(err) - } -} - -type ph struct { - orig int - rand int -} - -type pha []ph - -func (p pha) Len() int { return len(p) } -func (p pha) Less(i, j int) bool { return p[i].rand < p[j].rand } -func (p pha) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func getRandomPermutation(n int) []int { - r := make([]ph, n) - for i := 0; i < n; i++ { - r[i].orig = i - r[i].rand = rand.Intn(1 << 29) - } - sort.Sort(pha(r)) - m := make([]int, n) - for i 
:= range m { - m[i] = r[i].orig - } - return m -} - -func minOfInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxOfInt(a, b int) int { - if a > b { - return a - } - return b -} - -func maxOfUint16(a, b uint16) uint16 { - if a > b { - return a - } - return b -} - -func minOfUint16(a, b uint16) uint16 { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/appleboy/easyssh-proxy/.drone.jsonnet b/vendor/github.com/appleboy/easyssh-proxy/.drone.jsonnet deleted file mode 100644 index a17527ef7..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/.drone.jsonnet +++ /dev/null @@ -1,5 +0,0 @@ -local pipeline = import 'pipeline.libsonnet'; - -[ - pipeline.test, -] diff --git a/vendor/github.com/appleboy/easyssh-proxy/.drone.yml b/vendor/github.com/appleboy/easyssh-proxy/.drone.yml deleted file mode 100644 index 1fe26dd5b..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/.drone.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -kind: pipeline -name: testing - -platform: - os: linux - arch: amd64 - -steps: -- name: vet - pull: always - image: golang:1.12 - commands: - - make vet - environment: - GO111MODULE: on - volumes: - - name: gopath - path: /go - -- name: lint - pull: always - image: golang:1.12 - commands: - - make lint - environment: - GO111MODULE: on - volumes: - - name: gopath - path: /go - -- name: misspell - pull: always - image: golang:1.12 - commands: - - make misspell-check - environment: - GO111MODULE: on - volumes: - - name: gopath - path: /go - -- name: embedmd - pull: always - image: golang:1.12 - commands: - - make embedmd - environment: - GO111MODULE: on - volumes: - - name: gopath - path: /go - -- name: test - pull: always - image: golang:1.12-alpine - commands: - - apk add git make curl perl bash build-base zlib-dev ucl-dev - - make ssh-server - - make test - environment: - GO111MODULE: on - volumes: - - name: gopath - path: /go - -- name: codecov - pull: always - image: robertstettner/drone-codecov - settings: - token: - from_secret: codecov_token - -volumes: -- name: gopath - temp: {} - -... 
diff --git a/vendor/github.com/appleboy/easyssh-proxy/.gitignore b/vendor/github.com/appleboy/easyssh-proxy/.gitignore deleted file mode 100644 index db171bc94..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -coverage.txt diff --git a/vendor/github.com/appleboy/easyssh-proxy/.revive.toml b/vendor/github.com/appleboy/easyssh-proxy/.revive.toml deleted file mode 100644 index 74743d5ef..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/.revive.toml +++ /dev/null @@ -1,25 +0,0 @@ -ignoreGeneratedHeader = false -severity = "warning" -confidence = 0.8 -errorCode = 1 -warningCode = 1 - -[rule.blank-imports] -[rule.context-as-argument] -[rule.context-keys-type] -[rule.dot-imports] -[rule.error-return] -[rule.error-strings] -[rule.error-naming] -[rule.exported] -[rule.if-return] -[rule.increment-decrement] -[rule.var-naming] -[rule.var-declaration] -[rule.package-comments] -[rule.range] -[rule.receiver-naming] -[rule.time-naming] -[rule.unexported-return] -[rule.indent-error-flow] -[rule.errorf] diff --git a/vendor/github.com/appleboy/easyssh-proxy/LICENSE b/vendor/github.com/appleboy/easyssh-proxy/LICENSE deleted file mode 100644 index b6df010e7..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 Bo-Yi Wu - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/appleboy/easyssh-proxy/Makefile b/vendor/github.com/appleboy/easyssh-proxy/Makefile deleted file mode 100644 index 13b4469d5..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/Makefile +++ /dev/null @@ -1,70 +0,0 @@ -GOFMT ?= gofmt "-s" -GO ?= go -PACKAGES ?= $(shell $(GO) list ./...) -SOURCES ?= $(shell find . -name "*.go" -type f) - -all: lint - -fmt: - $(GOFMT) -w $(SOURCES) - -vet: - $(GO) vet $(PACKAGES) - -lint: - @hash revive > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ - $(GO) get -u github.com/mgechev/revive; \ - fi - revive -config .revive.toml ./... || exit 1 - -.PHONY: misspell-check -misspell-check: - @hash misspell > /dev/null 2>&1; if [ $$? 
-ne 0 ]; then \ - $(GO) get -u github.com/client9/misspell/cmd/misspell; \ - fi - misspell -error $(SOURCES) - -.PHONY: misspell -misspell: - @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ - $(GO) get -u github.com/client9/misspell/cmd/misspell; \ - fi - misspell -w $(SOURCES) - -.PHONY: fmt-check -fmt-check: - @diff=$$($(GOFMT) -d $(SOURCES)); \ - if [ -n "$$diff" ]; then \ - echo "Please run 'make fmt' and commit the result:"; \ - echo "$${diff}"; \ - exit 1; \ - fi; - -test: fmt-check - @$(GO) test -v -cover -coverprofile coverage.txt $(PACKAGES) && echo "\n==>\033[32m Ok\033[m\n" || exit 1 - -embedmd: - @hash embedmd > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ - go get -u github.com/campoy/embedmd; \ - fi - embedmd -d *.md - -clean: - go clean -x -i ./... - rm -rf coverage.txt $(EXECUTABLE) $(DIST) vendor - -ssh-server: - adduser -h /home/drone-scp -s /bin/bash -D -S drone-scp - echo drone-scp:1234 | chpasswd - mkdir -p /home/drone-scp/.ssh - chmod 700 /home/drone-scp/.ssh - cp tests/.ssh/id_rsa.pub /home/drone-scp/.ssh/authorized_keys - chown -R drone-scp /home/drone-scp/.ssh - # install ssh and start server - apk add --update openssh openrc - rm -rf /etc/ssh/ssh_host_rsa_key /etc/ssh/ssh_host_dsa_key - sed -i 's/AllowTcpForwarding no/AllowTcpForwarding yes/g' /etc/ssh/sshd_config - ./tests/entrypoint.sh /usr/sbin/sshd -D & - -version: - @echo $(VERSION) diff --git a/vendor/github.com/appleboy/easyssh-proxy/README.md b/vendor/github.com/appleboy/easyssh-proxy/README.md deleted file mode 100644 index 01c73cb52..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/README.md +++ /dev/null @@ -1,187 +0,0 @@ -# easyssh-proxy - -[![GoDoc](https://godoc.org/github.com/appleboy/easyssh-proxy?status.svg)](https://godoc.org/github.com/appleboy/easyssh-proxy) -[![Build Status](https://cloud.drone.io/api/badges/appleboy/easyssh-proxy/status.svg)](https://cloud.drone.io/appleboy/easyssh-proxy) -[![codecov](https://codecov.io/gh/appleboy/easyssh-proxy/branch/master/graph/badge.svg)](https://codecov.io/gh/appleboy/easyssh-proxy) -[![Go Report Card](https://goreportcard.com/badge/github.com/appleboy/easyssh-proxy)](https://goreportcard.com/report/github.com/appleboy/easyssh-proxy) -[![Sourcegraph](https://sourcegraph.com/github.com/appleboy/easyssh-proxy/-/badge.svg)](https://sourcegraph.com/github.com/appleboy/easyssh-proxy?badge) -[![Release](https://github-release-version.herokuapp.com/github/appleboy/easyssh-proxy/release.svg?style=flat)](https://github.com/appleboy/easyssh-proxy/releases/latest) - -easyssh-proxy provides a simple implementation of some SSH protocol features in Go. - -## Feature - -This project is forked from [easyssh](https://github.com/hypersleep/easyssh) but add some features as the following. - -* [x] Support plain text of user private key. -* [x] Support key path of user private key. -* [x] Support Timeout for the TCP connection to establish. -* [x] Support SSH ProxyCommand. - -``` - +--------+ +----------+ +-----------+ - | Laptop | <--> | Jumphost | <--> | FooServer | - +--------+ +----------+ +-----------+ - - OR - - +--------+ +----------+ +-----------+ - | Laptop | <--> | Firewall | <--> | FooServer | - +--------+ +----------+ +-----------+ - 192.168.1.5 121.1.2.3 10.10.29.68 -``` - -## Usage: - -You can see `ssh`, `scp`, `ProxyCommand` on `examples` folder. 
- -### ssh - -See [example/ssh/ssh.go](./example/ssh/ssh.go) - -[embedmd]:# (example/ssh/ssh.go go) -```go -package main - -import ( - "fmt" - "time" - - "github.com/appleboy/easyssh-proxy" -) - -func main() { - // Create MakeConfig instance with remote username, server address and path to private key. - ssh := &easyssh.MakeConfig{ - User: "appleboy", - Server: "example.com", - // Optional key or Password without either we try to contact your agent SOCKET - //Password: "password", - // Paste your source content of private key - // Key: `-----BEGIN RSA PRIVATE KEY----- - // MIIEpAIBAAKCAQEA4e2D/qPN08pzTac+a8ZmlP1ziJOXk45CynMPtva0rtK/RB26 - // 7XC9wlRna4b3Ln8ew3q1ZcBjXwD4ppbTlmwAfQIaZTGJUgQbdsO9YA== - // -----END RSA PRIVATE KEY----- - // `, - KeyPath: "/Users/username/.ssh/id_rsa", - Port: "22", - Timeout: 60 * time.Second, - } - - // Call Run method with command you want to run on remote server. - stdout, stderr, done, err := ssh.Run("ls -al", 60) - // Handle errors - if err != nil { - panic("Can't run remote command: " + err.Error()) - } else { - fmt.Println("don is :", done, "stdout is :", stdout, "; stderr is :", stderr) - } - -} -``` - -### scp - -See [example/scp/scp.go](./example/scp/scp.go) - -[embedmd]:# (example/scp/scp.go go) -```go -package main - -import ( - "fmt" - - "github.com/appleboy/easyssh-proxy" -) - -func main() { - // Create MakeConfig instance with remote username, server address and path to private key. - ssh := &easyssh.MakeConfig{ - User: "appleboy", - Server: "example.com", - Password: "123qwe", - Port: "22", - } - - // Call Scp method with file you want to upload to remote server. - // Please make sure the `tmp` floder exists. - err := ssh.Scp("/root/source.csv", "/tmp/target.csv") - - // Handle errors - if err != nil { - panic("Can't run remote command: " + err.Error()) - } else { - fmt.Println("success") - } -} -``` - -### SSH ProxyCommand - -See [example/proxy/proxy.go](./example/proxy/proxy.go) - -[embedmd]:# (example/proxy/proxy.go go /\tssh :=/ /\t}$/) -```go - ssh := &easyssh.MakeConfig{ - User: "drone-scp", - Server: "localhost", - Port: "22", - KeyPath: "./tests/.ssh/id_rsa", - Proxy: easyssh.DefaultConfig{ - User: "drone-scp", - Server: "localhost", - Port: "22", - KeyPath: "./tests/.ssh/id_rsa", - }, - } -``` - -### SSH Stream Log - -See [example/stream/stream.go](./example/stream/stream.go) - -[embedmd]:# (example/stream/stream.go go /func/ /^}$/) -```go -func main() { - // Create MakeConfig instance with remote username, server address and path to private key. - ssh := &easyssh.MakeConfig{ - Server: "localhost", - User: "drone-scp", - KeyPath: "./tests/.ssh/id_rsa", - Port: "22", - Timeout: 60 * time.Second, - } - - // Call Run method with command you want to run on remote server. - stdoutChan, stderrChan, doneChan, errChan, err := ssh.Stream("for i in {1..5}; do echo ${i}; sleep 1; done; exit 2;", 60) - // Handle errors - if err != nil { - panic("Can't run remote command: " + err.Error()) - } else { - // read from the output channel until the done signal is passed - isTimeout := true - loop: - for { - select { - case isTimeout = <-doneChan: - break loop - case outline := <-stdoutChan: - fmt.Println("out:", outline) - case errline := <-stderrChan: - fmt.Println("err:", errline) - case err = <-errChan: - } - } - - // get exit code or command error. 
- if err != nil { - fmt.Println("err: " + err.Error()) - } - - // command time out - if !isTimeout { - fmt.Println("Error: command timeout") - } - } -} -``` diff --git a/vendor/github.com/appleboy/easyssh-proxy/easyssh.go b/vendor/github.com/appleboy/easyssh-proxy/easyssh.go deleted file mode 100644 index 87c906919..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/easyssh.go +++ /dev/null @@ -1,329 +0,0 @@ -// Package easyssh provides a simple implementation of some SSH protocol -// features in Go. You can simply run a command on a remote server or get a file -// even simpler than native console SSH client. You don't need to think about -// Dials, sessions, defers, or public keys... Let easyssh think about it! -package easyssh - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" -) - -var defaultTimeout = 60 * time.Second - -type ( - // MakeConfig Contains main authority information. - // User field should be a name of user on remote server (ex. john in ssh john@example.com). - // Server field should be a remote machine address (ex. example.com in ssh john@example.com) - // Key is a path to private key on your local machine. - // Port is SSH server port on remote machine. - // Note: easyssh looking for private key in user's home directory (ex. /home/john + Key). - // Then ensure your Key begins from '/' (ex. /.ssh/id_rsa) - MakeConfig struct { - User string - Server string - Key string - KeyPath string - Port string - Password string - Timeout time.Duration - Proxy DefaultConfig - } - - // DefaultConfig for ssh proxy config - DefaultConfig struct { - User string - Server string - Key string - KeyPath string - Port string - Password string - Timeout time.Duration - } -) - -// returns ssh.Signer from user you running app home path + cutted key path. -// (ex. pubkey,err := getKeyFile("/.ssh/id_rsa") ) -func getKeyFile(keypath string) (ssh.Signer, error) { - buf, err := ioutil.ReadFile(keypath) - if err != nil { - return nil, err - } - - pubkey, err := ssh.ParsePrivateKey(buf) - if err != nil { - return nil, err - } - - return pubkey, nil -} - -// returns *ssh.ClientConfig and io.Closer. -// if io.Closer is not nil, io.Closer.Close() should be called when -// *ssh.ClientConfig is no longer used. 
-func getSSHConfig(config DefaultConfig) (*ssh.ClientConfig, io.Closer) { - var sshAgent io.Closer - - // auths holds the detected ssh auth methods - auths := []ssh.AuthMethod{} - - // figure out what auths are requested, what is supported - if config.Password != "" { - auths = append(auths, ssh.Password(config.Password)) - } - if config.KeyPath != "" { - if pubkey, err := getKeyFile(config.KeyPath); err != nil { - log.Printf("getKeyFile error: %v\n", err) - } else { - auths = append(auths, ssh.PublicKeys(pubkey)) - } - } - - if config.Key != "" { - if signer, err := ssh.ParsePrivateKey([]byte(config.Key)); err != nil { - log.Printf("ssh.ParsePrivateKey: %v\n", err) - } else { - auths = append(auths, ssh.PublicKeys(signer)) - } - } - - if sshAgent, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { - auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)) - } - - return &ssh.ClientConfig{ - Timeout: config.Timeout, - User: config.User, - Auth: auths, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - }, sshAgent -} - -// Connect to remote server using MakeConfig struct and returns *ssh.Session -func (ssh_conf *MakeConfig) Connect() (*ssh.Session, error) { - var client *ssh.Client - var err error - - targetConfig, closer := getSSHConfig(DefaultConfig{ - User: ssh_conf.User, - Key: ssh_conf.Key, - KeyPath: ssh_conf.KeyPath, - Password: ssh_conf.Password, - Timeout: ssh_conf.Timeout, - }) - if closer != nil { - defer closer.Close() - } - - // Enable proxy command - if ssh_conf.Proxy.Server != "" { - proxyConfig, closer := getSSHConfig(DefaultConfig{ - User: ssh_conf.Proxy.User, - Key: ssh_conf.Proxy.Key, - KeyPath: ssh_conf.Proxy.KeyPath, - Password: ssh_conf.Proxy.Password, - Timeout: ssh_conf.Proxy.Timeout, - }) - if closer != nil { - defer closer.Close() - } - - proxyClient, err := ssh.Dial("tcp", net.JoinHostPort(ssh_conf.Proxy.Server, ssh_conf.Proxy.Port), proxyConfig) - if err != nil { - return nil, err - } - - conn, err := proxyClient.Dial("tcp", net.JoinHostPort(ssh_conf.Server, ssh_conf.Port)) - if err != nil { - return nil, err - } - - ncc, chans, reqs, err := ssh.NewClientConn(conn, net.JoinHostPort(ssh_conf.Server, ssh_conf.Port), targetConfig) - if err != nil { - return nil, err - } - - client = ssh.NewClient(ncc, chans, reqs) - } else { - client, err = ssh.Dial("tcp", net.JoinHostPort(ssh_conf.Server, ssh_conf.Port), targetConfig) - if err != nil { - return nil, err - } - } - - session, err := client.NewSession() - if err != nil { - return nil, err - } - - return session, nil -} - -// Stream returns one channel that combines the stdout and stderr of the command -// as it is run on the remote machine, and another that sends true when the -// command is done. The sessions and channels will then be closed. 
-func (ssh_conf *MakeConfig) Stream(command string, timeout ...time.Duration) (<-chan string, <-chan string, <-chan bool, <-chan error, error) { - // continuously send the command's output over the channel - stdoutChan := make(chan string) - stderrChan := make(chan string) - doneChan := make(chan bool) - errChan := make(chan error) - - // connect to remote host - session, err := ssh_conf.Connect() - if err != nil { - return stdoutChan, stderrChan, doneChan, errChan, err - } - // defer session.Close() - // connect to both outputs (they are of type io.Reader) - outReader, err := session.StdoutPipe() - if err != nil { - return stdoutChan, stderrChan, doneChan, errChan, err - } - errReader, err := session.StderrPipe() - if err != nil { - return stdoutChan, stderrChan, doneChan, errChan, err - } - err = session.Start(command) - if err != nil { - return stdoutChan, stderrChan, doneChan, errChan, err - } - - // combine outputs, create a line-by-line scanner - stdoutReader := io.MultiReader(outReader) - stderrReader := io.MultiReader(errReader) - stdoutScanner := bufio.NewScanner(stdoutReader) - stderrScanner := bufio.NewScanner(stderrReader) - - go func(stdoutScanner, stderrScanner *bufio.Scanner, stdoutChan, stderrChan chan string, doneChan chan bool, errChan chan error) { - defer close(stdoutChan) - defer close(stderrChan) - defer close(doneChan) - defer close(errChan) - defer session.Close() - - // default timeout value - executeTimeout := defaultTimeout - if len(timeout) > 0 { - executeTimeout = timeout[0] - } - timeoutChan := time.After(executeTimeout) - res := make(chan struct{}, 1) - var resWg sync.WaitGroup - resWg.Add(2) - - go func() { - for stdoutScanner.Scan() { - stdoutChan <- stdoutScanner.Text() - } - resWg.Done() - }() - - go func() { - for stderrScanner.Scan() { - stderrChan <- stderrScanner.Text() - } - resWg.Done() - }() - - go func() { - resWg.Wait() - // close all of our open resources - res <- struct{}{} - }() - - select { - case <-res: - errChan <- session.Wait() - doneChan <- true - case <-timeoutChan: - stderrChan <- "Run Command Timeout!" - errChan <- nil - doneChan <- false - } - }(stdoutScanner, stderrScanner, stdoutChan, stderrChan, doneChan, errChan) - - return stdoutChan, stderrChan, doneChan, errChan, err -} - -// Run command on remote machine and returns its stdout as a string -func (ssh_conf *MakeConfig) Run(command string, timeout ...time.Duration) (outStr string, errStr string, isTimeout bool, err error) { - stdoutChan, stderrChan, doneChan, errChan, err := ssh_conf.Stream(command, timeout...) - if err != nil { - return outStr, errStr, isTimeout, err - } - // read from the output channel until the done signal is passed -loop: - for { - select { - case isTimeout = <-doneChan: - break loop - case outline := <-stdoutChan: - if outline != "" { - outStr += outline + "\n" - } - case errline := <-stderrChan: - if errline != "" { - errStr += errline + "\n" - } - case err = <-errChan: - } - } - // return the concatenation of all signals from the output channel - return outStr, errStr, isTimeout, err -} - -// Scp uploads sourceFile to remote machine like native scp console app. 
-func (ssh_conf *MakeConfig) Scp(sourceFile string, etargetFile string) error { - session, err := ssh_conf.Connect() - - if err != nil { - return err - } - defer session.Close() - - targetFile := filepath.Base(etargetFile) - - src, srcErr := os.Open(sourceFile) - - if srcErr != nil { - return srcErr - } - - srcStat, statErr := src.Stat() - - if statErr != nil { - return statErr - } - - go func() { - w, err := session.StdinPipe() - - if err != nil { - return - } - defer w.Close() - - fmt.Fprintln(w, "C0644", srcStat.Size(), targetFile) - - if srcStat.Size() > 0 { - io.Copy(w, src) - fmt.Fprint(w, "\x00") - } else { - fmt.Fprint(w, "\x00") - } - }() - - return session.Run(fmt.Sprintf("scp -tr %s", etargetFile)) -} diff --git a/vendor/github.com/appleboy/easyssh-proxy/go.mod b/vendor/github.com/appleboy/easyssh-proxy/go.mod deleted file mode 100644 index 52b827230..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/appleboy/easyssh-proxy - -go 1.12 - -require ( - github.com/stretchr/testify v1.3.0 - golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 -) diff --git a/vendor/github.com/appleboy/easyssh-proxy/go.sum b/vendor/github.com/appleboy/easyssh-proxy/go.sum deleted file mode 100644 index 1311ae3dc..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 h1:jsG6UpNLt9iAsb0S2AGW28DveNzzgmbXR+ENoPjUeIU= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/appleboy/easyssh-proxy/pipeline.libsonnet b/vendor/github.com/appleboy/easyssh-proxy/pipeline.libsonnet deleted file mode 100644 index 599475be5..000000000 --- a/vendor/github.com/appleboy/easyssh-proxy/pipeline.libsonnet +++ /dev/null @@ -1,113 +0,0 @@ -{ - test:: { - kind: 'pipeline', - name: 'testing', - platform: { - os: 'linux', - arch: 'amd64', - }, - steps: [ - { - name: 'vet', - image: 'golang:1.12', - pull: 'always', - environment: { - GO111MODULE: 'on', - }, - commands: [ - 'make vet', - ], - volumes: [ - { - name: 'gopath', - path: '/go', - }, - ], - }, - { - name: 'lint', - image: 'golang:1.12', - pull: 'always', - environment: { - GO111MODULE: 'on', - }, - commands: [ - 'make lint', - ], - volumes: [ - { - name: 'gopath', - path: '/go', - }, - ], - }, - { - name: 'misspell', - image: 'golang:1.12', - pull: 'always', - environment: { - GO111MODULE: 'on', - }, - commands: [ - 'make misspell-check', - ], - volumes: [ - { - name: 'gopath', - path: '/go', - }, - ], - }, - { - name: 'embedmd', - image: 'golang:1.12', - pull: 'always', - environment: { - GO111MODULE: 'on', - }, - 
commands: [ - 'make embedmd', - ], - volumes: [ - { - name: 'gopath', - path: '/go', - }, - ], - }, - { - name: 'test', - image: 'golang:1.12-alpine', - pull: 'always', - environment: { - GO111MODULE: 'on', - }, - commands: [ - 'apk add git make curl perl bash build-base zlib-dev ucl-dev', - 'make ssh-server', - 'make test', - ], - volumes: [ - { - name: 'gopath', - path: '/go', - }, - ], - }, - { - name: 'codecov', - image: 'robertstettner/drone-codecov', - pull: 'always', - settings: { - token: { 'from_secret': 'codecov_token' }, - }, - }, - ], - volumes: [ - { - name: 'gopath', - temp: {}, - }, - ], - } -} diff --git a/vendor/github.com/astaxie/beego/.gitignore b/vendor/github.com/astaxie/beego/.gitignore deleted file mode 100644 index e1b652910..000000000 --- a/vendor/github.com/astaxie/beego/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.idea -.vscode -.DS_Store -*.swp -*.swo -beego.iml diff --git a/vendor/github.com/astaxie/beego/.gosimpleignore b/vendor/github.com/astaxie/beego/.gosimpleignore deleted file mode 100644 index 84df9b95d..000000000 --- a/vendor/github.com/astaxie/beego/.gosimpleignore +++ /dev/null @@ -1,4 +0,0 @@ -github.com/astaxie/beego/*/*:S1012 -github.com/astaxie/beego/*:S1012 -github.com/astaxie/beego/*/*:S1007 -github.com/astaxie/beego/*:S1007 \ No newline at end of file diff --git a/vendor/github.com/astaxie/beego/.travis.yml b/vendor/github.com/astaxie/beego/.travis.yml deleted file mode 100644 index ed04c9d1b..000000000 --- a/vendor/github.com/astaxie/beego/.travis.yml +++ /dev/null @@ -1,63 +0,0 @@ -language: go - -go: - - "1.10.x" - - "1.11.x" -services: - - redis-server - - mysql - - postgresql - - memcached -env: - - ORM_DRIVER=sqlite3 ORM_SOURCE=$TRAVIS_BUILD_DIR/orm_test.db - - ORM_DRIVER=postgres ORM_SOURCE="user=postgres dbname=orm_test sslmode=disable" -before_install: - - git clone git://github.com/ideawu/ssdb.git - - cd ssdb - - make - - cd .. -install: - - go get github.com/lib/pq - - go get github.com/go-sql-driver/mysql - - go get github.com/mattn/go-sqlite3 - - go get github.com/bradfitz/gomemcache/memcache - - go get github.com/gomodule/redigo/redis - - go get github.com/beego/x2j - - go get github.com/couchbase/go-couchbase - - go get github.com/beego/goyaml2 - - go get gopkg.in/yaml.v2 - - go get github.com/belogik/goes - - go get github.com/siddontang/ledisdb/config - - go get github.com/siddontang/ledisdb/ledis - - go get github.com/ssdb/gossdb/ssdb - - go get github.com/cloudflare/golz4 - - go get github.com/gogo/protobuf/proto - - go get github.com/Knetic/govaluate - - go get github.com/casbin/casbin - - go get github.com/elazarl/go-bindata-assetfs - - go get -u honnef.co/go/tools/cmd/gosimple - - go get -u github.com/mdempsky/unconvert - - go get -u github.com/gordonklaus/ineffassign - - go get -u github.com/golang/lint/golint - - go get -u github.com/go-redis/redis -before_script: - - psql --version - - sh -c "if [ '$ORM_DRIVER' = 'postgres' ]; then psql -c 'create database orm_test;' -U postgres; fi" - - sh -c "if [ '$ORM_DRIVER' = 'mysql' ]; then mysql -u root -e 'create database orm_test;'; fi" - - sh -c "if [ '$ORM_DRIVER' = 'sqlite' ]; then touch $TRAVIS_BUILD_DIR/orm_test.db; fi" - - sh -c "go get github.com/golang/lint/golint; golint ./...;" - - sh -c "go list ./... | grep -v vendor | xargs go vet -v" - - mkdir -p res/var - - ./ssdb/ssdb-server ./ssdb/ssdb.conf -d -after_script: - - killall -w ssdb-server - - rm -rf ./res/var/* -script: - - go test -v ./... - - gosimple -ignore "$(cat .gosimpleignore)" $(go list ./... 
| grep -v /vendor/) - - unconvert $(go list ./... | grep -v /vendor/) - - ineffassign . - - find . ! \( -path './vendor' -prune \) -type f -name '*.go' -print0 | xargs -0 gofmt -l -s - - golint ./... -addons: - postgresql: "9.6" diff --git a/vendor/github.com/astaxie/beego/CONTRIBUTING.md b/vendor/github.com/astaxie/beego/CONTRIBUTING.md deleted file mode 100644 index 9d5116165..000000000 --- a/vendor/github.com/astaxie/beego/CONTRIBUTING.md +++ /dev/null @@ -1,52 +0,0 @@ -# Contributing to beego - -beego is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - -Here are instructions to get you started. They are probably not perfect, -please let us know if anything feels wrong or incomplete. - -## Contribution guidelines - -### Pull requests - -First of all. beego follow the gitflow. So please send you pull request -to **develop** branch. We will close the pull request to master branch. - -We are always happy to receive pull requests, and do our best to -review them as fast as possible. Not sure if that typo is worth a pull -request? Do it! We will appreciate it. - -If your pull request is not accepted on the first try, don't be -discouraged! Sometimes we can make a mistake, please do more explaining -for us. We will appreciate it. - -We're trying very hard to keep beego simple and fast. We don't want it -to do everything for everybody. This means that we might decide against -incorporating a new feature. But we will give you some advice on how to -do it in other way. - -### Create issues - -Any significant improvement should be documented as [a GitHub -issue](https://github.com/astaxie/beego/issues) before anybody -starts working on it. - -Also when filing an issue, make sure to answer these five questions: - -- What version of beego are you using (bee version)? -- What operating system and processor architecture are you using? -- What did you do? -- What did you expect to see? -- What did you see instead? - -### but check existing issues and docs first! - -Please take a moment to check that an issue doesn't already exist -documenting your bug report or improvement proposal. If it does, it -never hurts to add a quick "+1" or "I have this problem too". This will -help prioritize the most common problems and requests. - -Also if you don't know how to use it. please make sure you have read though -the docs in http://beego.me/docs \ No newline at end of file diff --git a/vendor/github.com/astaxie/beego/LICENSE b/vendor/github.com/astaxie/beego/LICENSE deleted file mode 100644 index 5dbd42435..000000000 --- a/vendor/github.com/astaxie/beego/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 astaxie - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/astaxie/beego/README.md b/vendor/github.com/astaxie/beego/README.md deleted file mode 100644 index 5063645c4..000000000 --- a/vendor/github.com/astaxie/beego/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# Beego [![Build Status](https://travis-ci.org/astaxie/beego.svg?branch=master)](https://travis-ci.org/astaxie/beego) [![GoDoc](http://godoc.org/github.com/astaxie/beego?status.svg)](http://godoc.org/github.com/astaxie/beego) [![Foundation](https://img.shields.io/badge/Golang-Foundation-green.svg)](http://golangfoundation.org) [![Go Report Card](https://goreportcard.com/badge/github.com/astaxie/beego)](https://goreportcard.com/report/github.com/astaxie/beego) - - -beego is used for rapid development of RESTful APIs, web apps and backend services in Go. -It is inspired by Tornado, Sinatra and Flask. beego has some Go-specific features such as interfaces and struct embedding. - - Response time ranking: [web-frameworks](https://github.com/the-benchmarker/web-frameworks). - -###### More info at [beego.me](http://beego.me). - -## Quick Start - -#### Download and install - - go get github.com/astaxie/beego - -#### Create file `hello.go` -```go -package main - -import "github.com/astaxie/beego" - -func main(){ - beego.Run() -} -``` -#### Build and run - - go build hello.go - ./hello - -#### Go to [http://localhost:8080](http://localhost:8080) - -Congratulations! You've just built your first **beego** app. - -###### Please see [Documentation](http://beego.me/docs) for more. - -## Features - -* RESTful support -* MVC architecture -* Modularity -* Auto API documents -* Annotation router -* Namespace -* Powerful development tools -* Full stack for Web & API - -## Documentation - -* [English](http://beego.me/docs/intro/) -* [中文文档](http://beego.me/docs/intro/) -* [Русский](http://beego.me/docs/intro/) - -## Community - -* [http://beego.me/community](http://beego.me/community) -* Welcome to join us in Slack: [https://beego.slack.com](https://beego.slack.com), you can get invited from [here](https://github.com/beego/beedoc/issues/232) - -## License - -beego source code is licensed under the Apache Licence, Version 2.0 -(http://www.apache.org/licenses/LICENSE-2.0.html). diff --git a/vendor/github.com/astaxie/beego/admin.go b/vendor/github.com/astaxie/beego/admin.go deleted file mode 100644 index 256065011..000000000 --- a/vendor/github.com/astaxie/beego/admin.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "os" - "reflect" - "text/template" - "time" - - "github.com/astaxie/beego/grace" - "github.com/astaxie/beego/logs" - "github.com/astaxie/beego/toolbox" - "github.com/astaxie/beego/utils" -) - -// BeeAdminApp is the default adminApp used by admin module. -var beeAdminApp *adminApp - -// FilterMonitorFunc is default monitor filter when admin module is enable. 
-// if this func returns, admin module records qps for this request by condition of this function logic. -// usage: -// func MyFilterMonitor(method, requestPath string, t time.Duration, pattern string, statusCode int) bool { -// if method == "POST" { -// return false -// } -// if t.Nanoseconds() < 100 { -// return false -// } -// if strings.HasPrefix(requestPath, "/astaxie") { -// return false -// } -// return true -// } -// beego.FilterMonitorFunc = MyFilterMonitor. -var FilterMonitorFunc func(string, string, time.Duration, string, int) bool - -func init() { - beeAdminApp = &adminApp{ - routers: make(map[string]http.HandlerFunc), - } - beeAdminApp.Route("/", adminIndex) - beeAdminApp.Route("/qps", qpsIndex) - beeAdminApp.Route("/prof", profIndex) - beeAdminApp.Route("/healthcheck", healthcheck) - beeAdminApp.Route("/task", taskStatus) - beeAdminApp.Route("/listconf", listConf) - FilterMonitorFunc = func(string, string, time.Duration, string, int) bool { return true } -} - -// AdminIndex is the default http.Handler for admin module. -// it matches url pattern "/". -func adminIndex(rw http.ResponseWriter, _ *http.Request) { - execTpl(rw, map[interface{}]interface{}{}, indexTpl, defaultScriptsTpl) -} - -// QpsIndex is the http.Handler for writing qps statistics map result info in http.ResponseWriter. -// it's registered with url pattern "/qps" in admin module. -func qpsIndex(rw http.ResponseWriter, _ *http.Request) { - data := make(map[interface{}]interface{}) - data["Content"] = toolbox.StatisticsMap.GetMap() - - // do html escape before display path, avoid xss - if content, ok := (data["Content"]).(M); ok { - if resultLists, ok := (content["Data"]).([][]string); ok { - for i := range resultLists { - if len(resultLists[i]) > 0 { - resultLists[i][0] = template.HTMLEscapeString(resultLists[i][0]) - } - } - } - } - - execTpl(rw, data, qpsTpl, defaultScriptsTpl) -} - -// ListConf is the http.Handler of displaying all beego configuration values as key/value pair. -// it's registered with url pattern "/listconf" in admin module. 
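Context for anyone who relied on this hook: FilterMonitorFunc was the documented way to control which requests the admin module counted towards its QPS statistics. Below is a minimal sketch of overriding it from application code, based only on the signature and usage comment in the removed admin.go above; the /internal prefix and the 100 ns threshold are illustrative values, and the admin endpoint itself is still gated by BConfig.Listen.EnableAdmin.

```go
package main

import (
	"strings"
	"time"

	"github.com/astaxie/beego"
)

func main() {
	// Mirror the usage comment on the removed FilterMonitorFunc: skip QPS
	// accounting for POST requests, for handlers that finished in under
	// 100 ns, and for an illustrative /internal path prefix.
	beego.FilterMonitorFunc = func(method, requestPath string, t time.Duration, pattern string, statusCode int) bool {
		if method == "POST" {
			return false
		}
		if t.Nanoseconds() < 100 {
			return false
		}
		return !strings.HasPrefix(requestPath, "/internal")
	}
	beego.Run()
}
```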
-func listConf(rw http.ResponseWriter, r *http.Request) { - r.ParseForm() - command := r.Form.Get("command") - if command == "" { - rw.Write([]byte("command not support")) - return - } - - data := make(map[interface{}]interface{}) - switch command { - case "conf": - m := make(M) - list("BConfig", BConfig, m) - m["AppConfigPath"] = appConfigPath - m["AppConfigProvider"] = appConfigProvider - tmpl := template.Must(template.New("dashboard").Parse(dashboardTpl)) - tmpl = template.Must(tmpl.Parse(configTpl)) - tmpl = template.Must(tmpl.Parse(defaultScriptsTpl)) - - data["Content"] = m - - tmpl.Execute(rw, data) - - case "router": - content := PrintTree() - content["Fields"] = []string{ - "Router Pattern", - "Methods", - "Controller", - } - data["Content"] = content - data["Title"] = "Routers" - execTpl(rw, data, routerAndFilterTpl, defaultScriptsTpl) - case "filter": - var ( - content = M{ - "Fields": []string{ - "Router Pattern", - "Filter Function", - }, - } - filterTypes = []string{} - filterTypeData = make(M) - ) - - if BeeApp.Handlers.enableFilter { - var filterType string - for k, fr := range map[int]string{ - BeforeStatic: "Before Static", - BeforeRouter: "Before Router", - BeforeExec: "Before Exec", - AfterExec: "After Exec", - FinishRouter: "Finish Router"} { - if bf := BeeApp.Handlers.filters[k]; len(bf) > 0 { - filterType = fr - filterTypes = append(filterTypes, filterType) - resultList := new([][]string) - for _, f := range bf { - var result = []string{ - f.pattern, - utils.GetFuncName(f.filterFunc), - } - *resultList = append(*resultList, result) - } - filterTypeData[filterType] = resultList - } - } - } - - content["Data"] = filterTypeData - content["Methods"] = filterTypes - - data["Content"] = content - data["Title"] = "Filters" - execTpl(rw, data, routerAndFilterTpl, defaultScriptsTpl) - default: - rw.Write([]byte("command not support")) - } -} - -func list(root string, p interface{}, m M) { - pt := reflect.TypeOf(p) - pv := reflect.ValueOf(p) - if pt.Kind() == reflect.Ptr { - pt = pt.Elem() - pv = pv.Elem() - } - for i := 0; i < pv.NumField(); i++ { - var key string - if root == "" { - key = pt.Field(i).Name - } else { - key = root + "." + pt.Field(i).Name - } - if pv.Field(i).Kind() == reflect.Struct { - list(key, pv.Field(i).Interface(), m) - } else { - m[key] = pv.Field(i).Interface() - } - } -} - -// PrintTree prints all registered routers. 
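The list helper just above is the piece that makes /listconf work: a small reflection walker that flattens beego's nested BConfig struct into dotted keys. A standalone sketch of the same technique, using an illustrative two-level struct rather than the real BConfig:

```go
package main

import (
	"fmt"
	"reflect"
)

// flatten walks a struct (or pointer to struct) and records every leaf field
// under a dotted "Root.Field.SubField" key, like the removed list() helper.
func flatten(root string, p interface{}, m map[string]interface{}) {
	pt := reflect.TypeOf(p)
	pv := reflect.ValueOf(p)
	if pt.Kind() == reflect.Ptr {
		pt = pt.Elem()
		pv = pv.Elem()
	}
	for i := 0; i < pv.NumField(); i++ {
		key := pt.Field(i).Name
		if root != "" {
			key = root + "." + key
		}
		if pv.Field(i).Kind() == reflect.Struct {
			flatten(key, pv.Field(i).Interface(), m)
		} else {
			m[key] = pv.Field(i).Interface()
		}
	}
}

type listenDemo struct {
	HTTPAddr string
	HTTPPort int
}

type configDemo struct {
	AppName string
	Listen  listenDemo
}

func main() {
	m := make(map[string]interface{})
	flatten("BConfig", configDemo{AppName: "demo", Listen: listenDemo{HTTPPort: 8080}}, m)
	fmt.Println(m)
	// map[BConfig.AppName:demo BConfig.Listen.HTTPAddr: BConfig.Listen.HTTPPort:8080]
}
```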
-func PrintTree() M { - var ( - content = M{} - methods = []string{} - methodsData = make(M) - ) - for method, t := range BeeApp.Handlers.routers { - - resultList := new([][]string) - - printTree(resultList, t) - - methods = append(methods, method) - methodsData[method] = resultList - } - - content["Data"] = methodsData - content["Methods"] = methods - return content -} - -func printTree(resultList *[][]string, t *Tree) { - for _, tr := range t.fixrouters { - printTree(resultList, tr) - } - if t.wildcard != nil { - printTree(resultList, t.wildcard) - } - for _, l := range t.leaves { - if v, ok := l.runObject.(*ControllerInfo); ok { - if v.routerType == routerTypeBeego { - var result = []string{ - v.pattern, - fmt.Sprintf("%s", v.methods), - v.controllerType.String(), - } - *resultList = append(*resultList, result) - } else if v.routerType == routerTypeRESTFul { - var result = []string{ - v.pattern, - fmt.Sprintf("%s", v.methods), - "", - } - *resultList = append(*resultList, result) - } else if v.routerType == routerTypeHandler { - var result = []string{ - v.pattern, - "", - "", - } - *resultList = append(*resultList, result) - } - } - } -} - -// ProfIndex is a http.Handler for showing profile command. -// it's in url pattern "/prof" in admin module. -func profIndex(rw http.ResponseWriter, r *http.Request) { - r.ParseForm() - command := r.Form.Get("command") - if command == "" { - return - } - - var ( - format = r.Form.Get("format") - data = make(map[interface{}]interface{}) - result bytes.Buffer - ) - toolbox.ProcessInput(command, &result) - data["Content"] = result.String() - - if format == "json" && command == "gc summary" { - dataJSON, err := json.Marshal(data) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - rw.Header().Set("Content-Type", "application/json") - rw.Write(dataJSON) - return - } - - data["Title"] = command - defaultTpl := defaultScriptsTpl - if command == "gc summary" { - defaultTpl = gcAjaxTpl - } - execTpl(rw, data, profillingTpl, defaultTpl) -} - -// Healthcheck is a http.Handler calling health checking and showing the result. -// it's in "/healthcheck" pattern in admin module. -func healthcheck(rw http.ResponseWriter, _ *http.Request) { - var ( - result []string - data = make(map[interface{}]interface{}) - resultList = new([][]string) - content = M{ - "Fields": []string{"Name", "Message", "Status"}, - } - ) - - for name, h := range toolbox.AdminCheckList { - if err := h.Check(); err != nil { - result = []string{ - "error", - name, - err.Error(), - } - } else { - result = []string{ - "success", - name, - "OK", - } - } - *resultList = append(*resultList, result) - } - - content["Data"] = resultList - data["Content"] = content - data["Title"] = "Health Check" - execTpl(rw, data, healthCheckTpl, defaultScriptsTpl) -} - -// TaskStatus is a http.Handler with running task status (task name, status and the last execution). -// it's in "/task" pattern in admin module. -func taskStatus(rw http.ResponseWriter, req *http.Request) { - data := make(map[interface{}]interface{}) - - // Run Task - req.ParseForm() - taskname := req.Form.Get("taskname") - if taskname != "" { - if t, ok := toolbox.AdminTaskList[taskname]; ok { - if err := t.Run(); err != nil { - data["Message"] = []string{"error", fmt.Sprintf("%s", err)} - } - data["Message"] = []string{"success", fmt.Sprintf("%s run success,Now the Status is
%s", taskname, t.GetStatus())} - } else { - data["Message"] = []string{"warning", fmt.Sprintf("there's no task which named: %s", taskname)} - } - } - - // List Tasks - content := make(M) - resultList := new([][]string) - var fields = []string{ - "Task Name", - "Task Spec", - "Task Status", - "Last Time", - "", - } - for tname, tk := range toolbox.AdminTaskList { - result := []string{ - tname, - tk.GetSpec(), - tk.GetStatus(), - tk.GetPrev().String(), - } - *resultList = append(*resultList, result) - } - - content["Fields"] = fields - content["Data"] = resultList - data["Content"] = content - data["Title"] = "Tasks" - execTpl(rw, data, tasksTpl, defaultScriptsTpl) -} - -func execTpl(rw http.ResponseWriter, data map[interface{}]interface{}, tpls ...string) { - tmpl := template.Must(template.New("dashboard").Parse(dashboardTpl)) - for _, tpl := range tpls { - tmpl = template.Must(tmpl.Parse(tpl)) - } - tmpl.Execute(rw, data) -} - -// adminApp is an http.HandlerFunc map used as beeAdminApp. -type adminApp struct { - routers map[string]http.HandlerFunc -} - -// Route adds http.HandlerFunc to adminApp with url pattern. -func (admin *adminApp) Route(pattern string, f http.HandlerFunc) { - admin.routers[pattern] = f -} - -// Run adminApp http server. -// Its addr is defined in configuration file as adminhttpaddr and adminhttpport. -func (admin *adminApp) Run() { - if len(toolbox.AdminTaskList) > 0 { - toolbox.StartTask() - } - addr := BConfig.Listen.AdminAddr - - if BConfig.Listen.AdminPort != 0 { - addr = fmt.Sprintf("%s:%d", BConfig.Listen.AdminAddr, BConfig.Listen.AdminPort) - } - for p, f := range admin.routers { - http.Handle(p, f) - } - logs.Info("Admin server Running on %s", addr) - - var err error - if BConfig.Listen.Graceful { - err = grace.ListenAndServe(addr, nil) - } else { - err = http.ListenAndServe(addr, nil) - } - if err != nil { - logs.Critical("Admin ListenAndServe: ", err, fmt.Sprintf("%d", os.Getpid())) - } -} diff --git a/vendor/github.com/astaxie/beego/adminui.go b/vendor/github.com/astaxie/beego/adminui.go deleted file mode 100644 index cdcdef33f..000000000 --- a/vendor/github.com/astaxie/beego/adminui.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -var indexTpl = ` -{{define "content"}} -

[Deleted template bodies elided: adminui.go defined the HTML templates rendered by execTpl for the admin dashboard pages, namely indexTpl, profillingTpl, defaultScriptsTpl, gcAjaxTpl, qpsTpl, configTpl, routerAndFilterTpl, tasksTpl, healthCheckTpl and the base dashboardTpl.]
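Those pages were stitched together by the execTpl helper shown earlier in admin.go: the base dashboard template is parsed first, then each page layers its {{define "content"}} and {{define "scripts"}} blocks on top before Execute runs. A minimal, framework-free sketch of that composition; the template text here is illustrative and is not the removed markup.

```go
package main

import (
	"os"
	"text/template"
)

const dashboard = `== {{.Title}} ==
{{template "content" .}}
{{template "scripts" .}}
`

const content = `{{define "content"}}hello, {{.Content}}{{end}}`
const scripts = `{{define "scripts"}}(no scripts){{end}}`

func main() {
	// Same shape as the removed execTpl: parse the base "dashboard" template,
	// then add the page-specific define blocks before executing it.
	data := map[interface{}]interface{}{"Title": "Demo", "Content": "world"}
	tmpl := template.Must(template.New("dashboard").Parse(dashboard))
	tmpl = template.Must(tmpl.Parse(content))
	tmpl = template.Must(tmpl.Parse(scripts))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```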
- - - - - - - -{{template "scripts" .}} - - -` diff --git a/vendor/github.com/astaxie/beego/app.go b/vendor/github.com/astaxie/beego/app.go deleted file mode 100644 index 32ff159db..000000000 --- a/vendor/github.com/astaxie/beego/app.go +++ /dev/null @@ -1,497 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/fcgi" - "os" - "path" - "strings" - "time" - - "github.com/astaxie/beego/grace" - "github.com/astaxie/beego/logs" - "github.com/astaxie/beego/utils" - "golang.org/x/crypto/acme/autocert" -) - -var ( - // BeeApp is an application instance - BeeApp *App -) - -func init() { - // create beego application - BeeApp = NewApp() -} - -// App defines beego application with a new PatternServeMux. -type App struct { - Handlers *ControllerRegister - Server *http.Server -} - -// NewApp returns a new beego application. -func NewApp() *App { - cr := NewControllerRegister() - app := &App{Handlers: cr, Server: &http.Server{}} - return app -} - -// MiddleWare function for http.Handler -type MiddleWare func(http.Handler) http.Handler - -// Run beego application. -func (app *App) Run(mws ...MiddleWare) { - addr := BConfig.Listen.HTTPAddr - - if BConfig.Listen.HTTPPort != 0 { - addr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPAddr, BConfig.Listen.HTTPPort) - } - - var ( - err error - l net.Listener - endRunning = make(chan bool, 1) - ) - - // run cgi server - if BConfig.Listen.EnableFcgi { - if BConfig.Listen.EnableStdIo { - if err = fcgi.Serve(nil, app.Handlers); err == nil { // standard I/O - logs.Info("Use FCGI via standard I/O") - } else { - logs.Critical("Cannot use FCGI via standard I/O", err) - } - return - } - if BConfig.Listen.HTTPPort == 0 { - // remove the Socket file before start - if utils.FileExists(addr) { - os.Remove(addr) - } - l, err = net.Listen("unix", addr) - } else { - l, err = net.Listen("tcp", addr) - } - if err != nil { - logs.Critical("Listen: ", err) - } - if err = fcgi.Serve(l, app.Handlers); err != nil { - logs.Critical("fcgi.Serve: ", err) - } - return - } - - app.Server.Handler = app.Handlers - for i := len(mws) - 1; i >= 0; i-- { - if mws[i] == nil { - continue - } - app.Server.Handler = mws[i](app.Server.Handler) - } - app.Server.ReadTimeout = time.Duration(BConfig.Listen.ServerTimeOut) * time.Second - app.Server.WriteTimeout = time.Duration(BConfig.Listen.ServerTimeOut) * time.Second - app.Server.ErrorLog = logs.GetLogger("HTTP") - - // run graceful mode - if BConfig.Listen.Graceful { - httpsAddr := BConfig.Listen.HTTPSAddr - app.Server.Addr = httpsAddr - if BConfig.Listen.EnableHTTPS || BConfig.Listen.EnableMutualHTTPS { - go func() { - time.Sleep(1000 * time.Microsecond) - if BConfig.Listen.HTTPSPort != 0 { - httpsAddr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPSAddr, BConfig.Listen.HTTPSPort) - app.Server.Addr = httpsAddr - } - server := grace.NewServer(httpsAddr, app.Handlers) - 
server.Server.ReadTimeout = app.Server.ReadTimeout - server.Server.WriteTimeout = app.Server.WriteTimeout - if BConfig.Listen.EnableMutualHTTPS { - if err := server.ListenAndServeMutualTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile, BConfig.Listen.TrustCaFile); err != nil { - logs.Critical("ListenAndServeTLS: ", err, fmt.Sprintf("%d", os.Getpid())) - time.Sleep(100 * time.Microsecond) - endRunning <- true - } - } else { - if BConfig.Listen.AutoTLS { - m := autocert.Manager{ - Prompt: autocert.AcceptTOS, - HostPolicy: autocert.HostWhitelist(BConfig.Listen.Domains...), - Cache: autocert.DirCache(BConfig.Listen.TLSCacheDir), - } - app.Server.TLSConfig = &tls.Config{GetCertificate: m.GetCertificate} - BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile = "", "" - } - if err := server.ListenAndServeTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile); err != nil { - logs.Critical("ListenAndServeTLS: ", err, fmt.Sprintf("%d", os.Getpid())) - time.Sleep(100 * time.Microsecond) - endRunning <- true - } - } - }() - } - if BConfig.Listen.EnableHTTP { - go func() { - server := grace.NewServer(addr, app.Handlers) - server.Server.ReadTimeout = app.Server.ReadTimeout - server.Server.WriteTimeout = app.Server.WriteTimeout - if BConfig.Listen.ListenTCP4 { - server.Network = "tcp4" - } - if err := server.ListenAndServe(); err != nil { - logs.Critical("ListenAndServe: ", err, fmt.Sprintf("%d", os.Getpid())) - time.Sleep(100 * time.Microsecond) - endRunning <- true - } - }() - } - <-endRunning - return - } - - // run normal mode - if BConfig.Listen.EnableHTTPS || BConfig.Listen.EnableMutualHTTPS { - go func() { - time.Sleep(1000 * time.Microsecond) - if BConfig.Listen.HTTPSPort != 0 { - app.Server.Addr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPSAddr, BConfig.Listen.HTTPSPort) - } else if BConfig.Listen.EnableHTTP { - BeeLogger.Info("Start https server error, conflict with http. 
Please reset https port") - return - } - logs.Info("https server Running on https://%s", app.Server.Addr) - if BConfig.Listen.AutoTLS { - m := autocert.Manager{ - Prompt: autocert.AcceptTOS, - HostPolicy: autocert.HostWhitelist(BConfig.Listen.Domains...), - Cache: autocert.DirCache(BConfig.Listen.TLSCacheDir), - } - app.Server.TLSConfig = &tls.Config{GetCertificate: m.GetCertificate} - BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile = "", "" - } else if BConfig.Listen.EnableMutualHTTPS { - pool := x509.NewCertPool() - data, err := ioutil.ReadFile(BConfig.Listen.TrustCaFile) - if err != nil { - BeeLogger.Info("MutualHTTPS should provide TrustCaFile") - return - } - pool.AppendCertsFromPEM(data) - app.Server.TLSConfig = &tls.Config{ - ClientCAs: pool, - ClientAuth: tls.RequireAndVerifyClientCert, - } - } - if err := app.Server.ListenAndServeTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile); err != nil { - logs.Critical("ListenAndServeTLS: ", err) - time.Sleep(100 * time.Microsecond) - endRunning <- true - } - }() - - } - if BConfig.Listen.EnableHTTP { - go func() { - app.Server.Addr = addr - logs.Info("http server Running on http://%s", app.Server.Addr) - if BConfig.Listen.ListenTCP4 { - ln, err := net.Listen("tcp4", app.Server.Addr) - if err != nil { - logs.Critical("ListenAndServe: ", err) - time.Sleep(100 * time.Microsecond) - endRunning <- true - return - } - if err = app.Server.Serve(ln); err != nil { - logs.Critical("ListenAndServe: ", err) - time.Sleep(100 * time.Microsecond) - endRunning <- true - return - } - } else { - if err := app.Server.ListenAndServe(); err != nil { - logs.Critical("ListenAndServe: ", err) - time.Sleep(100 * time.Microsecond) - endRunning <- true - } - } - }() - } - <-endRunning -} - -// Router adds a patterned controller handler to BeeApp. -// it's an alias method of App.Router. -// usage: -// simple router -// beego.Router("/admin", &admin.UserController{}) -// beego.Router("/admin/index", &admin.ArticleController{}) -// -// regex router -// -// beego.Router("/api/:id([0-9]+)", &controllers.RController{}) -// -// custom rules -// beego.Router("/api/list",&RestController{},"*:ListFood") -// beego.Router("/api/create",&RestController{},"post:CreateFood") -// beego.Router("/api/update",&RestController{},"put:UpdateFood") -// beego.Router("/api/delete",&RestController{},"delete:DeleteFood") -func Router(rootpath string, c ControllerInterface, mappingMethods ...string) *App { - BeeApp.Handlers.Add(rootpath, c, mappingMethods...) - return BeeApp -} - -// UnregisterFixedRoute unregisters the route with the specified fixedRoute. It is particularly useful -// in web applications that inherit most routes from a base webapp via the underscore -// import, and aim to overwrite only certain paths. -// The method parameter can be empty or "*" for all HTTP methods, or a particular -// method type (e.g. "GET" or "POST") for selective removal. 
-// -// Usage (replace "GET" with "*" for all methods): -// beego.UnregisterFixedRoute("/yourpreviouspath", "GET") -// beego.Router("/yourpreviouspath", yourControllerAddress, "get:GetNewPage") -func UnregisterFixedRoute(fixedRoute string, method string) *App { - subPaths := splitPath(fixedRoute) - if method == "" || method == "*" { - for m := range HTTPMETHOD { - if _, ok := BeeApp.Handlers.routers[m]; !ok { - continue - } - if BeeApp.Handlers.routers[m].prefix == strings.Trim(fixedRoute, "/ ") { - findAndRemoveSingleTree(BeeApp.Handlers.routers[m]) - continue - } - findAndRemoveTree(subPaths, BeeApp.Handlers.routers[m], m) - } - return BeeApp - } - // Single HTTP method - um := strings.ToUpper(method) - if _, ok := BeeApp.Handlers.routers[um]; ok { - if BeeApp.Handlers.routers[um].prefix == strings.Trim(fixedRoute, "/ ") { - findAndRemoveSingleTree(BeeApp.Handlers.routers[um]) - return BeeApp - } - findAndRemoveTree(subPaths, BeeApp.Handlers.routers[um], um) - } - return BeeApp -} - -func findAndRemoveTree(paths []string, entryPointTree *Tree, method string) { - for i := range entryPointTree.fixrouters { - if entryPointTree.fixrouters[i].prefix == paths[0] { - if len(paths) == 1 { - if len(entryPointTree.fixrouters[i].fixrouters) > 0 { - // If the route had children subtrees, remove just the functional leaf, - // to allow children to function as before - if len(entryPointTree.fixrouters[i].leaves) > 0 { - entryPointTree.fixrouters[i].leaves[0] = nil - entryPointTree.fixrouters[i].leaves = entryPointTree.fixrouters[i].leaves[1:] - } - } else { - // Remove the *Tree from the fixrouters slice - entryPointTree.fixrouters[i] = nil - - if i == len(entryPointTree.fixrouters)-1 { - entryPointTree.fixrouters = entryPointTree.fixrouters[:i] - } else { - entryPointTree.fixrouters = append(entryPointTree.fixrouters[:i], entryPointTree.fixrouters[i+1:len(entryPointTree.fixrouters)]...) - } - } - return - } - findAndRemoveTree(paths[1:], entryPointTree.fixrouters[i], method) - } - } -} - -func findAndRemoveSingleTree(entryPointTree *Tree) { - if entryPointTree == nil { - return - } - if len(entryPointTree.fixrouters) > 0 { - // If the route had children subtrees, remove just the functional leaf, - // to allow children to function as before - if len(entryPointTree.leaves) > 0 { - entryPointTree.leaves[0] = nil - entryPointTree.leaves = entryPointTree.leaves[1:] - } - } -} - -// Include will generate router file in the router/xxx.go from the controller's comments -// usage: -// beego.Include(&BankAccount{}, &OrderController{},&RefundController{},&ReceiptController{}) -// type BankAccount struct{ -// beego.Controller -// } -// -// register the function -// func (b *BankAccount)Mapping(){ -// b.Mapping("ShowAccount" , b.ShowAccount) -// b.Mapping("ModifyAccount", b.ModifyAccount) -//} -// -// //@router /account/:id [get] -// func (b *BankAccount) ShowAccount(){ -// //logic -// } -// -// -// //@router /account/:id [post] -// func (b *BankAccount) ModifyAccount(){ -// //logic -// } -// -// the comments @router url methodlist -// url support all the function Router's pattern -// methodlist [get post head put delete options *] -func Include(cList ...ControllerInterface) *App { - BeeApp.Handlers.Include(cList...) - return BeeApp -} - -// RESTRouter adds a restful controller handler to BeeApp. -// its' controller implements beego.ControllerInterface and -// defines a param "pattern/:objectId" to visit each resource. 
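For reference while reviewing what API surface this removal drops from vendor/, the routing helpers above were typically consumed as follows. This is a sketch, not project code: HealthController, the paths and the handler body are illustrative, and the "get:Get" mapping uses the custom-rules form documented on Router.

```go
package main

import (
	"github.com/astaxie/beego"
)

// HealthController is an illustrative controller; embedding beego.Controller
// is what satisfies beego.ControllerInterface.
type HealthController struct {
	beego.Controller
}

// Get serves the GET requests routed to this controller.
func (c *HealthController) Get() {
	c.Ctx.Output.Body([]byte("ok"))
}

func main() {
	// Plain pattern router, as documented on the removed Router helper.
	beego.Router("/health", &HealthController{})

	// Overriding a route inherited from a base app, mirroring the usage
	// comment on UnregisterFixedRoute above (the path is illustrative).
	beego.UnregisterFixedRoute("/inherited", "GET")
	beego.Router("/inherited", &HealthController{}, "get:Get")

	beego.Run()
}
```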
-func RESTRouter(rootpath string, c ControllerInterface) *App { - Router(rootpath, c) - Router(path.Join(rootpath, ":objectId"), c) - return BeeApp -} - -// AutoRouter adds defined controller handler to BeeApp. -// it's same to App.AutoRouter. -// if beego.AddAuto(&MainContorlller{}) and MainController has methods List and Page, -// visit the url /main/list to exec List function or /main/page to exec Page function. -func AutoRouter(c ControllerInterface) *App { - BeeApp.Handlers.AddAuto(c) - return BeeApp -} - -// AutoPrefix adds controller handler to BeeApp with prefix. -// it's same to App.AutoRouterWithPrefix. -// if beego.AutoPrefix("/admin",&MainContorlller{}) and MainController has methods List and Page, -// visit the url /admin/main/list to exec List function or /admin/main/page to exec Page function. -func AutoPrefix(prefix string, c ControllerInterface) *App { - BeeApp.Handlers.AddAutoPrefix(prefix, c) - return BeeApp -} - -// Get used to register router for Get method -// usage: -// beego.Get("/", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func Get(rootpath string, f FilterFunc) *App { - BeeApp.Handlers.Get(rootpath, f) - return BeeApp -} - -// Post used to register router for Post method -// usage: -// beego.Post("/api", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func Post(rootpath string, f FilterFunc) *App { - BeeApp.Handlers.Post(rootpath, f) - return BeeApp -} - -// Delete used to register router for Delete method -// usage: -// beego.Delete("/api", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func Delete(rootpath string, f FilterFunc) *App { - BeeApp.Handlers.Delete(rootpath, f) - return BeeApp -} - -// Put used to register router for Put method -// usage: -// beego.Put("/api", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func Put(rootpath string, f FilterFunc) *App { - BeeApp.Handlers.Put(rootpath, f) - return BeeApp -} - -// Head used to register router for Head method -// usage: -// beego.Head("/api", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func Head(rootpath string, f FilterFunc) *App { - BeeApp.Handlers.Head(rootpath, f) - return BeeApp -} - -// Options used to register router for Options method -// usage: -// beego.Options("/api", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func Options(rootpath string, f FilterFunc) *App { - BeeApp.Handlers.Options(rootpath, f) - return BeeApp -} - -// Patch used to register router for Patch method -// usage: -// beego.Patch("/api", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func Patch(rootpath string, f FilterFunc) *App { - BeeApp.Handlers.Patch(rootpath, f) - return BeeApp -} - -// Any used to register router for all methods -// usage: -// beego.Any("/api", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func Any(rootpath string, f FilterFunc) *App { - BeeApp.Handlers.Any(rootpath, f) - return BeeApp -} - -// Handler used to register a Handler router -// usage: -// beego.Handler("/api", http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) { -// fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) -// })) -func Handler(rootpath string, h http.Handler, options ...interface{}) *App { - BeeApp.Handlers.Handler(rootpath, h, options...) - return BeeApp -} - -// InsertFilter adds a FilterFunc with pattern condition and action constant. 
-// The pos means action constant including -// beego.BeforeStatic, beego.BeforeRouter, beego.BeforeExec, beego.AfterExec and beego.FinishRouter. -// The bool params is for setting the returnOnOutput value (false allows multiple filters to execute) -func InsertFilter(pattern string, pos int, filter FilterFunc, params ...bool) *App { - BeeApp.Handlers.InsertFilter(pattern, pos, filter, params...) - return BeeApp -} diff --git a/vendor/github.com/astaxie/beego/beego.go b/vendor/github.com/astaxie/beego/beego.go deleted file mode 100644 index ff89f2f51..000000000 --- a/vendor/github.com/astaxie/beego/beego.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "os" - "path/filepath" - "strconv" - "strings" -) - -const ( - // VERSION represent beego web framework version. - VERSION = "1.11.1" - - // DEV is for develop - DEV = "dev" - // PROD is for production - PROD = "prod" -) - -// M is Map shortcut -type M map[string]interface{} - -// Hook function to run -type hookfunc func() error - -var ( - hooks = make([]hookfunc, 0) //hook function slice to store the hookfunc -) - -// AddAPPStartHook is used to register the hookfunc -// The hookfuncs will run in beego.Run() -// such as initiating session , starting middleware , building template, starting admin control and so on. -func AddAPPStartHook(hf ...hookfunc) { - hooks = append(hooks, hf...) -} - -// Run beego application. -// beego.Run() default run on HttpPort -// beego.Run("localhost") -// beego.Run(":8089") -// beego.Run("127.0.0.1:8089") -func Run(params ...string) { - - initBeforeHTTPRun() - - if len(params) > 0 && params[0] != "" { - strs := strings.Split(params[0], ":") - if len(strs) > 0 && strs[0] != "" { - BConfig.Listen.HTTPAddr = strs[0] - } - if len(strs) > 1 && strs[1] != "" { - BConfig.Listen.HTTPPort, _ = strconv.Atoi(strs[1]) - } - - BConfig.Listen.Domains = params - } - - BeeApp.Run() -} - -// RunWithMiddleWares Run beego application with middlewares. -func RunWithMiddleWares(addr string, mws ...MiddleWare) { - initBeforeHTTPRun() - - strs := strings.Split(addr, ":") - if len(strs) > 0 && strs[0] != "" { - BConfig.Listen.HTTPAddr = strs[0] - BConfig.Listen.Domains = []string{strs[0]} - } - if len(strs) > 1 && strs[1] != "" { - BConfig.Listen.HTTPPort, _ = strconv.Atoi(strs[1]) - } - - BeeApp.Run(mws...) 
-} - -func initBeforeHTTPRun() { - //init hooks - AddAPPStartHook( - registerMime, - registerDefaultErrorHandler, - registerSession, - registerTemplate, - registerAdmin, - registerGzip, - ) - - for _, hk := range hooks { - if err := hk(); err != nil { - panic(err) - } - } -} - -// TestBeegoInit is for test package init -func TestBeegoInit(ap string) { - path := filepath.Join(ap, "conf", "app.conf") - os.Chdir(ap) - InitBeegoBeforeTest(path) -} - -// InitBeegoBeforeTest is for test package init -func InitBeegoBeforeTest(appConfigPath string) { - if err := LoadAppConfig(appConfigProvider, appConfigPath); err != nil { - panic(err) - } - BConfig.RunMode = "test" - initBeforeHTTPRun() -} diff --git a/vendor/github.com/astaxie/beego/config.go b/vendor/github.com/astaxie/beego/config.go deleted file mode 100644 index 7969dcea5..000000000 --- a/vendor/github.com/astaxie/beego/config.go +++ /dev/null @@ -1,510 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - - "github.com/astaxie/beego/config" - "github.com/astaxie/beego/context" - "github.com/astaxie/beego/logs" - "github.com/astaxie/beego/session" - "github.com/astaxie/beego/utils" -) - -// Config is the main struct for BConfig -type Config struct { - AppName string //Application name - RunMode string //Running Mode: dev | prod - RouterCaseSensitive bool - ServerName string - RecoverPanic bool - RecoverFunc func(*context.Context) - CopyRequestBody bool - EnableGzip bool - MaxMemory int64 - EnableErrorsShow bool - EnableErrorsRender bool - Listen Listen - WebConfig WebConfig - Log LogConfig -} - -// Listen holds for http and https related config -type Listen struct { - Graceful bool // Graceful means use graceful module to start the server - ServerTimeOut int64 - ListenTCP4 bool - EnableHTTP bool - HTTPAddr string - HTTPPort int - AutoTLS bool - Domains []string - TLSCacheDir string - EnableHTTPS bool - EnableMutualHTTPS bool - HTTPSAddr string - HTTPSPort int - HTTPSCertFile string - HTTPSKeyFile string - TrustCaFile string - EnableAdmin bool - AdminAddr string - AdminPort int - EnableFcgi bool - EnableStdIo bool // EnableStdIo works with EnableFcgi Use FCGI via standard I/O -} - -// WebConfig holds web related config -type WebConfig struct { - AutoRender bool - EnableDocs bool - FlashName string - FlashSeparator string - DirectoryIndex bool - StaticDir map[string]string - StaticExtensionsToGzip []string - TemplateLeft string - TemplateRight string - ViewsPath string - EnableXSRF bool - XSRFKey string - XSRFExpire int - Session SessionConfig -} - -// SessionConfig holds session related config -type SessionConfig struct { - SessionOn bool - SessionProvider string - SessionName string - SessionGCMaxLifetime int64 - SessionProviderConfig string - SessionCookieLifeTime int - SessionAutoSetCookie bool - SessionDomain string - SessionDisableHTTPOnly bool // used to allow for cross 
domain cookies/javascript cookies. - SessionEnableSidInHTTPHeader bool // enable store/get the sessionId into/from http headers - SessionNameInHTTPHeader string - SessionEnableSidInURLQuery bool // enable get the sessionId from Url Query params -} - -// LogConfig holds Log related config -type LogConfig struct { - AccessLogs bool - EnableStaticLogs bool //log static files requests default: false - AccessLogsFormat string //access log format: JSON_FORMAT, APACHE_FORMAT or empty string - FileLineNum bool - Outputs map[string]string // Store Adaptor : config -} - -var ( - // BConfig is the default config for Application - BConfig *Config - // AppConfig is the instance of Config, store the config information from file - AppConfig *beegoAppConfig - // AppPath is the absolute path to the app - AppPath string - // GlobalSessions is the instance for the session manager - GlobalSessions *session.Manager - - // appConfigPath is the path to the config files - appConfigPath string - // appConfigProvider is the provider for the config, default is ini - appConfigProvider = "ini" -) - -func init() { - BConfig = newBConfig() - var err error - if AppPath, err = filepath.Abs(filepath.Dir(os.Args[0])); err != nil { - panic(err) - } - workPath, err := os.Getwd() - if err != nil { - panic(err) - } - var filename = "app.conf" - if os.Getenv("BEEGO_RUNMODE") != "" { - filename = os.Getenv("BEEGO_RUNMODE") + ".app.conf" - } - appConfigPath = filepath.Join(workPath, "conf", filename) - if !utils.FileExists(appConfigPath) { - appConfigPath = filepath.Join(AppPath, "conf", filename) - if !utils.FileExists(appConfigPath) { - AppConfig = &beegoAppConfig{innerConfig: config.NewFakeConfig()} - return - } - } - if err = parseConfig(appConfigPath); err != nil { - panic(err) - } -} - -func recoverPanic(ctx *context.Context) { - if err := recover(); err != nil { - if err == ErrAbort { - return - } - if !BConfig.RecoverPanic { - panic(err) - } - if BConfig.EnableErrorsShow { - if _, ok := ErrorMaps[fmt.Sprint(err)]; ok { - exception(fmt.Sprint(err), ctx) - return - } - } - var stack string - logs.Critical("the request url is ", ctx.Input.URL()) - logs.Critical("Handler crashed with error", err) - for i := 1; ; i++ { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - logs.Critical(fmt.Sprintf("%s:%d", file, line)) - stack = stack + fmt.Sprintln(fmt.Sprintf("%s:%d", file, line)) - } - if BConfig.RunMode == DEV && BConfig.EnableErrorsRender { - showErr(err, ctx, stack) - } - if ctx.Output.Status != 0 { - ctx.ResponseWriter.WriteHeader(ctx.Output.Status) - } else { - ctx.ResponseWriter.WriteHeader(500) - } - } -} - -func newBConfig() *Config { - return &Config{ - AppName: "beego", - RunMode: PROD, - RouterCaseSensitive: true, - ServerName: "beegoServer:" + VERSION, - RecoverPanic: true, - RecoverFunc: recoverPanic, - CopyRequestBody: false, - EnableGzip: false, - MaxMemory: 1 << 26, //64MB - EnableErrorsShow: true, - EnableErrorsRender: true, - Listen: Listen{ - Graceful: false, - ServerTimeOut: 0, - ListenTCP4: false, - EnableHTTP: true, - AutoTLS: false, - Domains: []string{}, - TLSCacheDir: ".", - HTTPAddr: "", - HTTPPort: 8080, - EnableHTTPS: false, - HTTPSAddr: "", - HTTPSPort: 10443, - HTTPSCertFile: "", - HTTPSKeyFile: "", - EnableAdmin: false, - AdminAddr: "", - AdminPort: 8088, - EnableFcgi: false, - EnableStdIo: false, - }, - WebConfig: WebConfig{ - AutoRender: true, - EnableDocs: false, - FlashName: "BEEGO_FLASH", - FlashSeparator: "BEEGOFLASH", - DirectoryIndex: false, - StaticDir: 
map[string]string{"/static": "static"}, - StaticExtensionsToGzip: []string{".css", ".js"}, - TemplateLeft: "{{", - TemplateRight: "}}", - ViewsPath: "views", - EnableXSRF: false, - XSRFKey: "beegoxsrf", - XSRFExpire: 0, - Session: SessionConfig{ - SessionOn: false, - SessionProvider: "memory", - SessionName: "beegosessionID", - SessionGCMaxLifetime: 3600, - SessionProviderConfig: "", - SessionDisableHTTPOnly: false, - SessionCookieLifeTime: 0, //set cookie default is the browser life - SessionAutoSetCookie: true, - SessionDomain: "", - SessionEnableSidInHTTPHeader: false, // enable store/get the sessionId into/from http headers - SessionNameInHTTPHeader: "Beegosessionid", - SessionEnableSidInURLQuery: false, // enable get the sessionId from Url Query params - }, - }, - Log: LogConfig{ - AccessLogs: false, - EnableStaticLogs: false, - AccessLogsFormat: "APACHE_FORMAT", - FileLineNum: true, - Outputs: map[string]string{"console": ""}, - }, - } -} - -// now only support ini, next will support json. -func parseConfig(appConfigPath string) (err error) { - AppConfig, err = newAppConfig(appConfigProvider, appConfigPath) - if err != nil { - return err - } - return assignConfig(AppConfig) -} - -func assignConfig(ac config.Configer) error { - for _, i := range []interface{}{BConfig, &BConfig.Listen, &BConfig.WebConfig, &BConfig.Log, &BConfig.WebConfig.Session} { - assignSingleConfig(i, ac) - } - // set the run mode first - if envRunMode := os.Getenv("BEEGO_RUNMODE"); envRunMode != "" { - BConfig.RunMode = envRunMode - } else if runMode := ac.String("RunMode"); runMode != "" { - BConfig.RunMode = runMode - } - - if sd := ac.String("StaticDir"); sd != "" { - BConfig.WebConfig.StaticDir = map[string]string{} - sds := strings.Fields(sd) - for _, v := range sds { - if url2fsmap := strings.SplitN(v, ":", 2); len(url2fsmap) == 2 { - BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[1] - } else { - BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[0] - } - } - } - - if sgz := ac.String("StaticExtensionsToGzip"); sgz != "" { - extensions := strings.Split(sgz, ",") - fileExts := []string{} - for _, ext := range extensions { - ext = strings.TrimSpace(ext) - if ext == "" { - continue - } - if !strings.HasPrefix(ext, ".") { - ext = "." 
+ ext - } - fileExts = append(fileExts, ext) - } - if len(fileExts) > 0 { - BConfig.WebConfig.StaticExtensionsToGzip = fileExts - } - } - - if lo := ac.String("LogOutputs"); lo != "" { - // if lo is not nil or empty - // means user has set his own LogOutputs - // clear the default setting to BConfig.Log.Outputs - BConfig.Log.Outputs = make(map[string]string) - los := strings.Split(lo, ";") - for _, v := range los { - if logType2Config := strings.SplitN(v, ",", 2); len(logType2Config) == 2 { - BConfig.Log.Outputs[logType2Config[0]] = logType2Config[1] - } else { - continue - } - } - } - - //init log - logs.Reset() - for adaptor, config := range BConfig.Log.Outputs { - err := logs.SetLogger(adaptor, config) - if err != nil { - fmt.Fprintln(os.Stderr, fmt.Sprintf("%s with the config %q got err:%s", adaptor, config, err.Error())) - } - } - logs.SetLogFuncCall(BConfig.Log.FileLineNum) - - return nil -} - -func assignSingleConfig(p interface{}, ac config.Configer) { - pt := reflect.TypeOf(p) - if pt.Kind() != reflect.Ptr { - return - } - pt = pt.Elem() - if pt.Kind() != reflect.Struct { - return - } - pv := reflect.ValueOf(p).Elem() - - for i := 0; i < pt.NumField(); i++ { - pf := pv.Field(i) - if !pf.CanSet() { - continue - } - name := pt.Field(i).Name - switch pf.Kind() { - case reflect.String: - pf.SetString(ac.DefaultString(name, pf.String())) - case reflect.Int, reflect.Int64: - pf.SetInt(ac.DefaultInt64(name, pf.Int())) - case reflect.Bool: - pf.SetBool(ac.DefaultBool(name, pf.Bool())) - case reflect.Struct: - default: - //do nothing here - } - } - -} - -// LoadAppConfig allow developer to apply a config file -func LoadAppConfig(adapterName, configPath string) error { - absConfigPath, err := filepath.Abs(configPath) - if err != nil { - return err - } - - if !utils.FileExists(absConfigPath) { - return fmt.Errorf("the target config file: %s don't exist", configPath) - } - - appConfigPath = absConfigPath - appConfigProvider = adapterName - - return parseConfig(appConfigPath) -} - -type beegoAppConfig struct { - innerConfig config.Configer -} - -func newAppConfig(appConfigProvider, appConfigPath string) (*beegoAppConfig, error) { - ac, err := config.NewConfig(appConfigProvider, appConfigPath) - if err != nil { - return nil, err - } - return &beegoAppConfig{ac}, nil -} - -func (b *beegoAppConfig) Set(key, val string) error { - if err := b.innerConfig.Set(BConfig.RunMode+"::"+key, val); err != nil { - return err - } - return b.innerConfig.Set(key, val) -} - -func (b *beegoAppConfig) String(key string) string { - if v := b.innerConfig.String(BConfig.RunMode + "::" + key); v != "" { - return v - } - return b.innerConfig.String(key) -} - -func (b *beegoAppConfig) Strings(key string) []string { - if v := b.innerConfig.Strings(BConfig.RunMode + "::" + key); len(v) > 0 { - return v - } - return b.innerConfig.Strings(key) -} - -func (b *beegoAppConfig) Int(key string) (int, error) { - if v, err := b.innerConfig.Int(BConfig.RunMode + "::" + key); err == nil { - return v, nil - } - return b.innerConfig.Int(key) -} - -func (b *beegoAppConfig) Int64(key string) (int64, error) { - if v, err := b.innerConfig.Int64(BConfig.RunMode + "::" + key); err == nil { - return v, nil - } - return b.innerConfig.Int64(key) -} - -func (b *beegoAppConfig) Bool(key string) (bool, error) { - if v, err := b.innerConfig.Bool(BConfig.RunMode + "::" + key); err == nil { - return v, nil - } - return b.innerConfig.Bool(key) -} - -func (b *beegoAppConfig) Float(key string) (float64, error) { - if v, err := 
b.innerConfig.Float(BConfig.RunMode + "::" + key); err == nil { - return v, nil - } - return b.innerConfig.Float(key) -} - -func (b *beegoAppConfig) DefaultString(key string, defaultVal string) string { - if v := b.String(key); v != "" { - return v - } - return defaultVal -} - -func (b *beegoAppConfig) DefaultStrings(key string, defaultVal []string) []string { - if v := b.Strings(key); len(v) != 0 { - return v - } - return defaultVal -} - -func (b *beegoAppConfig) DefaultInt(key string, defaultVal int) int { - if v, err := b.Int(key); err == nil { - return v - } - return defaultVal -} - -func (b *beegoAppConfig) DefaultInt64(key string, defaultVal int64) int64 { - if v, err := b.Int64(key); err == nil { - return v - } - return defaultVal -} - -func (b *beegoAppConfig) DefaultBool(key string, defaultVal bool) bool { - if v, err := b.Bool(key); err == nil { - return v - } - return defaultVal -} - -func (b *beegoAppConfig) DefaultFloat(key string, defaultVal float64) float64 { - if v, err := b.Float(key); err == nil { - return v - } - return defaultVal -} - -func (b *beegoAppConfig) DIY(key string) (interface{}, error) { - return b.innerConfig.DIY(key) -} - -func (b *beegoAppConfig) GetSection(section string) (map[string]string, error) { - return b.innerConfig.GetSection(section) -} - -func (b *beegoAppConfig) SaveConfigFile(filename string) error { - return b.innerConfig.SaveConfigFile(filename) -} diff --git a/vendor/github.com/astaxie/beego/config/config.go b/vendor/github.com/astaxie/beego/config/config.go deleted file mode 100644 index bfd79e85d..000000000 --- a/vendor/github.com/astaxie/beego/config/config.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package config is used to parse config. -// Usage: -// import "github.com/astaxie/beego/config" -//Examples. -// -// cnf, err := config.NewConfig("ini", "config.conf") -// -// cnf APIS: -// -// cnf.Set(key, val string) error -// cnf.String(key string) string -// cnf.Strings(key string) []string -// cnf.Int(key string) (int, error) -// cnf.Int64(key string) (int64, error) -// cnf.Bool(key string) (bool, error) -// cnf.Float(key string) (float64, error) -// cnf.DefaultString(key string, defaultVal string) string -// cnf.DefaultStrings(key string, defaultVal []string) []string -// cnf.DefaultInt(key string, defaultVal int) int -// cnf.DefaultInt64(key string, defaultVal int64) int64 -// cnf.DefaultBool(key string, defaultVal bool) bool -// cnf.DefaultFloat(key string, defaultVal float64) float64 -// cnf.DIY(key string) (interface{}, error) -// cnf.GetSection(section string) (map[string]string, error) -// cnf.SaveConfigFile(filename string) error -//More docs http://beego.me/docs/module/config.md -package config - -import ( - "fmt" - "os" - "reflect" - "time" -) - -// Configer defines how to get and set value from configuration raw data. 
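The wrapper above is what gives beego its run-mode scoped configuration: every lookup tries the "<RunMode>::key" form first and falls back to the bare key, and the Default* helpers absorb missing keys. A small sketch of that key convention against the config package whose source follows below (NewConfigData and the ini adapter are both part of this removal); the key names and values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/astaxie/beego/config"
)

func main() {
	// Inline ini data so the sketch needs no config file on disk.
	data := []byte("httpport = 8080\n\n[prod]\nhttpport = 80\n")
	cnf, err := config.NewConfigData("ini", data)
	if err != nil {
		panic(err)
	}

	// section::key lookup, the form beegoAppConfig builds from the run mode.
	fmt.Println(cnf.String("prod::httpport")) // 80
	// Bare key falls back to the default section.
	fmt.Println(cnf.String("httpport")) // 8080
	// Default* helpers return the fallback when the key is absent.
	fmt.Println(cnf.DefaultInt("servertimeout", 30)) // 30
}
```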
-type Configer interface { - Set(key, val string) error //support section::key type in given key when using ini type. - String(key string) string //support section::key type in key string when using ini and json type; Int,Int64,Bool,Float,DIY are same. - Strings(key string) []string //get string slice - Int(key string) (int, error) - Int64(key string) (int64, error) - Bool(key string) (bool, error) - Float(key string) (float64, error) - DefaultString(key string, defaultVal string) string // support section::key type in key string when using ini and json type; Int,Int64,Bool,Float,DIY are same. - DefaultStrings(key string, defaultVal []string) []string //get string slice - DefaultInt(key string, defaultVal int) int - DefaultInt64(key string, defaultVal int64) int64 - DefaultBool(key string, defaultVal bool) bool - DefaultFloat(key string, defaultVal float64) float64 - DIY(key string) (interface{}, error) - GetSection(section string) (map[string]string, error) - SaveConfigFile(filename string) error -} - -// Config is the adapter interface for parsing config file to get raw data to Configer. -type Config interface { - Parse(key string) (Configer, error) - ParseData(data []byte) (Configer, error) -} - -var adapters = make(map[string]Config) - -// Register makes a config adapter available by the adapter name. -// If Register is called twice with the same name or if driver is nil, -// it panics. -func Register(name string, adapter Config) { - if adapter == nil { - panic("config: Register adapter is nil") - } - if _, ok := adapters[name]; ok { - panic("config: Register called twice for adapter " + name) - } - adapters[name] = adapter -} - -// NewConfig adapterName is ini/json/xml/yaml. -// filename is the config file path. -func NewConfig(adapterName, filename string) (Configer, error) { - adapter, ok := adapters[adapterName] - if !ok { - return nil, fmt.Errorf("config: unknown adaptername %q (forgotten import?)", adapterName) - } - return adapter.Parse(filename) -} - -// NewConfigData adapterName is ini/json/xml/yaml. -// data is the config data. -func NewConfigData(adapterName string, data []byte) (Configer, error) { - adapter, ok := adapters[adapterName] - if !ok { - return nil, fmt.Errorf("config: unknown adaptername %q (forgotten import?)", adapterName) - } - return adapter.ParseData(data) -} - -// ExpandValueEnvForMap convert all string value with environment variable. -func ExpandValueEnvForMap(m map[string]interface{}) map[string]interface{} { - for k, v := range m { - switch value := v.(type) { - case string: - m[k] = ExpandValueEnv(value) - case map[string]interface{}: - m[k] = ExpandValueEnvForMap(value) - case map[string]string: - for k2, v2 := range value { - value[k2] = ExpandValueEnv(v2) - } - m[k] = value - } - } - return m -} - -// ExpandValueEnv returns value of convert with environment variable. -// -// Return environment variable if value start with "${" and end with "}". -// Return default value if environment variable is empty or not exist. -// -// It accept value formats "${env}" , "${env||}}" , "${env||defaultValue}" , "defaultvalue". -// Examples: -// v1 := config.ExpandValueEnv("${GOPATH}") // return the GOPATH environment variable. -// v2 := config.ExpandValueEnv("${GOAsta||/usr/local/go}") // return the default value "/usr/local/go/". -// v3 := config.ExpandValueEnv("Astaxie") // return the value "Astaxie". 
-func ExpandValueEnv(value string) (realValue string) { - realValue = value - - vLen := len(value) - // 3 = ${} - if vLen < 3 { - return - } - // Need start with "${" and end with "}", then return. - if value[0] != '$' || value[1] != '{' || value[vLen-1] != '}' { - return - } - - key := "" - defaultV := "" - // value start with "${" - for i := 2; i < vLen; i++ { - if value[i] == '|' && (i+1 < vLen && value[i+1] == '|') { - key = value[2:i] - defaultV = value[i+2 : vLen-1] // other string is default value. - break - } else if value[i] == '}' { - key = value[2:i] - break - } - } - - realValue = os.Getenv(key) - if realValue == "" { - realValue = defaultV - } - - return -} - -// ParseBool returns the boolean value represented by the string. -// -// It accepts 1, 1.0, t, T, TRUE, true, True, YES, yes, Yes,Y, y, ON, on, On, -// 0, 0.0, f, F, FALSE, false, False, NO, no, No, N,n, OFF, off, Off. -// Any other value returns an error. -func ParseBool(val interface{}) (value bool, err error) { - if val != nil { - switch v := val.(type) { - case bool: - return v, nil - case string: - switch v { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "Y", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "N", "n", "OFF", "off", "Off": - return false, nil - } - case int8, int32, int64: - strV := fmt.Sprintf("%d", v) - if strV == "1" { - return true, nil - } else if strV == "0" { - return false, nil - } - case float64: - if v == 1.0 { - return true, nil - } else if v == 0.0 { - return false, nil - } - } - return false, fmt.Errorf("parsing %q: invalid syntax", val) - } - return false, fmt.Errorf("parsing : invalid syntax") -} - -// ToString converts values of any type to string. -func ToString(x interface{}) string { - switch y := x.(type) { - - // Handle dates with special logic - // This needs to come above the fmt.Stringer - // test since time.Time's have a .String() - // method - case time.Time: - return y.Format("A Monday") - - // Handle type string - case string: - return y - - // Handle type with .String() method - case fmt.Stringer: - return y.String() - - // Handle type with .Error() method - case error: - return y.Error() - - } - - // Handle named string type - if v := reflect.ValueOf(x); v.Kind() == reflect.String { - return v.String() - } - - // Fallback to fmt package for anything else like numeric types - return fmt.Sprint(x) -} diff --git a/vendor/github.com/astaxie/beego/config/fake.go b/vendor/github.com/astaxie/beego/config/fake.go deleted file mode 100644 index d21ab820d..000000000 --- a/vendor/github.com/astaxie/beego/config/fake.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
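Two helpers above pack a lot of behavior into short comments, so here is a tiny usage sketch: ExpandValueEnv resolves "${ENV||default}" placeholders and ParseBool accepts the extended true/false spellings. DEMO_ADDR and DEMO_MISSING are illustrative environment variable names.

```go
package main

import (
	"fmt"
	"os"

	"github.com/astaxie/beego/config"
)

func main() {
	// "${ENV||default}" resolves to the environment variable when it is set...
	os.Setenv("DEMO_ADDR", "10.0.0.5")
	fmt.Println(config.ExpandValueEnv("${DEMO_ADDR||127.0.0.1}")) // 10.0.0.5
	// ...and to the default when it is not.
	fmt.Println(config.ExpandValueEnv("${DEMO_MISSING||127.0.0.1}")) // 127.0.0.1

	// ParseBool accepts the extended spellings listed in its comment.
	on, _ := config.ParseBool("on")
	off, _ := config.ParseBool("N")
	fmt.Println(on, off) // true false
}
```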
- -package config - -import ( - "errors" - "strconv" - "strings" -) - -type fakeConfigContainer struct { - data map[string]string -} - -func (c *fakeConfigContainer) getData(key string) string { - return c.data[strings.ToLower(key)] -} - -func (c *fakeConfigContainer) Set(key, val string) error { - c.data[strings.ToLower(key)] = val - return nil -} - -func (c *fakeConfigContainer) String(key string) string { - return c.getData(key) -} - -func (c *fakeConfigContainer) DefaultString(key string, defaultval string) string { - v := c.String(key) - if v == "" { - return defaultval - } - return v -} - -func (c *fakeConfigContainer) Strings(key string) []string { - v := c.String(key) - if v == "" { - return nil - } - return strings.Split(v, ";") -} - -func (c *fakeConfigContainer) DefaultStrings(key string, defaultval []string) []string { - v := c.Strings(key) - if v == nil { - return defaultval - } - return v -} - -func (c *fakeConfigContainer) Int(key string) (int, error) { - return strconv.Atoi(c.getData(key)) -} - -func (c *fakeConfigContainer) DefaultInt(key string, defaultval int) int { - v, err := c.Int(key) - if err != nil { - return defaultval - } - return v -} - -func (c *fakeConfigContainer) Int64(key string) (int64, error) { - return strconv.ParseInt(c.getData(key), 10, 64) -} - -func (c *fakeConfigContainer) DefaultInt64(key string, defaultval int64) int64 { - v, err := c.Int64(key) - if err != nil { - return defaultval - } - return v -} - -func (c *fakeConfigContainer) Bool(key string) (bool, error) { - return ParseBool(c.getData(key)) -} - -func (c *fakeConfigContainer) DefaultBool(key string, defaultval bool) bool { - v, err := c.Bool(key) - if err != nil { - return defaultval - } - return v -} - -func (c *fakeConfigContainer) Float(key string) (float64, error) { - return strconv.ParseFloat(c.getData(key), 64) -} - -func (c *fakeConfigContainer) DefaultFloat(key string, defaultval float64) float64 { - v, err := c.Float(key) - if err != nil { - return defaultval - } - return v -} - -func (c *fakeConfigContainer) DIY(key string) (interface{}, error) { - if v, ok := c.data[strings.ToLower(key)]; ok { - return v, nil - } - return nil, errors.New("key not find") -} - -func (c *fakeConfigContainer) GetSection(section string) (map[string]string, error) { - return nil, errors.New("not implement in the fakeConfigContainer") -} - -func (c *fakeConfigContainer) SaveConfigFile(filename string) error { - return errors.New("not implement in the fakeConfigContainer") -} - -var _ Configer = new(fakeConfigContainer) - -// NewFakeConfig return a fake Configer -func NewFakeConfig() Configer { - return &fakeConfigContainer{ - data: make(map[string]string), - } -} diff --git a/vendor/github.com/astaxie/beego/config/ini.go b/vendor/github.com/astaxie/beego/config/ini.go deleted file mode 100644 index 002e5e056..000000000 --- a/vendor/github.com/astaxie/beego/config/ini.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "bufio" - "bytes" - "errors" - "io" - "io/ioutil" - "os" - "os/user" - "path/filepath" - "strconv" - "strings" - "sync" -) - -var ( - defaultSection = "default" // default section means if some ini items not in a section, make them in default section, - bNumComment = []byte{'#'} // number signal - bSemComment = []byte{';'} // semicolon signal - bEmpty = []byte{} - bEqual = []byte{'='} // equal signal - bDQuote = []byte{'"'} // quote signal - sectionStart = []byte{'['} // section start signal - sectionEnd = []byte{']'} // section end signal - lineBreak = "\n" -) - -// IniConfig implements Config to parse ini file. -type IniConfig struct { -} - -// Parse creates a new Config and parses the file configuration from the named file. -func (ini *IniConfig) Parse(name string) (Configer, error) { - return ini.parseFile(name) -} - -func (ini *IniConfig) parseFile(name string) (*IniConfigContainer, error) { - data, err := ioutil.ReadFile(name) - if err != nil { - return nil, err - } - - return ini.parseData(filepath.Dir(name), data) -} - -func (ini *IniConfig) parseData(dir string, data []byte) (*IniConfigContainer, error) { - cfg := &IniConfigContainer{ - data: make(map[string]map[string]string), - sectionComment: make(map[string]string), - keyComment: make(map[string]string), - RWMutex: sync.RWMutex{}, - } - cfg.Lock() - defer cfg.Unlock() - - var comment bytes.Buffer - buf := bufio.NewReader(bytes.NewBuffer(data)) - // check the BOM - head, err := buf.Peek(3) - if err == nil && head[0] == 239 && head[1] == 187 && head[2] == 191 { - for i := 1; i <= 3; i++ { - buf.ReadByte() - } - } - section := defaultSection - tmpBuf := bytes.NewBuffer(nil) - for { - tmpBuf.Reset() - - shouldBreak := false - for { - tmp, isPrefix, err := buf.ReadLine() - if err == io.EOF { - shouldBreak = true - break - } - - //It might be a good idea to throw a error on all unknonw errors? - if _, ok := err.(*os.PathError); ok { - return nil, err - } - - tmpBuf.Write(tmp) - if isPrefix { - continue - } - - if !isPrefix { - break - } - } - if shouldBreak { - break - } - - line := tmpBuf.Bytes() - line = bytes.TrimSpace(line) - if bytes.Equal(line, bEmpty) { - continue - } - var bComment []byte - switch { - case bytes.HasPrefix(line, bNumComment): - bComment = bNumComment - case bytes.HasPrefix(line, bSemComment): - bComment = bSemComment - } - if bComment != nil { - line = bytes.TrimLeft(line, string(bComment)) - // Need append to a new line if multi-line comments. 
- if comment.Len() > 0 { - comment.WriteByte('\n') - } - comment.Write(line) - continue - } - - if bytes.HasPrefix(line, sectionStart) && bytes.HasSuffix(line, sectionEnd) { - section = strings.ToLower(string(line[1 : len(line)-1])) // section name case insensitive - if comment.Len() > 0 { - cfg.sectionComment[section] = comment.String() - comment.Reset() - } - if _, ok := cfg.data[section]; !ok { - cfg.data[section] = make(map[string]string) - } - continue - } - - if _, ok := cfg.data[section]; !ok { - cfg.data[section] = make(map[string]string) - } - keyValue := bytes.SplitN(line, bEqual, 2) - - key := string(bytes.TrimSpace(keyValue[0])) // key name case insensitive - key = strings.ToLower(key) - - // handle include "other.conf" - if len(keyValue) == 1 && strings.HasPrefix(key, "include") { - - includefiles := strings.Fields(key) - if includefiles[0] == "include" && len(includefiles) == 2 { - - otherfile := strings.Trim(includefiles[1], "\"") - if !filepath.IsAbs(otherfile) { - otherfile = filepath.Join(dir, otherfile) - } - - i, err := ini.parseFile(otherfile) - if err != nil { - return nil, err - } - - for sec, dt := range i.data { - if _, ok := cfg.data[sec]; !ok { - cfg.data[sec] = make(map[string]string) - } - for k, v := range dt { - cfg.data[sec][k] = v - } - } - - for sec, comm := range i.sectionComment { - cfg.sectionComment[sec] = comm - } - - for k, comm := range i.keyComment { - cfg.keyComment[k] = comm - } - - continue - } - } - - if len(keyValue) != 2 { - return nil, errors.New("read the content error: \"" + string(line) + "\", should key = val") - } - val := bytes.TrimSpace(keyValue[1]) - if bytes.HasPrefix(val, bDQuote) { - val = bytes.Trim(val, `"`) - } - - cfg.data[section][key] = ExpandValueEnv(string(val)) - if comment.Len() > 0 { - cfg.keyComment[section+"."+key] = comment.String() - comment.Reset() - } - - } - return cfg, nil -} - -// ParseData parse ini the data -// When include other.conf,other.conf is either absolute directory -// or under beego in default temporary directory(/tmp/beego[-username]). -func (ini *IniConfig) ParseData(data []byte) (Configer, error) { - dir := "beego" - currentUser, err := user.Current() - if err == nil { - dir = "beego-" + currentUser.Username - } - dir = filepath.Join(os.TempDir(), dir) - if err = os.MkdirAll(dir, os.ModePerm); err != nil { - return nil, err - } - - return ini.parseData(dir, data) -} - -// IniConfigContainer A Config represents the ini configuration. -// When set and get value, support key as section:name type. -type IniConfigContainer struct { - data map[string]map[string]string // section=> key:val - sectionComment map[string]string // section : comment - keyComment map[string]string // id: []{comment, key...}; id 1 is for main comment. - sync.RWMutex -} - -// Bool returns the boolean value for a given key. -func (c *IniConfigContainer) Bool(key string) (bool, error) { - return ParseBool(c.getdata(key)) -} - -// DefaultBool returns the boolean value for a given key. -// if err != nil return defaultval -func (c *IniConfigContainer) DefaultBool(key string, defaultval bool) bool { - v, err := c.Bool(key) - if err != nil { - return defaultval - } - return v -} - -// Int returns the integer value for a given key. -func (c *IniConfigContainer) Int(key string) (int, error) { - return strconv.Atoi(c.getdata(key)) -} - -// DefaultInt returns the integer value for a given key. 
-// if err != nil return defaultval -func (c *IniConfigContainer) DefaultInt(key string, defaultval int) int { - v, err := c.Int(key) - if err != nil { - return defaultval - } - return v -} - -// Int64 returns the int64 value for a given key. -func (c *IniConfigContainer) Int64(key string) (int64, error) { - return strconv.ParseInt(c.getdata(key), 10, 64) -} - -// DefaultInt64 returns the int64 value for a given key. -// if err != nil return defaultval -func (c *IniConfigContainer) DefaultInt64(key string, defaultval int64) int64 { - v, err := c.Int64(key) - if err != nil { - return defaultval - } - return v -} - -// Float returns the float value for a given key. -func (c *IniConfigContainer) Float(key string) (float64, error) { - return strconv.ParseFloat(c.getdata(key), 64) -} - -// DefaultFloat returns the float64 value for a given key. -// if err != nil return defaultval -func (c *IniConfigContainer) DefaultFloat(key string, defaultval float64) float64 { - v, err := c.Float(key) - if err != nil { - return defaultval - } - return v -} - -// String returns the string value for a given key. -func (c *IniConfigContainer) String(key string) string { - return c.getdata(key) -} - -// DefaultString returns the string value for a given key. -// if err != nil return defaultval -func (c *IniConfigContainer) DefaultString(key string, defaultval string) string { - v := c.String(key) - if v == "" { - return defaultval - } - return v -} - -// Strings returns the []string value for a given key. -// Return nil if config value does not exist or is empty. -func (c *IniConfigContainer) Strings(key string) []string { - v := c.String(key) - if v == "" { - return nil - } - return strings.Split(v, ";") -} - -// DefaultStrings returns the []string value for a given key. -// if err != nil return defaultval -func (c *IniConfigContainer) DefaultStrings(key string, defaultval []string) []string { - v := c.Strings(key) - if v == nil { - return defaultval - } - return v -} - -// GetSection returns map for the given section -func (c *IniConfigContainer) GetSection(section string) (map[string]string, error) { - if v, ok := c.data[section]; ok { - return v, nil - } - return nil, errors.New("not exist section") -} - -// SaveConfigFile save the config into file. -// -// BUG(env): The environment variable config item will be saved with real value in SaveConfigFile Function. -func (c *IniConfigContainer) SaveConfigFile(filename string) (err error) { - // Write configuration file by filename. - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - // Get section or key comments. Fixed #1607 - getCommentStr := func(section, key string) string { - var ( - comment string - ok bool - ) - if len(key) == 0 { - comment, ok = c.sectionComment[section] - } else { - comment, ok = c.keyComment[section+"."+key] - } - - if ok { - // Empty comment - if len(comment) == 0 || len(strings.TrimSpace(comment)) == 0 { - return string(bNumComment) - } - prefix := string(bNumComment) - // Add the line head character "#" - return prefix + strings.Replace(comment, lineBreak, lineBreak+prefix, -1) - } - return "" - } - - buf := bytes.NewBuffer(nil) - // Save default section at first place - if dt, ok := c.data[defaultSection]; ok { - for key, val := range dt { - if key != " " { - // Write key comments. - if v := getCommentStr(defaultSection, key); len(v) > 0 { - if _, err = buf.WriteString(v + lineBreak); err != nil { - return err - } - } - - // Write key and value. 
- if _, err = buf.WriteString(key + string(bEqual) + val + lineBreak); err != nil { - return err - } - } - } - - // Put a line between sections. - if _, err = buf.WriteString(lineBreak); err != nil { - return err - } - } - // Save named sections - for section, dt := range c.data { - if section != defaultSection { - // Write section comments. - if v := getCommentStr(section, ""); len(v) > 0 { - if _, err = buf.WriteString(v + lineBreak); err != nil { - return err - } - } - - // Write section name. - if _, err = buf.WriteString(string(sectionStart) + section + string(sectionEnd) + lineBreak); err != nil { - return err - } - - for key, val := range dt { - if key != " " { - // Write key comments. - if v := getCommentStr(section, key); len(v) > 0 { - if _, err = buf.WriteString(v + lineBreak); err != nil { - return err - } - } - - // Write key and value. - if _, err = buf.WriteString(key + string(bEqual) + val + lineBreak); err != nil { - return err - } - } - } - - // Put a line between sections. - if _, err = buf.WriteString(lineBreak); err != nil { - return err - } - } - } - _, err = buf.WriteTo(f) - return err -} - -// Set writes a new value for key. -// if write to one section, the key need be "section::key". -// if the section is not existed, it panics. -func (c *IniConfigContainer) Set(key, value string) error { - c.Lock() - defer c.Unlock() - if len(key) == 0 { - return errors.New("key is empty") - } - - var ( - section, k string - sectionKey = strings.Split(strings.ToLower(key), "::") - ) - - if len(sectionKey) >= 2 { - section = sectionKey[0] - k = sectionKey[1] - } else { - section = defaultSection - k = sectionKey[0] - } - - if _, ok := c.data[section]; !ok { - c.data[section] = make(map[string]string) - } - c.data[section][k] = value - return nil -} - -// DIY returns the raw value by a given key. -func (c *IniConfigContainer) DIY(key string) (v interface{}, err error) { - if v, ok := c.data[strings.ToLower(key)]; ok { - return v, nil - } - return v, errors.New("key not find") -} - -// section.key or key -func (c *IniConfigContainer) getdata(key string) string { - if len(key) == 0 { - return "" - } - c.RLock() - defer c.RUnlock() - - var ( - section, k string - sectionKey = strings.Split(strings.ToLower(key), "::") - ) - if len(sectionKey) >= 2 { - section = sectionKey[0] - k = sectionKey[1] - } else { - section = defaultSection - k = sectionKey[0] - } - if v, ok := c.data[section]; ok { - if vv, ok := v[k]; ok { - return vv - } - } - return "" -} - -func init() { - Register("ini", &IniConfig{}) -} diff --git a/vendor/github.com/astaxie/beego/config/json.go b/vendor/github.com/astaxie/beego/config/json.go deleted file mode 100644 index 74c18c9c1..000000000 --- a/vendor/github.com/astaxie/beego/config/json.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
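The ini adapter removed above lowercases keys, files entries outside any [section] under the implicit default section, and resolves the section::key form declared on the Configer interface. A minimal sketch, not part of the patch, using the upstream package with made-up keys:

```go
package main

import (
	"fmt"
	"log"

	"github.com/astaxie/beego/config"
)

func main() {
	// Keys are case-insensitive; "appname" lands in the default section,
	// "endpoint" under the [etcd] section.
	data := []byte("appname = api\n[etcd]\nendpoint = 127.0.0.1:2379\n")

	cfg, err := config.NewConfigData("ini", data)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(cfg.String("appname"))        // default-section key
	fmt.Println(cfg.String("etcd::endpoint")) // section::key form
}
```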
- -package config - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "strings" - "sync" -) - -// JSONConfig is a json config parser and implements Config interface. -type JSONConfig struct { -} - -// Parse returns a ConfigContainer with parsed json config map. -func (js *JSONConfig) Parse(filename string) (Configer, error) { - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - content, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - - return js.ParseData(content) -} - -// ParseData returns a ConfigContainer with json string -func (js *JSONConfig) ParseData(data []byte) (Configer, error) { - x := &JSONConfigContainer{ - data: make(map[string]interface{}), - } - err := json.Unmarshal(data, &x.data) - if err != nil { - var wrappingArray []interface{} - err2 := json.Unmarshal(data, &wrappingArray) - if err2 != nil { - return nil, err - } - x.data["rootArray"] = wrappingArray - } - - x.data = ExpandValueEnvForMap(x.data) - - return x, nil -} - -// JSONConfigContainer A Config represents the json configuration. -// Only when get value, support key as section:name type. -type JSONConfigContainer struct { - data map[string]interface{} - sync.RWMutex -} - -// Bool returns the boolean value for a given key. -func (c *JSONConfigContainer) Bool(key string) (bool, error) { - val := c.getData(key) - if val != nil { - return ParseBool(val) - } - return false, fmt.Errorf("not exist key: %q", key) -} - -// DefaultBool return the bool value if has no error -// otherwise return the defaultval -func (c *JSONConfigContainer) DefaultBool(key string, defaultval bool) bool { - if v, err := c.Bool(key); err == nil { - return v - } - return defaultval -} - -// Int returns the integer value for a given key. -func (c *JSONConfigContainer) Int(key string) (int, error) { - val := c.getData(key) - if val != nil { - if v, ok := val.(float64); ok { - return int(v), nil - } - return 0, errors.New("not int value") - } - return 0, errors.New("not exist key:" + key) -} - -// DefaultInt returns the integer value for a given key. -// if err != nil return defaultval -func (c *JSONConfigContainer) DefaultInt(key string, defaultval int) int { - if v, err := c.Int(key); err == nil { - return v - } - return defaultval -} - -// Int64 returns the int64 value for a given key. -func (c *JSONConfigContainer) Int64(key string) (int64, error) { - val := c.getData(key) - if val != nil { - if v, ok := val.(float64); ok { - return int64(v), nil - } - return 0, errors.New("not int64 value") - } - return 0, errors.New("not exist key:" + key) -} - -// DefaultInt64 returns the int64 value for a given key. -// if err != nil return defaultval -func (c *JSONConfigContainer) DefaultInt64(key string, defaultval int64) int64 { - if v, err := c.Int64(key); err == nil { - return v - } - return defaultval -} - -// Float returns the float value for a given key. -func (c *JSONConfigContainer) Float(key string) (float64, error) { - val := c.getData(key) - if val != nil { - if v, ok := val.(float64); ok { - return v, nil - } - return 0.0, errors.New("not float64 value") - } - return 0.0, errors.New("not exist key:" + key) -} - -// DefaultFloat returns the float64 value for a given key. -// if err != nil return defaultval -func (c *JSONConfigContainer) DefaultFloat(key string, defaultval float64) float64 { - if v, err := c.Float(key); err == nil { - return v - } - return defaultval -} - -// String returns the string value for a given key. 
-func (c *JSONConfigContainer) String(key string) string { - val := c.getData(key) - if val != nil { - if v, ok := val.(string); ok { - return v - } - } - return "" -} - -// DefaultString returns the string value for a given key. -// if err != nil return defaultval -func (c *JSONConfigContainer) DefaultString(key string, defaultval string) string { - // TODO FIXME should not use "" to replace non existence - if v := c.String(key); v != "" { - return v - } - return defaultval -} - -// Strings returns the []string value for a given key. -func (c *JSONConfigContainer) Strings(key string) []string { - stringVal := c.String(key) - if stringVal == "" { - return nil - } - return strings.Split(c.String(key), ";") -} - -// DefaultStrings returns the []string value for a given key. -// if err != nil return defaultval -func (c *JSONConfigContainer) DefaultStrings(key string, defaultval []string) []string { - if v := c.Strings(key); v != nil { - return v - } - return defaultval -} - -// GetSection returns map for the given section -func (c *JSONConfigContainer) GetSection(section string) (map[string]string, error) { - if v, ok := c.data[section]; ok { - return v.(map[string]string), nil - } - return nil, errors.New("nonexist section " + section) -} - -// SaveConfigFile save the config into file -func (c *JSONConfigContainer) SaveConfigFile(filename string) (err error) { - // Write configuration file by filename. - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - b, err := json.MarshalIndent(c.data, "", " ") - if err != nil { - return err - } - _, err = f.Write(b) - return err -} - -// Set writes a new value for key. -func (c *JSONConfigContainer) Set(key, val string) error { - c.Lock() - defer c.Unlock() - c.data[key] = val - return nil -} - -// DIY returns the raw value by a given key. -func (c *JSONConfigContainer) DIY(key string) (v interface{}, err error) { - val := c.getData(key) - if val != nil { - return val, nil - } - return nil, errors.New("not exist key") -} - -// section.key or key -func (c *JSONConfigContainer) getData(key string) interface{} { - if len(key) == 0 { - return nil - } - - c.RLock() - defer c.RUnlock() - - sectionKeys := strings.Split(key, "::") - if len(sectionKeys) >= 2 { - curValue, ok := c.data[sectionKeys[0]] - if !ok { - return nil - } - for _, key := range sectionKeys[1:] { - if v, ok := curValue.(map[string]interface{}); ok { - if curValue, ok = v[key]; !ok { - return nil - } - } - } - return curValue - } - if v, ok := c.data[key]; ok { - return v - } - return nil -} - -func init() { - Register("json", &JSONConfig{}) -} diff --git a/vendor/github.com/astaxie/beego/context/acceptencoder.go b/vendor/github.com/astaxie/beego/context/acceptencoder.go deleted file mode 100644 index b4e2492c0..000000000 --- a/vendor/github.com/astaxie/beego/context/acceptencoder.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2015 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
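The JSON adapter removed above resolves a::b keys by walking nested objects and relies on encoding/json decoding numbers as float64, which Int, Int64 and Float then convert. An illustrative sketch, not part of the patch; the server::port and server::tls keys are invented:

```go
package main

import (
	"fmt"
	"log"

	"github.com/astaxie/beego/config"
)

func main() {
	data := []byte(`{"server": {"port": 8080, "tls": false}}`)

	cfg, err := config.NewConfigData("json", data)
	if err != nil {
		log.Fatal(err)
	}

	// json.Unmarshal yields float64 for the number; Int converts it.
	port, err := cfg.Int("server::port")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(port)                                 // 8080
	fmt.Println(cfg.DefaultBool("server::tls", true)) // false (key exists)
}
```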
- -package context - -import ( - "bytes" - "compress/flate" - "compress/gzip" - "compress/zlib" - "io" - "net/http" - "os" - "strconv" - "strings" - "sync" -) - -var ( - //Default size==20B same as nginx - defaultGzipMinLength = 20 - //Content will only be compressed if content length is either unknown or greater than gzipMinLength. - gzipMinLength = defaultGzipMinLength - //The compression level used for deflate compression. (0-9). - gzipCompressLevel int - //List of HTTP methods to compress. If not set, only GET requests are compressed. - includedMethods map[string]bool - getMethodOnly bool -) - -// InitGzip init the gzipcompress -func InitGzip(minLength, compressLevel int, methods []string) { - if minLength >= 0 { - gzipMinLength = minLength - } - gzipCompressLevel = compressLevel - if gzipCompressLevel < flate.NoCompression || gzipCompressLevel > flate.BestCompression { - gzipCompressLevel = flate.BestSpeed - } - getMethodOnly = (len(methods) == 0) || (len(methods) == 1 && strings.ToUpper(methods[0]) == "GET") - includedMethods = make(map[string]bool, len(methods)) - for _, v := range methods { - includedMethods[strings.ToUpper(v)] = true - } -} - -type resetWriter interface { - io.Writer - Reset(w io.Writer) -} - -type nopResetWriter struct { - io.Writer -} - -func (n nopResetWriter) Reset(w io.Writer) { - //do nothing -} - -type acceptEncoder struct { - name string - levelEncode func(int) resetWriter - customCompressLevelPool *sync.Pool - bestCompressionPool *sync.Pool -} - -func (ac acceptEncoder) encode(wr io.Writer, level int) resetWriter { - if ac.customCompressLevelPool == nil || ac.bestCompressionPool == nil { - return nopResetWriter{wr} - } - var rwr resetWriter - switch level { - case flate.BestSpeed: - rwr = ac.customCompressLevelPool.Get().(resetWriter) - case flate.BestCompression: - rwr = ac.bestCompressionPool.Get().(resetWriter) - default: - rwr = ac.levelEncode(level) - } - rwr.Reset(wr) - return rwr -} - -func (ac acceptEncoder) put(wr resetWriter, level int) { - if ac.customCompressLevelPool == nil || ac.bestCompressionPool == nil { - return - } - wr.Reset(nil) - - //notice - //compressionLevel==BestCompression DOES NOT MATTER - //sync.Pool will not memory leak - - switch level { - case gzipCompressLevel: - ac.customCompressLevelPool.Put(wr) - case flate.BestCompression: - ac.bestCompressionPool.Put(wr) - } -} - -var ( - noneCompressEncoder = acceptEncoder{"", nil, nil, nil} - gzipCompressEncoder = acceptEncoder{ - name: "gzip", - levelEncode: func(level int) resetWriter { wr, _ := gzip.NewWriterLevel(nil, level); return wr }, - customCompressLevelPool: &sync.Pool{New: func() interface{} { wr, _ := gzip.NewWriterLevel(nil, gzipCompressLevel); return wr }}, - bestCompressionPool: &sync.Pool{New: func() interface{} { wr, _ := gzip.NewWriterLevel(nil, flate.BestCompression); return wr }}, - } - - //according to the sec :http://tools.ietf.org/html/rfc2616#section-3.5 ,the deflate compress in http is zlib indeed - //deflate - //The "zlib" format defined in RFC 1950 [31] in combination with - //the "deflate" compression mechanism described in RFC 1951 [29]. 
- deflateCompressEncoder = acceptEncoder{ - name: "deflate", - levelEncode: func(level int) resetWriter { wr, _ := zlib.NewWriterLevel(nil, level); return wr }, - customCompressLevelPool: &sync.Pool{New: func() interface{} { wr, _ := zlib.NewWriterLevel(nil, gzipCompressLevel); return wr }}, - bestCompressionPool: &sync.Pool{New: func() interface{} { wr, _ := zlib.NewWriterLevel(nil, flate.BestCompression); return wr }}, - } -) - -var ( - encoderMap = map[string]acceptEncoder{ // all the other compress methods will ignore - "gzip": gzipCompressEncoder, - "deflate": deflateCompressEncoder, - "*": gzipCompressEncoder, // * means any compress will accept,we prefer gzip - "identity": noneCompressEncoder, // identity means none-compress - } -) - -// WriteFile reads from file and writes to writer by the specific encoding(gzip/deflate) -func WriteFile(encoding string, writer io.Writer, file *os.File) (bool, string, error) { - return writeLevel(encoding, writer, file, flate.BestCompression) -} - -// WriteBody reads writes content to writer by the specific encoding(gzip/deflate) -func WriteBody(encoding string, writer io.Writer, content []byte) (bool, string, error) { - if encoding == "" || len(content) < gzipMinLength { - _, err := writer.Write(content) - return false, "", err - } - return writeLevel(encoding, writer, bytes.NewReader(content), gzipCompressLevel) -} - -// writeLevel reads from reader,writes to writer by specific encoding and compress level -// the compress level is defined by deflate package -func writeLevel(encoding string, writer io.Writer, reader io.Reader, level int) (bool, string, error) { - var outputWriter resetWriter - var err error - var ce = noneCompressEncoder - - if cf, ok := encoderMap[encoding]; ok { - ce = cf - } - encoding = ce.name - outputWriter = ce.encode(writer, level) - defer ce.put(outputWriter, level) - - _, err = io.Copy(outputWriter, reader) - if err != nil { - return false, "", err - } - - switch outputWriter.(type) { - case io.WriteCloser: - outputWriter.(io.WriteCloser).Close() - } - return encoding != "", encoding, nil -} - -// ParseEncoding will extract the right encoding for response -// the Accept-Encoding's sec is here: -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.3 -func ParseEncoding(r *http.Request) string { - if r == nil { - return "" - } - if (getMethodOnly && r.Method == "GET") || includedMethods[r.Method] { - return parseEncoding(r) - } - return "" -} - -type q struct { - name string - value float64 -} - -func parseEncoding(r *http.Request) string { - acceptEncoding := r.Header.Get("Accept-Encoding") - if acceptEncoding == "" { - return "" - } - var lastQ q - for _, v := range strings.Split(acceptEncoding, ",") { - v = strings.TrimSpace(v) - if v == "" { - continue - } - vs := strings.Split(v, ";") - var cf acceptEncoder - var ok bool - if cf, ok = encoderMap[vs[0]]; !ok { - continue - } - if len(vs) == 1 { - return cf.name - } - if len(vs) == 2 { - f, _ := strconv.ParseFloat(strings.Replace(vs[1], "q=", "", -1), 64) - if f == 0 { - continue - } - if f > lastQ.value { - lastQ = q{cf.name, f} - } - } - } - return lastQ.name -} diff --git a/vendor/github.com/astaxie/beego/context/context.go b/vendor/github.com/astaxie/beego/context/context.go deleted file mode 100644 index bbd58299f..000000000 --- a/vendor/github.com/astaxie/beego/context/context.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package context provide the context utils -// Usage: -// -// import "github.com/astaxie/beego/context" -// -// ctx := context.Context{Request:req,ResponseWriter:rw} -// -// more docs http://beego.me/docs/module/context.md -package context - -import ( - "bufio" - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "errors" - "fmt" - "net" - "net/http" - "strconv" - "strings" - "time" - - "github.com/astaxie/beego/utils" -) - -//commonly used mime-types -const ( - ApplicationJSON = "application/json" - ApplicationXML = "application/xml" - ApplicationYAML = "application/x-yaml" - TextXML = "text/xml" -) - -// NewContext return the Context with Input and Output -func NewContext() *Context { - return &Context{ - Input: NewInput(), - Output: NewOutput(), - } -} - -// Context Http request context struct including BeegoInput, BeegoOutput, http.Request and http.ResponseWriter. -// BeegoInput and BeegoOutput provides some api to operate request and response more easily. -type Context struct { - Input *BeegoInput - Output *BeegoOutput - Request *http.Request - ResponseWriter *Response - _xsrfToken string -} - -// Reset init Context, BeegoInput and BeegoOutput -func (ctx *Context) Reset(rw http.ResponseWriter, r *http.Request) { - ctx.Request = r - if ctx.ResponseWriter == nil { - ctx.ResponseWriter = &Response{} - } - ctx.ResponseWriter.reset(rw) - ctx.Input.Reset(ctx) - ctx.Output.Reset(ctx) - ctx._xsrfToken = "" -} - -// Redirect does redirection to localurl with http header status code. -func (ctx *Context) Redirect(status int, localurl string) { - http.Redirect(ctx.ResponseWriter, ctx.Request, localurl, status) -} - -// Abort stops this request. -// if beego.ErrorMaps exists, panic body. -func (ctx *Context) Abort(status int, body string) { - ctx.Output.SetStatus(status) - panic(body) -} - -// WriteString Write string to response body. -// it sends response body. -func (ctx *Context) WriteString(content string) { - ctx.ResponseWriter.Write([]byte(content)) -} - -// GetCookie Get cookie from request by a given key. -// It's alias of BeegoInput.Cookie. -func (ctx *Context) GetCookie(key string) string { - return ctx.Input.Cookie(key) -} - -// SetCookie Set cookie for response. -// It's alias of BeegoOutput.Cookie. -func (ctx *Context) SetCookie(name string, value string, others ...interface{}) { - ctx.Output.Cookie(name, value, others...) -} - -// GetSecureCookie Get secure cookie from request by a given key. 
-func (ctx *Context) GetSecureCookie(Secret, key string) (string, bool) { - val := ctx.Input.Cookie(key) - if val == "" { - return "", false - } - - parts := strings.SplitN(val, "|", 3) - - if len(parts) != 3 { - return "", false - } - - vs := parts[0] - timestamp := parts[1] - sig := parts[2] - - h := hmac.New(sha1.New, []byte(Secret)) - fmt.Fprintf(h, "%s%s", vs, timestamp) - - if fmt.Sprintf("%02x", h.Sum(nil)) != sig { - return "", false - } - res, _ := base64.URLEncoding.DecodeString(vs) - return string(res), true -} - -// SetSecureCookie Set Secure cookie for response. -func (ctx *Context) SetSecureCookie(Secret, name, value string, others ...interface{}) { - vs := base64.URLEncoding.EncodeToString([]byte(value)) - timestamp := strconv.FormatInt(time.Now().UnixNano(), 10) - h := hmac.New(sha1.New, []byte(Secret)) - fmt.Fprintf(h, "%s%s", vs, timestamp) - sig := fmt.Sprintf("%02x", h.Sum(nil)) - cookie := strings.Join([]string{vs, timestamp, sig}, "|") - ctx.Output.Cookie(name, cookie, others...) -} - -// XSRFToken creates a xsrf token string and returns. -func (ctx *Context) XSRFToken(key string, expire int64) string { - if ctx._xsrfToken == "" { - token, ok := ctx.GetSecureCookie(key, "_xsrf") - if !ok { - token = string(utils.RandomCreateBytes(32)) - ctx.SetSecureCookie(key, "_xsrf", token, expire) - } - ctx._xsrfToken = token - } - return ctx._xsrfToken -} - -// CheckXSRFCookie checks xsrf token in this request is valid or not. -// the token can provided in request header "X-Xsrftoken" and "X-CsrfToken" -// or in form field value named as "_xsrf". -func (ctx *Context) CheckXSRFCookie() bool { - token := ctx.Input.Query("_xsrf") - if token == "" { - token = ctx.Request.Header.Get("X-Xsrftoken") - } - if token == "" { - token = ctx.Request.Header.Get("X-Csrftoken") - } - if token == "" { - ctx.Abort(403, "'_xsrf' argument missing from POST") - return false - } - if ctx._xsrfToken != token { - ctx.Abort(403, "XSRF cookie does not match POST argument") - return false - } - return true -} - -// RenderMethodResult renders the return value of a controller method to the output -func (ctx *Context) RenderMethodResult(result interface{}) { - if result != nil { - renderer, ok := result.(Renderer) - if !ok { - err, ok := result.(error) - if ok { - renderer = errorRenderer(err) - } else { - renderer = jsonRenderer(result) - } - } - renderer.Render(ctx) - } -} - -//Response is a wrapper for the http.ResponseWriter -//started set to true if response was written to then don't execute other handler -type Response struct { - http.ResponseWriter - Started bool - Status int - Elapsed time.Duration -} - -func (r *Response) reset(rw http.ResponseWriter) { - r.ResponseWriter = rw - r.Status = 0 - r.Started = false -} - -// Write writes the data to the connection as part of an HTTP reply, -// and sets `started` to true. -// started means the response has sent out. -func (r *Response) Write(p []byte) (int, error) { - r.Started = true - return r.ResponseWriter.Write(p) -} - -// WriteHeader sends an HTTP response header with status code, -// and sets `started` to true. 
-func (r *Response) WriteHeader(code int) { - if r.Status > 0 { - //prevent multiple response.WriteHeader calls - return - } - r.Status = code - r.Started = true - r.ResponseWriter.WriteHeader(code) -} - -// Hijack hijacker for http -func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hj, ok := r.ResponseWriter.(http.Hijacker) - if !ok { - return nil, nil, errors.New("webserver doesn't support hijacking") - } - return hj.Hijack() -} - -// Flush http.Flusher -func (r *Response) Flush() { - if f, ok := r.ResponseWriter.(http.Flusher); ok { - f.Flush() - } -} - -// CloseNotify http.CloseNotifier -func (r *Response) CloseNotify() <-chan bool { - if cn, ok := r.ResponseWriter.(http.CloseNotifier); ok { - return cn.CloseNotify() - } - return nil -} - -// Pusher http.Pusher -func (r *Response) Pusher() (pusher http.Pusher) { - if pusher, ok := r.ResponseWriter.(http.Pusher); ok { - return pusher - } - return nil -} diff --git a/vendor/github.com/astaxie/beego/context/input.go b/vendor/github.com/astaxie/beego/context/input.go deleted file mode 100644 index 819521588..000000000 --- a/vendor/github.com/astaxie/beego/context/input.go +++ /dev/null @@ -1,667 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package context - -import ( - "bytes" - "compress/gzip" - "errors" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/astaxie/beego/session" -) - -// Regexes for checking the accept headers -// TODO make sure these are correct -var ( - acceptsHTMLRegex = regexp.MustCompile(`(text/html|application/xhtml\+xml)(?:,|$)`) - acceptsXMLRegex = regexp.MustCompile(`(application/xml|text/xml)(?:,|$)`) - acceptsJSONRegex = regexp.MustCompile(`(application/json)(?:,|$)`) - acceptsYAMLRegex = regexp.MustCompile(`(application/x-yaml)(?:,|$)`) - maxParam = 50 -) - -// BeegoInput operates the http request header, data, cookie and body. -// it also contains router params and current session. -type BeegoInput struct { - Context *Context - CruSession session.Store - pnames []string - pvalues []string - data map[interface{}]interface{} // store some values in this context when calling context in filter or controller. - RequestBody []byte - RunMethod string - RunController reflect.Type -} - -// NewInput return BeegoInput generated by Context. -func NewInput() *BeegoInput { - return &BeegoInput{ - pnames: make([]string, 0, maxParam), - pvalues: make([]string, 0, maxParam), - data: make(map[interface{}]interface{}), - } -} - -// Reset init the BeegoInput -func (input *BeegoInput) Reset(ctx *Context) { - input.Context = ctx - input.CruSession = nil - input.pnames = input.pnames[:0] - input.pvalues = input.pvalues[:0] - input.data = nil - input.RequestBody = []byte{} -} - -// Protocol returns request protocol name, such as HTTP/1.1 . 
-func (input *BeegoInput) Protocol() string { - return input.Context.Request.Proto -} - -// URI returns full request url with query string, fragment. -func (input *BeegoInput) URI() string { - return input.Context.Request.RequestURI -} - -// URL returns request url path (without query string, fragment). -func (input *BeegoInput) URL() string { - return input.Context.Request.URL.Path -} - -// Site returns base site url as scheme://domain type. -func (input *BeegoInput) Site() string { - return input.Scheme() + "://" + input.Domain() -} - -// Scheme returns request scheme as "http" or "https". -func (input *BeegoInput) Scheme() string { - if scheme := input.Header("X-Forwarded-Proto"); scheme != "" { - return scheme - } - if input.Context.Request.URL.Scheme != "" { - return input.Context.Request.URL.Scheme - } - if input.Context.Request.TLS == nil { - return "http" - } - return "https" -} - -// Domain returns host name. -// Alias of Host method. -func (input *BeegoInput) Domain() string { - return input.Host() -} - -// Host returns host name. -// if no host info in request, return localhost. -func (input *BeegoInput) Host() string { - if input.Context.Request.Host != "" { - if hostPart, _, err := net.SplitHostPort(input.Context.Request.Host); err == nil { - return hostPart - } - return input.Context.Request.Host - } - return "localhost" -} - -// Method returns http request method. -func (input *BeegoInput) Method() string { - return input.Context.Request.Method -} - -// Is returns boolean of this request is on given method, such as Is("POST"). -func (input *BeegoInput) Is(method string) bool { - return input.Method() == method -} - -// IsGet Is this a GET method request? -func (input *BeegoInput) IsGet() bool { - return input.Is("GET") -} - -// IsPost Is this a POST method request? -func (input *BeegoInput) IsPost() bool { - return input.Is("POST") -} - -// IsHead Is this a Head method request? -func (input *BeegoInput) IsHead() bool { - return input.Is("HEAD") -} - -// IsOptions Is this a OPTIONS method request? -func (input *BeegoInput) IsOptions() bool { - return input.Is("OPTIONS") -} - -// IsPut Is this a PUT method request? -func (input *BeegoInput) IsPut() bool { - return input.Is("PUT") -} - -// IsDelete Is this a DELETE method request? -func (input *BeegoInput) IsDelete() bool { - return input.Is("DELETE") -} - -// IsPatch Is this a PATCH method request? -func (input *BeegoInput) IsPatch() bool { - return input.Is("PATCH") -} - -// IsAjax returns boolean of this request is generated by ajax. -func (input *BeegoInput) IsAjax() bool { - return input.Header("X-Requested-With") == "XMLHttpRequest" -} - -// IsSecure returns boolean of this request is in https. -func (input *BeegoInput) IsSecure() bool { - return input.Scheme() == "https" -} - -// IsWebsocket returns boolean of this request is in webSocket. -func (input *BeegoInput) IsWebsocket() bool { - return input.Header("Upgrade") == "websocket" -} - -// IsUpload returns boolean of whether file uploads in this request or not.. 
-func (input *BeegoInput) IsUpload() bool { - return strings.Contains(input.Header("Content-Type"), "multipart/form-data") -} - -// AcceptsHTML Checks if request accepts html response -func (input *BeegoInput) AcceptsHTML() bool { - return acceptsHTMLRegex.MatchString(input.Header("Accept")) -} - -// AcceptsXML Checks if request accepts xml response -func (input *BeegoInput) AcceptsXML() bool { - return acceptsXMLRegex.MatchString(input.Header("Accept")) -} - -// AcceptsJSON Checks if request accepts json response -func (input *BeegoInput) AcceptsJSON() bool { - return acceptsJSONRegex.MatchString(input.Header("Accept")) -} -// AcceptsYAML Checks if request accepts json response -func (input *BeegoInput) AcceptsYAML() bool { - return acceptsYAMLRegex.MatchString(input.Header("Accept")) -} - -// IP returns request client ip. -// if in proxy, return first proxy id. -// if error, return RemoteAddr. -func (input *BeegoInput) IP() string { - ips := input.Proxy() - if len(ips) > 0 && ips[0] != "" { - rip, _, err := net.SplitHostPort(ips[0]) - if err != nil { - rip = ips[0] - } - return rip - } - if ip, _, err := net.SplitHostPort(input.Context.Request.RemoteAddr); err == nil { - return ip - } - return input.Context.Request.RemoteAddr -} - -// Proxy returns proxy client ips slice. -func (input *BeegoInput) Proxy() []string { - if ips := input.Header("X-Forwarded-For"); ips != "" { - return strings.Split(ips, ",") - } - return []string{} -} - -// Referer returns http referer header. -func (input *BeegoInput) Referer() string { - return input.Header("Referer") -} - -// Refer returns http referer header. -func (input *BeegoInput) Refer() string { - return input.Referer() -} - -// SubDomains returns sub domain string. -// if aa.bb.domain.com, returns aa.bb . -func (input *BeegoInput) SubDomains() string { - parts := strings.Split(input.Host(), ".") - if len(parts) >= 3 { - return strings.Join(parts[:len(parts)-2], ".") - } - return "" -} - -// Port returns request client port. -// when error or empty, return 80. -func (input *BeegoInput) Port() int { - if _, portPart, err := net.SplitHostPort(input.Context.Request.Host); err == nil { - port, _ := strconv.Atoi(portPart) - return port - } - return 80 -} - -// UserAgent returns request client user agent string. -func (input *BeegoInput) UserAgent() string { - return input.Header("User-Agent") -} - -// ParamsLen return the length of the params -func (input *BeegoInput) ParamsLen() int { - return len(input.pnames) -} - -// Param returns router param by a given key. -func (input *BeegoInput) Param(key string) string { - for i, v := range input.pnames { - if v == key && i <= len(input.pvalues) { - return input.pvalues[i] - } - } - return "" -} - -// Params returns the map[key]value. -func (input *BeegoInput) Params() map[string]string { - m := make(map[string]string) - for i, v := range input.pnames { - if i <= len(input.pvalues) { - m[v] = input.pvalues[i] - } - } - return m -} - -// SetParam will set the param with key and value -func (input *BeegoInput) SetParam(key, val string) { - // check if already exists - for i, v := range input.pnames { - if v == key && i <= len(input.pvalues) { - input.pvalues[i] = val - return - } - } - input.pvalues = append(input.pvalues, val) - input.pnames = append(input.pnames, key) -} - -// ResetParams clears any of the input's Params -// This function is used to clear parameters so they may be reset between filter -// passes. 
-func (input *BeegoInput) ResetParams() { - input.pnames = input.pnames[:0] - input.pvalues = input.pvalues[:0] -} - -// Query returns input data item string by a given string. -func (input *BeegoInput) Query(key string) string { - if val := input.Param(key); val != "" { - return val - } - if input.Context.Request.Form == nil { - input.Context.Request.ParseForm() - } - return input.Context.Request.Form.Get(key) -} - -// Header returns request header item string by a given string. -// if non-existed, return empty string. -func (input *BeegoInput) Header(key string) string { - return input.Context.Request.Header.Get(key) -} - -// Cookie returns request cookie item string by a given key. -// if non-existed, return empty string. -func (input *BeegoInput) Cookie(key string) string { - ck, err := input.Context.Request.Cookie(key) - if err != nil { - return "" - } - return ck.Value -} - -// Session returns current session item value by a given key. -// if non-existed, return nil. -func (input *BeegoInput) Session(key interface{}) interface{} { - return input.CruSession.Get(key) -} - -// CopyBody returns the raw request body data as bytes. -func (input *BeegoInput) CopyBody(MaxMemory int64) []byte { - if input.Context.Request.Body == nil { - return []byte{} - } - - var requestbody []byte - safe := &io.LimitedReader{R: input.Context.Request.Body, N: MaxMemory} - if input.Header("Content-Encoding") == "gzip" { - reader, err := gzip.NewReader(safe) - if err != nil { - return nil - } - requestbody, _ = ioutil.ReadAll(reader) - } else { - requestbody, _ = ioutil.ReadAll(safe) - } - - input.Context.Request.Body.Close() - bf := bytes.NewBuffer(requestbody) - input.Context.Request.Body = http.MaxBytesReader(input.Context.ResponseWriter, ioutil.NopCloser(bf), MaxMemory) - input.RequestBody = requestbody - return requestbody -} - -// Data return the implicit data in the input -func (input *BeegoInput) Data() map[interface{}]interface{} { - if input.data == nil { - input.data = make(map[interface{}]interface{}) - } - return input.data -} - -// GetData returns the stored data in this context. -func (input *BeegoInput) GetData(key interface{}) interface{} { - if v, ok := input.data[key]; ok { - return v - } - return nil -} - -// SetData stores data with given key in this context. -// This data are only available in this context. -func (input *BeegoInput) SetData(key, val interface{}) { - if input.data == nil { - input.data = make(map[interface{}]interface{}) - } - input.data[key] = val -} - -// ParseFormOrMulitForm parseForm or parseMultiForm based on Content-type -func (input *BeegoInput) ParseFormOrMulitForm(maxMemory int64) error { - // Parse the body depending on the content type. 
- if strings.Contains(input.Header("Content-Type"), "multipart/form-data") { - if err := input.Context.Request.ParseMultipartForm(maxMemory); err != nil { - return errors.New("Error parsing request body:" + err.Error()) - } - } else if err := input.Context.Request.ParseForm(); err != nil { - return errors.New("Error parsing request body:" + err.Error()) - } - return nil -} - -// Bind data from request.Form[key] to dest -// like /?id=123&isok=true&ft=1.2&ol[0]=1&ol[1]=2&ul[]=str&ul[]=array&user.Name=astaxie -// var id int beegoInput.Bind(&id, "id") id ==123 -// var isok bool beegoInput.Bind(&isok, "isok") isok ==true -// var ft float64 beegoInput.Bind(&ft, "ft") ft ==1.2 -// ol := make([]int, 0, 2) beegoInput.Bind(&ol, "ol") ol ==[1 2] -// ul := make([]string, 0, 2) beegoInput.Bind(&ul, "ul") ul ==[str array] -// user struct{Name} beegoInput.Bind(&user, "user") user == {Name:"astaxie"} -func (input *BeegoInput) Bind(dest interface{}, key string) error { - value := reflect.ValueOf(dest) - if value.Kind() != reflect.Ptr { - return errors.New("beego: non-pointer passed to Bind: " + key) - } - value = value.Elem() - if !value.CanSet() { - return errors.New("beego: non-settable variable passed to Bind: " + key) - } - typ := value.Type() - // Get real type if dest define with interface{}. - // e.g var dest interface{} dest=1.0 - if value.Kind() == reflect.Interface { - typ = value.Elem().Type() - } - rv := input.bind(key, typ) - if !rv.IsValid() { - return errors.New("beego: reflect value is empty") - } - value.Set(rv) - return nil -} - -func (input *BeegoInput) bind(key string, typ reflect.Type) reflect.Value { - if input.Context.Request.Form == nil { - input.Context.Request.ParseForm() - } - rv := reflect.Zero(typ) - switch typ.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := input.Query(key) - if len(val) == 0 { - return rv - } - rv = input.bindInt(val, typ) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - val := input.Query(key) - if len(val) == 0 { - return rv - } - rv = input.bindUint(val, typ) - case reflect.Float32, reflect.Float64: - val := input.Query(key) - if len(val) == 0 { - return rv - } - rv = input.bindFloat(val, typ) - case reflect.String: - val := input.Query(key) - if len(val) == 0 { - return rv - } - rv = input.bindString(val, typ) - case reflect.Bool: - val := input.Query(key) - if len(val) == 0 { - return rv - } - rv = input.bindBool(val, typ) - case reflect.Slice: - rv = input.bindSlice(&input.Context.Request.Form, key, typ) - case reflect.Struct: - rv = input.bindStruct(&input.Context.Request.Form, key, typ) - case reflect.Ptr: - rv = input.bindPoint(key, typ) - case reflect.Map: - rv = input.bindMap(&input.Context.Request.Form, key, typ) - } - return rv -} - -func (input *BeegoInput) bindValue(val string, typ reflect.Type) reflect.Value { - rv := reflect.Zero(typ) - switch typ.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - rv = input.bindInt(val, typ) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - rv = input.bindUint(val, typ) - case reflect.Float32, reflect.Float64: - rv = input.bindFloat(val, typ) - case reflect.String: - rv = input.bindString(val, typ) - case reflect.Bool: - rv = input.bindBool(val, typ) - case reflect.Slice: - rv = input.bindSlice(&url.Values{"": {val}}, "", typ) - case reflect.Struct: - rv = input.bindStruct(&url.Values{"": {val}}, "", typ) - case reflect.Ptr: - rv = 
input.bindPoint(val, typ) - case reflect.Map: - rv = input.bindMap(&url.Values{"": {val}}, "", typ) - } - return rv -} - -func (input *BeegoInput) bindInt(val string, typ reflect.Type) reflect.Value { - intValue, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return reflect.Zero(typ) - } - pValue := reflect.New(typ) - pValue.Elem().SetInt(intValue) - return pValue.Elem() -} - -func (input *BeegoInput) bindUint(val string, typ reflect.Type) reflect.Value { - uintValue, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return reflect.Zero(typ) - } - pValue := reflect.New(typ) - pValue.Elem().SetUint(uintValue) - return pValue.Elem() -} - -func (input *BeegoInput) bindFloat(val string, typ reflect.Type) reflect.Value { - floatValue, err := strconv.ParseFloat(val, 64) - if err != nil { - return reflect.Zero(typ) - } - pValue := reflect.New(typ) - pValue.Elem().SetFloat(floatValue) - return pValue.Elem() -} - -func (input *BeegoInput) bindString(val string, typ reflect.Type) reflect.Value { - return reflect.ValueOf(val) -} - -func (input *BeegoInput) bindBool(val string, typ reflect.Type) reflect.Value { - val = strings.TrimSpace(strings.ToLower(val)) - switch val { - case "true", "on", "1": - return reflect.ValueOf(true) - } - return reflect.ValueOf(false) -} - -type sliceValue struct { - index int // Index extracted from brackets. If -1, no index was provided. - value reflect.Value // the bound value for this slice element. -} - -func (input *BeegoInput) bindSlice(params *url.Values, key string, typ reflect.Type) reflect.Value { - maxIndex := -1 - numNoIndex := 0 - sliceValues := []sliceValue{} - for reqKey, vals := range *params { - if !strings.HasPrefix(reqKey, key+"[") { - continue - } - // Extract the index, and the index where a sub-key starts. (e.g. field[0].subkey) - index := -1 - leftBracket, rightBracket := len(key), strings.Index(reqKey[len(key):], "]")+len(key) - if rightBracket > leftBracket+1 { - index, _ = strconv.Atoi(reqKey[leftBracket+1 : rightBracket]) - } - subKeyIndex := rightBracket + 1 - - // Handle the indexed case. - if index > -1 { - if index > maxIndex { - maxIndex = index - } - sliceValues = append(sliceValues, sliceValue{ - index: index, - value: input.bind(reqKey[:subKeyIndex], typ.Elem()), - }) - continue - } - - // It's an un-indexed element. (e.g. element[]) - numNoIndex += len(vals) - for _, val := range vals { - // Unindexed values can only be direct-bound. - sliceValues = append(sliceValues, sliceValue{ - index: -1, - value: input.bindValue(val, typ.Elem()), - }) - } - } - resultArray := reflect.MakeSlice(typ, maxIndex+1, maxIndex+1+numNoIndex) - for _, sv := range sliceValues { - if sv.index != -1 { - resultArray.Index(sv.index).Set(sv.value) - } else { - resultArray = reflect.Append(resultArray, sv.value) - } - } - return resultArray -} - -func (input *BeegoInput) bindStruct(params *url.Values, key string, typ reflect.Type) reflect.Value { - result := reflect.New(typ).Elem() - fieldValues := make(map[string]reflect.Value) - for reqKey, val := range *params { - var fieldName string - if strings.HasPrefix(reqKey, key+".") { - fieldName = reqKey[len(key)+1:] - } else if strings.HasPrefix(reqKey, key+"[") && reqKey[len(reqKey)-1] == ']' { - fieldName = reqKey[len(key)+1 : len(reqKey)-1] - } else { - continue - } - - if _, ok := fieldValues[fieldName]; !ok { - // Time to bind this field. Get it and make sure we can set it. 
- fieldValue := result.FieldByName(fieldName) - if !fieldValue.IsValid() { - continue - } - if !fieldValue.CanSet() { - continue - } - boundVal := input.bindValue(val[0], fieldValue.Type()) - fieldValue.Set(boundVal) - fieldValues[fieldName] = boundVal - } - } - - return result -} - -func (input *BeegoInput) bindPoint(key string, typ reflect.Type) reflect.Value { - return input.bind(key, typ.Elem()).Addr() -} - -func (input *BeegoInput) bindMap(params *url.Values, key string, typ reflect.Type) reflect.Value { - var ( - result = reflect.MakeMap(typ) - keyType = typ.Key() - valueType = typ.Elem() - ) - for paramName, values := range *params { - if !strings.HasPrefix(paramName, key+"[") || paramName[len(paramName)-1] != ']' { - continue - } - - key := paramName[len(key)+1 : len(paramName)-1] - result.SetMapIndex(input.bindValue(key, keyType), input.bindValue(values[0], valueType)) - } - return result -} diff --git a/vendor/github.com/astaxie/beego/context/output.go b/vendor/github.com/astaxie/beego/context/output.go deleted file mode 100644 index 3e277ab20..000000000 --- a/vendor/github.com/astaxie/beego/context/output.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package context - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "html/template" - "io" - "mime" - "net/http" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "time" - "gopkg.in/yaml.v2" -) - -// BeegoOutput does work for sending response header. -type BeegoOutput struct { - Context *Context - Status int - EnableGzip bool -} - -// NewOutput returns new BeegoOutput. -// it contains nothing now. -func NewOutput() *BeegoOutput { - return &BeegoOutput{} -} - -// Reset init BeegoOutput -func (output *BeegoOutput) Reset(ctx *Context) { - output.Context = ctx - output.Status = 0 -} - -// Header sets response header item string via given key. -func (output *BeegoOutput) Header(key, val string) { - output.Context.ResponseWriter.Header().Set(key, val) -} - -// Body sets response body content. -// if EnableGzip, compress content string. -// it sends out response body directly. -func (output *BeegoOutput) Body(content []byte) error { - var encoding string - var buf = &bytes.Buffer{} - if output.EnableGzip { - encoding = ParseEncoding(output.Context.Request) - } - if b, n, _ := WriteBody(encoding, buf, content); b { - output.Header("Content-Encoding", n) - output.Header("Content-Length", strconv.Itoa(buf.Len())) - } else { - output.Header("Content-Length", strconv.Itoa(len(content))) - } - // Write status code if it has been set manually - // Set it to 0 afterwards to prevent "multiple response.WriteHeader calls" - if output.Status != 0 { - output.Context.ResponseWriter.WriteHeader(output.Status) - output.Status = 0 - } else { - output.Context.ResponseWriter.Started = true - } - io.Copy(output.Context.ResponseWriter, buf) - return nil -} - -// Cookie sets cookie value via given key. 
-// others are ordered as cookie's max age time, path,domain, secure and httponly. -func (output *BeegoOutput) Cookie(name string, value string, others ...interface{}) { - var b bytes.Buffer - fmt.Fprintf(&b, "%s=%s", sanitizeName(name), sanitizeValue(value)) - - //fix cookie not work in IE - if len(others) > 0 { - var maxAge int64 - - switch v := others[0].(type) { - case int: - maxAge = int64(v) - case int32: - maxAge = int64(v) - case int64: - maxAge = v - } - - switch { - case maxAge > 0: - fmt.Fprintf(&b, "; Expires=%s; Max-Age=%d", time.Now().Add(time.Duration(maxAge)*time.Second).UTC().Format(time.RFC1123), maxAge) - case maxAge < 0: - fmt.Fprintf(&b, "; Max-Age=0") - } - } - - // the settings below - // Path, Domain, Secure, HttpOnly - // can use nil skip set - - // default "/" - if len(others) > 1 { - if v, ok := others[1].(string); ok && len(v) > 0 { - fmt.Fprintf(&b, "; Path=%s", sanitizeValue(v)) - } - } else { - fmt.Fprintf(&b, "; Path=%s", "/") - } - - // default empty - if len(others) > 2 { - if v, ok := others[2].(string); ok && len(v) > 0 { - fmt.Fprintf(&b, "; Domain=%s", sanitizeValue(v)) - } - } - - // default empty - if len(others) > 3 { - var secure bool - switch v := others[3].(type) { - case bool: - secure = v - default: - if others[3] != nil { - secure = true - } - } - if secure { - fmt.Fprintf(&b, "; Secure") - } - } - - // default false. for session cookie default true - if len(others) > 4 { - if v, ok := others[4].(bool); ok && v { - fmt.Fprintf(&b, "; HttpOnly") - } - } - - output.Context.ResponseWriter.Header().Add("Set-Cookie", b.String()) -} - -var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-") - -func sanitizeName(n string) string { - return cookieNameSanitizer.Replace(n) -} - -var cookieValueSanitizer = strings.NewReplacer("\n", " ", "\r", " ", ";", " ") - -func sanitizeValue(v string) string { - return cookieValueSanitizer.Replace(v) -} - -func jsonRenderer(value interface{}) Renderer { - return rendererFunc(func(ctx *Context) { - ctx.Output.JSON(value, false, false) - }) -} - -func errorRenderer(err error) Renderer { - return rendererFunc(func(ctx *Context) { - ctx.Output.SetStatus(500) - ctx.Output.Body([]byte(err.Error())) - }) -} - -// JSON writes json to response body. -// if encoding is true, it converts utf-8 to \u0000 type. -func (output *BeegoOutput) JSON(data interface{}, hasIndent bool, encoding bool) error { - output.Header("Content-Type", "application/json; charset=utf-8") - var content []byte - var err error - if hasIndent { - content, err = json.MarshalIndent(data, "", " ") - } else { - content, err = json.Marshal(data) - } - if err != nil { - http.Error(output.Context.ResponseWriter, err.Error(), http.StatusInternalServerError) - return err - } - if encoding { - content = []byte(stringsToJSON(string(content))) - } - return output.Body(content) -} - - -// YAML writes yaml to response body. -func (output *BeegoOutput) YAML(data interface{}) error { - output.Header("Content-Type", "application/x-yaml; charset=utf-8") - var content []byte - var err error - content, err = yaml.Marshal(data) - if err != nil { - http.Error(output.Context.ResponseWriter, err.Error(), http.StatusInternalServerError) - return err - } - return output.Body(content) -} - -// JSONP writes jsonp to response body. 
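// Illustrative note, not part of the original file: for a request such as
// GET /metrics?callback=cb, the JSONP method below emits
//
//	if(window.cb)cb({"count":3});
//
// i.e. the JSON payload wrapped in the caller-supplied, JS-escaped callback, and a
// missing "callback" query parameter is rejected with an error. The path, callback
// name and payload here are hypothetical.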
-func (output *BeegoOutput) JSONP(data interface{}, hasIndent bool) error { - output.Header("Content-Type", "application/javascript; charset=utf-8") - var content []byte - var err error - if hasIndent { - content, err = json.MarshalIndent(data, "", " ") - } else { - content, err = json.Marshal(data) - } - if err != nil { - http.Error(output.Context.ResponseWriter, err.Error(), http.StatusInternalServerError) - return err - } - callback := output.Context.Input.Query("callback") - if callback == "" { - return errors.New(`"callback" parameter required`) - } - callback = template.JSEscapeString(callback) - callbackContent := bytes.NewBufferString(" if(window." + callback + ")" + callback) - callbackContent.WriteString("(") - callbackContent.Write(content) - callbackContent.WriteString(");\r\n") - return output.Body(callbackContent.Bytes()) -} - -// XML writes xml string to response body. -func (output *BeegoOutput) XML(data interface{}, hasIndent bool) error { - output.Header("Content-Type", "application/xml; charset=utf-8") - var content []byte - var err error - if hasIndent { - content, err = xml.MarshalIndent(data, "", " ") - } else { - content, err = xml.Marshal(data) - } - if err != nil { - http.Error(output.Context.ResponseWriter, err.Error(), http.StatusInternalServerError) - return err - } - return output.Body(content) -} - -// ServeFormatted serve YAML, XML OR JSON, depending on the value of the Accept header -func (output *BeegoOutput) ServeFormatted(data interface{}, hasIndent bool, hasEncode ...bool) { - accept := output.Context.Input.Header("Accept") - switch accept { - case ApplicationYAML: - output.YAML(data) - case ApplicationXML, TextXML: - output.XML(data, hasIndent) - default: - output.JSON(data, hasIndent, len(hasEncode) > 0 && hasEncode[0]) - } -} - -// Download forces response for download file. -// it prepares the download response header automatically. -func (output *BeegoOutput) Download(file string, filename ...string) { - // check get file error, file not found or other error. - if _, err := os.Stat(file); err != nil { - http.ServeFile(output.Context.ResponseWriter, output.Context.Request, file) - return - } - - var fName string - if len(filename) > 0 && filename[0] != "" { - fName = filename[0] - } else { - fName = filepath.Base(file) - } - output.Header("Content-Disposition", "attachment; filename="+url.PathEscape(fName)) - output.Header("Content-Description", "File Transfer") - output.Header("Content-Type", "application/octet-stream") - output.Header("Content-Transfer-Encoding", "binary") - output.Header("Expires", "0") - output.Header("Cache-Control", "must-revalidate") - output.Header("Pragma", "public") - http.ServeFile(output.Context.ResponseWriter, output.Context.Request, file) -} - -// ContentType sets the content type from ext string. -// MIME type is given in mime package. -func (output *BeegoOutput) ContentType(ext string) { - if !strings.HasPrefix(ext, ".") { - ext = "." + ext - } - ctype := mime.TypeByExtension(ext) - if ctype != "" { - output.Header("Content-Type", ctype) - } -} - -// SetStatus sets response status code. -// It writes response header directly. -func (output *BeegoOutput) SetStatus(status int) { - output.Status = status -} - -// IsCachable returns boolean of this request is cached. -// HTTP 304 means cached. -func (output *BeegoOutput) IsCachable() bool { - return output.Status >= 200 && output.Status < 300 || output.Status == 304 -} - -// IsEmpty returns boolean of this request is empty. -// HTTP 201,204 and 304 means empty. 
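// Illustrative note, not part of the original file: ServeFormatted above switches on
// the literal Accept header value, so "application/x-yaml" yields YAML and
// "application/xml" or "text/xml" yields XML; anything else, including "*/*" or a
// composite Accept list, falls through to JSON.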
-func (output *BeegoOutput) IsEmpty() bool { - return output.Status == 201 || output.Status == 204 || output.Status == 304 -} - -// IsOk returns boolean of this request runs well. -// HTTP 200 means ok. -func (output *BeegoOutput) IsOk() bool { - return output.Status == 200 -} - -// IsSuccessful returns boolean of this request runs successfully. -// HTTP 2xx means ok. -func (output *BeegoOutput) IsSuccessful() bool { - return output.Status >= 200 && output.Status < 300 -} - -// IsRedirect returns boolean of this request is redirection header. -// HTTP 301,302,307 means redirection. -func (output *BeegoOutput) IsRedirect() bool { - return output.Status == 301 || output.Status == 302 || output.Status == 303 || output.Status == 307 -} - -// IsForbidden returns boolean of this request is forbidden. -// HTTP 403 means forbidden. -func (output *BeegoOutput) IsForbidden() bool { - return output.Status == 403 -} - -// IsNotFound returns boolean of this request is not found. -// HTTP 404 means not found. -func (output *BeegoOutput) IsNotFound() bool { - return output.Status == 404 -} - -// IsClientError returns boolean of this request client sends error data. -// HTTP 4xx means client error. -func (output *BeegoOutput) IsClientError() bool { - return output.Status >= 400 && output.Status < 500 -} - -// IsServerError returns boolean of this server handler errors. -// HTTP 5xx means server internal error. -func (output *BeegoOutput) IsServerError() bool { - return output.Status >= 500 && output.Status < 600 -} - -func stringsToJSON(str string) string { - var jsons bytes.Buffer - for _, r := range str { - rint := int(r) - if rint < 128 { - jsons.WriteRune(r) - } else { - jsons.WriteString("\\u") - if rint < 0x100 { - jsons.WriteString("00") - } else if rint < 0x1000 { - jsons.WriteString("0") - } - jsons.WriteString(strconv.FormatInt(int64(rint), 16)) - } - } - return jsons.String() -} - -// Session sets session item value with given key. -func (output *BeegoOutput) Session(name interface{}, value interface{}) { - output.Context.Input.CruSession.Set(name, value) -} diff --git a/vendor/github.com/astaxie/beego/context/param/conv.go b/vendor/github.com/astaxie/beego/context/param/conv.go deleted file mode 100644 index c200e0088..000000000 --- a/vendor/github.com/astaxie/beego/context/param/conv.go +++ /dev/null @@ -1,78 +0,0 @@ -package param - -import ( - "fmt" - "reflect" - - beecontext "github.com/astaxie/beego/context" - "github.com/astaxie/beego/logs" -) - -// ConvertParams converts http method params to values that will be passed to the method controller as arguments -func ConvertParams(methodParams []*MethodParam, methodType reflect.Type, ctx *beecontext.Context) (result []reflect.Value) { - result = make([]reflect.Value, 0, len(methodParams)) - for i := 0; i < len(methodParams); i++ { - reflectValue := convertParam(methodParams[i], methodType.In(i), ctx) - result = append(result, reflectValue) - } - return -} - -func convertParam(param *MethodParam, paramType reflect.Type, ctx *beecontext.Context) (result reflect.Value) { - paramValue := getParamValue(param, ctx) - if paramValue == "" { - if param.required { - ctx.Abort(400, fmt.Sprintf("Missing parameter %s", param.name)) - } else { - paramValue = param.defaultValue - } - } - - reflectValue, err := parseValue(param, paramValue, paramType) - if err != nil { - logs.Debug(fmt.Sprintf("Error converting param %s to type %s. Value: %v, Error: %s", param.name, paramType, paramValue, err)) - ctx.Abort(400, fmt.Sprintf("Invalid parameter %s. 
Can not convert %v to type %s", param.name, paramValue, paramType)) - } - - return reflectValue -} - -func getParamValue(param *MethodParam, ctx *beecontext.Context) string { - switch param.in { - case body: - return string(ctx.Input.RequestBody) - case header: - return ctx.Input.Header(param.name) - case path: - return ctx.Input.Query(":" + param.name) - default: - return ctx.Input.Query(param.name) - } -} - -func parseValue(param *MethodParam, paramValue string, paramType reflect.Type) (result reflect.Value, err error) { - if paramValue == "" { - return reflect.Zero(paramType), nil - } - parser := getParser(param, paramType) - value, err := parser.parse(paramValue, paramType) - if err != nil { - return result, err - } - - return safeConvert(reflect.ValueOf(value), paramType) -} - -func safeConvert(value reflect.Value, t reflect.Type) (result reflect.Value, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - err, ok = r.(error) - if !ok { - err = fmt.Errorf("%v", r) - } - } - }() - result = value.Convert(t) - return -} diff --git a/vendor/github.com/astaxie/beego/context/param/methodparams.go b/vendor/github.com/astaxie/beego/context/param/methodparams.go deleted file mode 100644 index cd6708a27..000000000 --- a/vendor/github.com/astaxie/beego/context/param/methodparams.go +++ /dev/null @@ -1,69 +0,0 @@ -package param - -import ( - "fmt" - "strings" -) - -//MethodParam keeps param information to be auto passed to controller methods -type MethodParam struct { - name string - in paramType - required bool - defaultValue string -} - -type paramType byte - -const ( - param paramType = iota - path - body - header -) - -//New creates a new MethodParam with name and specific options -func New(name string, opts ...MethodParamOption) *MethodParam { - return newParam(name, nil, opts) -} - -func newParam(name string, parser paramParser, opts []MethodParamOption) (param *MethodParam) { - param = &MethodParam{name: name} - for _, option := range opts { - option(param) - } - return -} - -//Make creates an array of MethodParmas or an empty array -func Make(list ...*MethodParam) []*MethodParam { - if len(list) > 0 { - return list - } - return nil -} - -func (mp *MethodParam) String() string { - options := []string{} - result := "param.New(\"" + mp.name + "\"" - if mp.required { - options = append(options, "param.IsRequired") - } - switch mp.in { - case path: - options = append(options, "param.InPath") - case body: - options = append(options, "param.InBody") - case header: - options = append(options, "param.InHeader") - } - if mp.defaultValue != "" { - options = append(options, fmt.Sprintf(`param.Default("%s")`, mp.defaultValue)) - } - if len(options) > 0 { - result += ", " - } - result += strings.Join(options, ", ") - result += ")" - return result -} diff --git a/vendor/github.com/astaxie/beego/context/param/options.go b/vendor/github.com/astaxie/beego/context/param/options.go deleted file mode 100644 index 3d5ba013e..000000000 --- a/vendor/github.com/astaxie/beego/context/param/options.go +++ /dev/null @@ -1,37 +0,0 @@ -package param - -import ( - "fmt" -) - -// MethodParamOption defines a func which apply options on a MethodParam -type MethodParamOption func(*MethodParam) - -// IsRequired indicates that this param is required and can not be omitted from the http request -var IsRequired MethodParamOption = func(p *MethodParam) { - p.required = true -} - -// InHeader indicates that this param is passed via an http header -var InHeader MethodParamOption = func(p *MethodParam) { - 
p.in = header -} - -// InPath indicates that this param is part of the URL path -var InPath MethodParamOption = func(p *MethodParam) { - p.in = path -} - -// InBody indicates that this param is passed as an http request body -var InBody MethodParamOption = func(p *MethodParam) { - p.in = body -} - -// Default provides a default value for the http param -func Default(defaultValue interface{}) MethodParamOption { - return func(p *MethodParam) { - if defaultValue != nil { - p.defaultValue = fmt.Sprint(defaultValue) - } - } -} diff --git a/vendor/github.com/astaxie/beego/context/param/parsers.go b/vendor/github.com/astaxie/beego/context/param/parsers.go deleted file mode 100644 index 421aecf08..000000000 --- a/vendor/github.com/astaxie/beego/context/param/parsers.go +++ /dev/null @@ -1,149 +0,0 @@ -package param - -import ( - "encoding/json" - "reflect" - "strconv" - "strings" - "time" -) - -type paramParser interface { - parse(value string, toType reflect.Type) (interface{}, error) -} - -func getParser(param *MethodParam, t reflect.Type) paramParser { - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return intParser{} - case reflect.Slice: - if t.Elem().Kind() == reflect.Uint8 { //treat []byte as string - return stringParser{} - } - if param.in == body { - return jsonParser{} - } - elemParser := getParser(param, t.Elem()) - if elemParser == (jsonParser{}) { - return elemParser - } - return sliceParser(elemParser) - case reflect.Bool: - return boolParser{} - case reflect.String: - return stringParser{} - case reflect.Float32, reflect.Float64: - return floatParser{} - case reflect.Ptr: - elemParser := getParser(param, t.Elem()) - if elemParser == (jsonParser{}) { - return elemParser - } - return ptrParser(elemParser) - default: - if t.PkgPath() == "time" && t.Name() == "Time" { - return timeParser{} - } - return jsonParser{} - } -} - -type parserFunc func(value string, toType reflect.Type) (interface{}, error) - -func (f parserFunc) parse(value string, toType reflect.Type) (interface{}, error) { - return f(value, toType) -} - -type boolParser struct { -} - -func (p boolParser) parse(value string, toType reflect.Type) (interface{}, error) { - return strconv.ParseBool(value) -} - -type stringParser struct { -} - -func (p stringParser) parse(value string, toType reflect.Type) (interface{}, error) { - return value, nil -} - -type intParser struct { -} - -func (p intParser) parse(value string, toType reflect.Type) (interface{}, error) { - return strconv.Atoi(value) -} - -type floatParser struct { -} - -func (p floatParser) parse(value string, toType reflect.Type) (interface{}, error) { - if toType.Kind() == reflect.Float32 { - res, err := strconv.ParseFloat(value, 32) - if err != nil { - return nil, err - } - return float32(res), nil - } - return strconv.ParseFloat(value, 64) -} - -type timeParser struct { -} - -func (p timeParser) parse(value string, toType reflect.Type) (result interface{}, err error) { - result, err = time.Parse(time.RFC3339, value) - if err != nil { - result, err = time.Parse("2006-01-02", value) - } - return -} - -type jsonParser struct { -} - -func (p jsonParser) parse(value string, toType reflect.Type) (interface{}, error) { - pResult := reflect.New(toType) - v := pResult.Interface() - err := json.Unmarshal([]byte(value), v) - if err != nil { - return nil, err - } - return pResult.Elem().Interface(), nil -} - -func sliceParser(elemParser 
paramParser) paramParser { - return parserFunc(func(value string, toType reflect.Type) (interface{}, error) { - values := strings.Split(value, ",") - result := reflect.MakeSlice(toType, 0, len(values)) - elemType := toType.Elem() - for _, v := range values { - parsedValue, err := elemParser.parse(v, elemType) - if err != nil { - return nil, err - } - result = reflect.Append(result, reflect.ValueOf(parsedValue)) - } - return result.Interface(), nil - }) -} - -func ptrParser(elemParser paramParser) paramParser { - return parserFunc(func(value string, toType reflect.Type) (interface{}, error) { - parsedValue, err := elemParser.parse(value, toType.Elem()) - if err != nil { - return nil, err - } - newValPtr := reflect.New(toType.Elem()) - newVal := reflect.Indirect(newValPtr) - convertedVal, err := safeConvert(reflect.ValueOf(parsedValue), toType.Elem()) - if err != nil { - return nil, err - } - - newVal.Set(convertedVal) - return newValPtr.Interface(), nil - }) -} diff --git a/vendor/github.com/astaxie/beego/context/renderer.go b/vendor/github.com/astaxie/beego/context/renderer.go deleted file mode 100644 index 36a7cb53f..000000000 --- a/vendor/github.com/astaxie/beego/context/renderer.go +++ /dev/null @@ -1,12 +0,0 @@ -package context - -// Renderer defines an http response renderer -type Renderer interface { - Render(ctx *Context) -} - -type rendererFunc func(ctx *Context) - -func (f rendererFunc) Render(ctx *Context) { - f(ctx) -} diff --git a/vendor/github.com/astaxie/beego/context/response.go b/vendor/github.com/astaxie/beego/context/response.go deleted file mode 100644 index 9c3c715a2..000000000 --- a/vendor/github.com/astaxie/beego/context/response.go +++ /dev/null @@ -1,27 +0,0 @@ -package context - -import ( - "strconv" - - "net/http" -) - -const ( - //BadRequest indicates http error 400 - BadRequest StatusCode = http.StatusBadRequest - - //NotFound indicates http error 404 - NotFound StatusCode = http.StatusNotFound -) - -// StatusCode sets the http response status code -type StatusCode int - -func (s StatusCode) Error() string { - return strconv.Itoa(int(s)) -} - -// Render sets the http status code -func (s StatusCode) Render(ctx *Context) { - ctx.Output.SetStatus(int(s)) -} diff --git a/vendor/github.com/astaxie/beego/controller.go b/vendor/github.com/astaxie/beego/controller.go deleted file mode 100644 index 4b8f9807f..000000000 --- a/vendor/github.com/astaxie/beego/controller.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "bytes" - "errors" - "html/template" - "io" - "mime/multipart" - "net/http" - "net/url" - "os" - "reflect" - "strconv" - "strings" - - "github.com/astaxie/beego/context" - "github.com/astaxie/beego/context/param" - "github.com/astaxie/beego/session" -) - -var ( - // ErrAbort custom error when user stop request handler manually. - ErrAbort = errors.New("User stop run") - // GlobalControllerRouter store comments with controller. 
pkgpath+controller:comments - GlobalControllerRouter = make(map[string][]ControllerComments) -) - -// ControllerFilter store the filter for controller -type ControllerFilter struct { - Pattern string - Pos int - Filter FilterFunc - ReturnOnOutput bool - ResetParams bool -} - -// ControllerFilterComments store the comment for controller level filter -type ControllerFilterComments struct { - Pattern string - Pos int - Filter string // NOQA - ReturnOnOutput bool - ResetParams bool -} - -// ControllerImportComments store the import comment for controller needed -type ControllerImportComments struct { - ImportPath string - ImportAlias string -} - -// ControllerComments store the comment for the controller method -type ControllerComments struct { - Method string - Router string - Filters []*ControllerFilter - ImportComments []*ControllerImportComments - FilterComments []*ControllerFilterComments - AllowHTTPMethods []string - Params []map[string]string - MethodParams []*param.MethodParam -} - -// ControllerCommentsSlice implements the sort interface -type ControllerCommentsSlice []ControllerComments - -func (p ControllerCommentsSlice) Len() int { return len(p) } -func (p ControllerCommentsSlice) Less(i, j int) bool { return p[i].Router < p[j].Router } -func (p ControllerCommentsSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Controller defines some basic http request handler operations, such as -// http context, template and view, session and xsrf. -type Controller struct { - // context data - Ctx *context.Context - Data map[interface{}]interface{} - - // route controller info - controllerName string - actionName string - methodMapping map[string]func() //method:routertree - gotofunc string - AppController interface{} - - // template data - TplName string - ViewPath string - Layout string - LayoutSections map[string]string // the key is the section name and the value is the template name - TplPrefix string - TplExt string - EnableRender bool - - // xsrf data - _xsrfToken string - XSRFExpire int - EnableXSRF bool - - // session - CruSession session.Store -} - -// ControllerInterface is an interface to uniform all controller handler. -type ControllerInterface interface { - Init(ct *context.Context, controllerName, actionName string, app interface{}) - Prepare() - Get() - Post() - Delete() - Put() - Head() - Patch() - Options() - Finish() - Render() error - XSRFToken() string - CheckXSRFCookie() bool - HandlerFunc(fn string) bool - URLMapping() -} - -// Init generates default values of controller operations. -func (c *Controller) Init(ctx *context.Context, controllerName, actionName string, app interface{}) { - c.Layout = "" - c.TplName = "" - c.controllerName = controllerName - c.actionName = actionName - c.Ctx = ctx - c.TplExt = "tpl" - c.AppController = app - c.EnableRender = true - c.EnableXSRF = true - c.Data = ctx.Input.Data() - c.methodMapping = make(map[string]func()) -} - -// Prepare runs after Init before request function execution. -func (c *Controller) Prepare() {} - -// Finish runs after request function execution. -func (c *Controller) Finish() {} - -// Get adds a request function to handle GET request. -func (c *Controller) Get() { - http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) -} - -// Post adds a request function to handle POST request. -func (c *Controller) Post() { - http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) -} - -// Delete adds a request function to handle DELETE request. 
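// Illustrative sketch, not part of the original file: Get and Post above, like
// Delete and the other verb methods that follow, default to 405 Method Not Allowed,
// so a concrete controller embeds beego.Controller and overrides only the verbs it
// actually serves, e.g.
//
//	type PingController struct{ beego.Controller }
//
//	func (p *PingController) Get() {
//		p.Ctx.Output.Body([]byte("pong"))
//	}
//
// PingController and its response body are hypothetical.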
-func (c *Controller) Delete() { - http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) -} - -// Put adds a request function to handle PUT request. -func (c *Controller) Put() { - http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) -} - -// Head adds a request function to handle HEAD request. -func (c *Controller) Head() { - http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) -} - -// Patch adds a request function to handle PATCH request. -func (c *Controller) Patch() { - http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) -} - -// Options adds a request function to handle OPTIONS request. -func (c *Controller) Options() { - http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) -} - -// HandlerFunc call function with the name -func (c *Controller) HandlerFunc(fnname string) bool { - if v, ok := c.methodMapping[fnname]; ok { - v() - return true - } - return false -} - -// URLMapping register the internal Controller router. -func (c *Controller) URLMapping() {} - -// Mapping the method to function -func (c *Controller) Mapping(method string, fn func()) { - c.methodMapping[method] = fn -} - -// Render sends the response with rendered template bytes as text/html type. -func (c *Controller) Render() error { - if !c.EnableRender { - return nil - } - rb, err := c.RenderBytes() - if err != nil { - return err - } - - if c.Ctx.ResponseWriter.Header().Get("Content-Type") == "" { - c.Ctx.Output.Header("Content-Type", "text/html; charset=utf-8") - } - - return c.Ctx.Output.Body(rb) -} - -// RenderString returns the rendered template string. Do not send out response. -func (c *Controller) RenderString() (string, error) { - b, e := c.RenderBytes() - return string(b), e -} - -// RenderBytes returns the bytes of rendered template string. Do not send out response. -func (c *Controller) RenderBytes() ([]byte, error) { - buf, err := c.renderTemplate() - //if the controller has set layout, then first get the tplName's content set the content to the layout - if err == nil && c.Layout != "" { - c.Data["LayoutContent"] = template.HTML(buf.String()) - - if c.LayoutSections != nil { - for sectionName, sectionTpl := range c.LayoutSections { - if sectionTpl == "" { - c.Data[sectionName] = "" - continue - } - buf.Reset() - err = ExecuteViewPathTemplate(&buf, sectionTpl, c.viewPath(), c.Data) - if err != nil { - return nil, err - } - c.Data[sectionName] = template.HTML(buf.String()) - } - } - - buf.Reset() - ExecuteViewPathTemplate(&buf, c.Layout, c.viewPath(), c.Data) - } - return buf.Bytes(), err -} - -func (c *Controller) renderTemplate() (bytes.Buffer, error) { - var buf bytes.Buffer - if c.TplName == "" { - c.TplName = strings.ToLower(c.controllerName) + "/" + strings.ToLower(c.actionName) + "." + c.TplExt - } - if c.TplPrefix != "" { - c.TplName = c.TplPrefix + c.TplName - } - if BConfig.RunMode == DEV { - buildFiles := []string{c.TplName} - if c.Layout != "" { - buildFiles = append(buildFiles, c.Layout) - if c.LayoutSections != nil { - for _, sectionTpl := range c.LayoutSections { - if sectionTpl == "" { - continue - } - buildFiles = append(buildFiles, sectionTpl) - } - } - } - BuildTemplate(c.viewPath(), buildFiles...) - } - return buf, ExecuteViewPathTemplate(&buf, c.TplName, c.viewPath(), c.Data) -} - -func (c *Controller) viewPath() string { - if c.ViewPath == "" { - return BConfig.WebConfig.ViewsPath - } - return c.ViewPath -} - -// Redirect sends the redirection response to url with status code. 
-func (c *Controller) Redirect(url string, code int) { - logAccess(c.Ctx, nil, code) - c.Ctx.Redirect(code, url) -} - -// SetData set the data depending on the accepted -func (c *Controller) SetData(data interface{}) { - accept := c.Ctx.Input.Header("Accept") - switch accept { - case context.ApplicationYAML: - c.Data["yaml"] = data - case context.ApplicationXML, context.TextXML: - c.Data["xml"] = data - default: - c.Data["json"] = data - } -} - -// Abort stops controller handler and show the error data if code is defined in ErrorMap or code string. -func (c *Controller) Abort(code string) { - status, err := strconv.Atoi(code) - if err != nil { - status = 200 - } - c.CustomAbort(status, code) -} - -// CustomAbort stops controller handler and show the error data, it's similar Aborts, but support status code and body. -func (c *Controller) CustomAbort(status int, body string) { - // first panic from ErrorMaps, it is user defined error functions. - if _, ok := ErrorMaps[body]; ok { - c.Ctx.Output.Status = status - panic(body) - } - // last panic user string - c.Ctx.ResponseWriter.WriteHeader(status) - c.Ctx.ResponseWriter.Write([]byte(body)) - panic(ErrAbort) -} - -// StopRun makes panic of USERSTOPRUN error and go to recover function if defined. -func (c *Controller) StopRun() { - panic(ErrAbort) -} - -// URLFor does another controller handler in this request function. -// it goes to this controller method if endpoint is not clear. -func (c *Controller) URLFor(endpoint string, values ...interface{}) string { - if len(endpoint) == 0 { - return "" - } - if endpoint[0] == '.' { - return URLFor(reflect.Indirect(reflect.ValueOf(c.AppController)).Type().Name()+endpoint, values...) - } - return URLFor(endpoint, values...) -} - -// ServeJSON sends a json response with encoding charset. -func (c *Controller) ServeJSON(encoding ...bool) { - var ( - hasIndent = BConfig.RunMode != PROD - hasEncoding = len(encoding) > 0 && encoding[0] - ) - - c.Ctx.Output.JSON(c.Data["json"], hasIndent, hasEncoding) -} - -// ServeJSONP sends a jsonp response. -func (c *Controller) ServeJSONP() { - hasIndent := BConfig.RunMode != PROD - c.Ctx.Output.JSONP(c.Data["jsonp"], hasIndent) -} - -// ServeXML sends xml response. -func (c *Controller) ServeXML() { - hasIndent := BConfig.RunMode != PROD - c.Ctx.Output.XML(c.Data["xml"], hasIndent) -} - -// ServeYAML sends yaml response. -func (c *Controller) ServeYAML() { - c.Ctx.Output.YAML(c.Data["yaml"]) -} - -// ServeFormatted serve YAML, XML OR JSON, depending on the value of the Accept header -func (c *Controller) ServeFormatted(encoding ...bool) { - hasIndent := BConfig.RunMode != PROD - hasEncoding := len(encoding) > 0 && encoding[0] - c.Ctx.Output.ServeFormatted(c.Data, hasIndent, hasEncoding) -} - -// Input returns the input data map from POST or PUT request body and query string. -func (c *Controller) Input() url.Values { - if c.Ctx.Request.Form == nil { - c.Ctx.Request.ParseForm() - } - return c.Ctx.Request.Form -} - -// ParseForm maps input data map to obj struct. 
-func (c *Controller) ParseForm(obj interface{}) error { - return ParseForm(c.Input(), obj) -} - -// GetString returns the input value by key string or the default value while it's present and input is blank -func (c *Controller) GetString(key string, def ...string) string { - if v := c.Ctx.Input.Query(key); v != "" { - return v - } - if len(def) > 0 { - return def[0] - } - return "" -} - -// GetStrings returns the input string slice by key string or the default value while it's present and input is blank -// it's designed for multi-value input field such as checkbox(input[type=checkbox]), multi-selection. -func (c *Controller) GetStrings(key string, def ...[]string) []string { - var defv []string - if len(def) > 0 { - defv = def[0] - } - - if f := c.Input(); f == nil { - return defv - } else if vs := f[key]; len(vs) > 0 { - return vs - } - - return defv -} - -// GetInt returns input as an int or the default value while it's present and input is blank -func (c *Controller) GetInt(key string, def ...int) (int, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - return strconv.Atoi(strv) -} - -// GetInt8 return input as an int8 or the default value while it's present and input is blank -func (c *Controller) GetInt8(key string, def ...int8) (int8, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - i64, err := strconv.ParseInt(strv, 10, 8) - return int8(i64), err -} - -// GetUint8 return input as an uint8 or the default value while it's present and input is blank -func (c *Controller) GetUint8(key string, def ...uint8) (uint8, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - u64, err := strconv.ParseUint(strv, 10, 8) - return uint8(u64), err -} - -// GetInt16 returns input as an int16 or the default value while it's present and input is blank -func (c *Controller) GetInt16(key string, def ...int16) (int16, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - i64, err := strconv.ParseInt(strv, 10, 16) - return int16(i64), err -} - -// GetUint16 returns input as an uint16 or the default value while it's present and input is blank -func (c *Controller) GetUint16(key string, def ...uint16) (uint16, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - u64, err := strconv.ParseUint(strv, 10, 16) - return uint16(u64), err -} - -// GetInt32 returns input as an int32 or the default value while it's present and input is blank -func (c *Controller) GetInt32(key string, def ...int32) (int32, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - i64, err := strconv.ParseInt(strv, 10, 32) - return int32(i64), err -} - -// GetUint32 returns input as an uint32 or the default value while it's present and input is blank -func (c *Controller) GetUint32(key string, def ...uint32) (uint32, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - u64, err := strconv.ParseUint(strv, 10, 32) - return uint32(u64), err -} - -// GetInt64 returns input value as int64 or the default value while it's present and input is blank. 
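// Illustrative sketch, not part of the original file: GetInt above and the other
// typed getters (continued below with GetInt64 and friends) fall back to the
// supplied default only when the query value is blank; otherwise the parse error is
// surfaced to the caller, e.g.
//
//	limit, err := c.GetInt("limit", 20)
//	if err != nil {
//		c.CustomAbort(400, "limit must be an integer")
//	}
//
// The "limit" parameter is hypothetical.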
-func (c *Controller) GetInt64(key string, def ...int64) (int64, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - return strconv.ParseInt(strv, 10, 64) -} - -// GetUint64 returns input value as uint64 or the default value while it's present and input is blank. -func (c *Controller) GetUint64(key string, def ...uint64) (uint64, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - return strconv.ParseUint(strv, 10, 64) -} - -// GetBool returns input value as bool or the default value while it's present and input is blank. -func (c *Controller) GetBool(key string, def ...bool) (bool, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - return strconv.ParseBool(strv) -} - -// GetFloat returns input value as float64 or the default value while it's present and input is blank. -func (c *Controller) GetFloat(key string, def ...float64) (float64, error) { - strv := c.Ctx.Input.Query(key) - if len(strv) == 0 && len(def) > 0 { - return def[0], nil - } - return strconv.ParseFloat(strv, 64) -} - -// GetFile returns the file data in file upload field named as key. -// it returns the first one of multi-uploaded files. -func (c *Controller) GetFile(key string) (multipart.File, *multipart.FileHeader, error) { - return c.Ctx.Request.FormFile(key) -} - -// GetFiles return multi-upload files -// files, err:=c.GetFiles("myfiles") -// if err != nil { -// http.Error(w, err.Error(), http.StatusNoContent) -// return -// } -// for i, _ := range files { -// //for each fileheader, get a handle to the actual file -// file, err := files[i].Open() -// defer file.Close() -// if err != nil { -// http.Error(w, err.Error(), http.StatusInternalServerError) -// return -// } -// //create destination file making sure the path is writeable. -// dst, err := os.Create("upload/" + files[i].Filename) -// defer dst.Close() -// if err != nil { -// http.Error(w, err.Error(), http.StatusInternalServerError) -// return -// } -// //copy the uploaded file to the destination file -// if _, err := io.Copy(dst, file); err != nil { -// http.Error(w, err.Error(), http.StatusInternalServerError) -// return -// } -// } -func (c *Controller) GetFiles(key string) ([]*multipart.FileHeader, error) { - if files, ok := c.Ctx.Request.MultipartForm.File[key]; ok { - return files, nil - } - return nil, http.ErrMissingFile -} - -// SaveToFile saves uploaded file to new path. -// it only operates the first one of mutil-upload form file field. -func (c *Controller) SaveToFile(fromfile, tofile string) error { - file, _, err := c.Ctx.Request.FormFile(fromfile) - if err != nil { - return err - } - defer file.Close() - f, err := os.OpenFile(tofile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) - if err != nil { - return err - } - defer f.Close() - io.Copy(f, file) - return nil -} - -// StartSession starts session and load old session data info this controller. -func (c *Controller) StartSession() session.Store { - if c.CruSession == nil { - c.CruSession = c.Ctx.Input.CruSession - } - return c.CruSession -} - -// SetSession puts value into session. -func (c *Controller) SetSession(name interface{}, value interface{}) { - if c.CruSession == nil { - c.StartSession() - } - c.CruSession.Set(name, value) -} - -// GetSession gets value from session. 
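// Illustrative sketch, not part of the original file: SaveToFile above pairs the
// upload form field name with a destination path, e.g.
//
//	if err := c.SaveToFile("avatar", "static/upload/avatar.png"); err != nil {
//		c.CustomAbort(500, err.Error())
//	}
//
// The field name and target path are hypothetical.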
-func (c *Controller) GetSession(name interface{}) interface{} { - if c.CruSession == nil { - c.StartSession() - } - return c.CruSession.Get(name) -} - -// DelSession removes value from session. -func (c *Controller) DelSession(name interface{}) { - if c.CruSession == nil { - c.StartSession() - } - c.CruSession.Delete(name) -} - -// SessionRegenerateID regenerates session id for this session. -// the session data have no changes. -func (c *Controller) SessionRegenerateID() { - if c.CruSession != nil { - c.CruSession.SessionRelease(c.Ctx.ResponseWriter) - } - c.CruSession = GlobalSessions.SessionRegenerateID(c.Ctx.ResponseWriter, c.Ctx.Request) - c.Ctx.Input.CruSession = c.CruSession -} - -// DestroySession cleans session data and session cookie. -func (c *Controller) DestroySession() { - c.Ctx.Input.CruSession.Flush() - c.Ctx.Input.CruSession = nil - GlobalSessions.SessionDestroy(c.Ctx.ResponseWriter, c.Ctx.Request) -} - -// IsAjax returns this request is ajax or not. -func (c *Controller) IsAjax() bool { - return c.Ctx.Input.IsAjax() -} - -// GetSecureCookie returns decoded cookie value from encoded browser cookie values. -func (c *Controller) GetSecureCookie(Secret, key string) (string, bool) { - return c.Ctx.GetSecureCookie(Secret, key) -} - -// SetSecureCookie puts value into cookie after encoded the value. -func (c *Controller) SetSecureCookie(Secret, name, value string, others ...interface{}) { - c.Ctx.SetSecureCookie(Secret, name, value, others...) -} - -// XSRFToken creates a CSRF token string and returns. -func (c *Controller) XSRFToken() string { - if c._xsrfToken == "" { - expire := int64(BConfig.WebConfig.XSRFExpire) - if c.XSRFExpire > 0 { - expire = int64(c.XSRFExpire) - } - c._xsrfToken = c.Ctx.XSRFToken(BConfig.WebConfig.XSRFKey, expire) - } - return c._xsrfToken -} - -// CheckXSRFCookie checks xsrf token in this request is valid or not. -// the token can provided in request header "X-Xsrftoken" and "X-CsrfToken" -// or in form field value named as "_xsrf". -func (c *Controller) CheckXSRFCookie() bool { - if !c.EnableXSRF { - return true - } - return c.Ctx.CheckXSRFCookie() -} - -// XSRFFormHTML writes an input field contains xsrf token value. -func (c *Controller) XSRFFormHTML() string { - return `` -} - -// GetControllerAndAction gets the executing controller name and action name. -func (c *Controller) GetControllerAndAction() (string, string) { - return c.controllerName, c.actionName -} diff --git a/vendor/github.com/astaxie/beego/doc.go b/vendor/github.com/astaxie/beego/doc.go deleted file mode 100644 index 8825bd299..000000000 --- a/vendor/github.com/astaxie/beego/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Package beego provide a MVC framework -beego: an open-source, high-performance, modular, full-stack web framework - -It is used for rapid development of RESTful APIs, web apps and backend services in Go. -beego is inspired by Tornado, Sinatra and Flask with the added benefit of some Go-specific features such as interfaces and struct embedding. - - package main - import "github.com/astaxie/beego" - - func main() { - beego.Run() - } - -more information: http://beego.me -*/ -package beego diff --git a/vendor/github.com/astaxie/beego/error.go b/vendor/github.com/astaxie/beego/error.go deleted file mode 100644 index 727830df3..000000000 --- a/vendor/github.com/astaxie/beego/error.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "fmt" - "html/template" - "net/http" - "reflect" - "runtime" - "strconv" - "strings" - - "github.com/astaxie/beego/context" - "github.com/astaxie/beego/utils" -) - -const ( - errorTypeHandler = iota - errorTypeController -) - -var tpl = ` - - - - - beego application error - - - - - -
-	Request Method: {{.RequestMethod}}
-	Request URL: {{.RequestURL}}
-	RemoteAddr: {{.RemoteAddr }}
-	Stack
-	{{.Stack}}
- - - -` - -// render default application error page with error and stack string. -func showErr(err interface{}, ctx *context.Context, stack string) { - t, _ := template.New("beegoerrortemp").Parse(tpl) - data := map[string]string{ - "AppError": fmt.Sprintf("%s:%v", BConfig.AppName, err), - "RequestMethod": ctx.Input.Method(), - "RequestURL": ctx.Input.URI(), - "RemoteAddr": ctx.Input.IP(), - "Stack": stack, - "BeegoVersion": VERSION, - "GoVersion": runtime.Version(), - } - t.Execute(ctx.ResponseWriter, data) -} - -var errtpl = ` - - - - - {{.Title}} - - - -
-	{{.Content}}
-	Go Home
-	Powered by beego {{.BeegoVersion}}
- - -` - -type errorInfo struct { - controllerType reflect.Type - handler http.HandlerFunc - method string - errorType int -} - -// ErrorMaps holds map of http handlers for each error string. -// there is 10 kinds default error(40x and 50x) -var ErrorMaps = make(map[string]*errorInfo, 10) - -// show 401 unauthorized error. -func unauthorized(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 401, - "
The page you have requested can't be authorized."+ - "
Perhaps you are here because:"+ - "

", - ) -} - -// show 402 Payment Required -func paymentRequired(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 402, - "
The page you have requested Payment Required."+ - "
Perhaps you are here because:"+ - "

", - ) -} - -// show 403 forbidden error. -func forbidden(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 403, - "
The page you have requested is forbidden."+ - "
Perhaps you are here because:"+ - "

", - ) -} - -// show 422 missing xsrf token -func missingxsrf(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 422, - "
The page you have requested is forbidden."+ - "
Perhaps you are here because:"+ - "

", - ) -} - -// show 417 invalid xsrf token -func invalidxsrf(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 417, - "
The page you have requested is forbidden."+ - "
Perhaps you are here because:"+ - "

", - ) -} - -// show 404 not found error. -func notFound(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 404, - "
The page you have requested has flown the coop."+ - "
Perhaps you are here because:"+ - "

", - ) -} - -// show 405 Method Not Allowed -func methodNotAllowed(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 405, - "
The method you have requested Not Allowed."+ - "
Perhaps you are here because:"+ - "

", - ) -} - -// show 500 internal server error. -func internalServerError(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 500, - "
The page you have requested is down right now."+ - "

", - ) -} - -// show 501 Not Implemented. -func notImplemented(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 501, - "
The page you have requested is Not Implemented."+ - "

", - ) -} - -// show 502 Bad Gateway. -func badGateway(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 502, - "
The page you have requested is down right now."+ - "

", - ) -} - -// show 503 service unavailable error. -func serviceUnavailable(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 503, - "
The page you have requested is unavailable."+ - "
Perhaps you are here because:"+ - "

", - ) -} - -// show 504 Gateway Timeout. -func gatewayTimeout(rw http.ResponseWriter, r *http.Request) { - responseError(rw, r, - 504, - "
The page you have requested is unavailable"+ - "
Perhaps you are here because:"+ - "

", - ) -} - -func responseError(rw http.ResponseWriter, r *http.Request, errCode int, errContent string) { - t, _ := template.New("beegoerrortemp").Parse(errtpl) - data := M{ - "Title": http.StatusText(errCode), - "BeegoVersion": VERSION, - "Content": template.HTML(errContent), - } - t.Execute(rw, data) -} - -// ErrorHandler registers http.HandlerFunc to each http err code string. -// usage: -// beego.ErrorHandler("404",NotFound) -// beego.ErrorHandler("500",InternalServerError) -func ErrorHandler(code string, h http.HandlerFunc) *App { - ErrorMaps[code] = &errorInfo{ - errorType: errorTypeHandler, - handler: h, - method: code, - } - return BeeApp -} - -// ErrorController registers ControllerInterface to each http err code string. -// usage: -// beego.ErrorController(&controllers.ErrorController{}) -func ErrorController(c ControllerInterface) *App { - reflectVal := reflect.ValueOf(c) - rt := reflectVal.Type() - ct := reflect.Indirect(reflectVal).Type() - for i := 0; i < rt.NumMethod(); i++ { - methodName := rt.Method(i).Name - if !utils.InSlice(methodName, exceptMethod) && strings.HasPrefix(methodName, "Error") { - errName := strings.TrimPrefix(methodName, "Error") - ErrorMaps[errName] = &errorInfo{ - errorType: errorTypeController, - controllerType: ct, - method: methodName, - } - } - } - return BeeApp -} - -// Exception Write HttpStatus with errCode and Exec error handler if exist. -func Exception(errCode uint64, ctx *context.Context) { - exception(strconv.FormatUint(errCode, 10), ctx) -} - -// show error string as simple text message. -// if error string is empty, show 503 or 500 error as default. -func exception(errCode string, ctx *context.Context) { - atoi := func(code string) int { - v, err := strconv.Atoi(code) - if err == nil { - return v - } - if ctx.Output.Status == 0 { - return 503 - } - return ctx.Output.Status - } - - for _, ec := range []string{errCode, "503", "500"} { - if h, ok := ErrorMaps[ec]; ok { - executeError(h, ctx, atoi(ec)) - return - } - } - //if 50x error has been removed from errorMap - ctx.ResponseWriter.WriteHeader(atoi(errCode)) - ctx.WriteString(errCode) -} - -func executeError(err *errorInfo, ctx *context.Context, code int) { - //make sure to log the error in the access log - logAccess(ctx, nil, code) - - if err.errorType == errorTypeHandler { - ctx.ResponseWriter.WriteHeader(code) - err.handler(ctx.ResponseWriter, ctx.Request) - return - } - if err.errorType == errorTypeController { - ctx.Output.SetStatus(code) - //Invoke the request handler - vc := reflect.New(err.controllerType) - execController, ok := vc.Interface().(ControllerInterface) - if !ok { - panic("controller is not ControllerInterface") - } - //call the controller init function - execController.Init(ctx, err.controllerType.Name(), err.method, vc.Interface()) - - //call prepare function - execController.Prepare() - - execController.URLMapping() - - method := vc.MethodByName(err.method) - method.Call([]reflect.Value{}) - - //render template - if BConfig.WebConfig.AutoRender { - if err := execController.Render(); err != nil { - panic(err) - } - } - - // finish all runrouter. release resource - execController.Finish() - } -} diff --git a/vendor/github.com/astaxie/beego/filter.go b/vendor/github.com/astaxie/beego/filter.go deleted file mode 100644 index 9cc6e9134..000000000 --- a/vendor/github.com/astaxie/beego/filter.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import "github.com/astaxie/beego/context" - -// FilterFunc defines a filter function which is invoked before the controller handler is executed. -type FilterFunc func(*context.Context) - -// FilterRouter defines a filter operation which is invoked before the controller handler is executed. -// It can match the URL against a pattern, and execute a filter function -// when a request with a matching URL arrives. -type FilterRouter struct { - filterFunc FilterFunc - tree *Tree - pattern string - returnOnOutput bool - resetParams bool -} - -// ValidRouter checks if the current request is matched by this filter. -// If the request is matched, the values of the URL parameters defined -// by the filter pattern are also returned. -func (f *FilterRouter) ValidRouter(url string, ctx *context.Context) bool { - isOk := f.tree.Match(url, ctx) - if isOk != nil { - if b, ok := isOk.(bool); ok { - return b - } - } - return false -} diff --git a/vendor/github.com/astaxie/beego/flash.go b/vendor/github.com/astaxie/beego/flash.go deleted file mode 100644 index a6485a17e..000000000 --- a/vendor/github.com/astaxie/beego/flash.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "fmt" - "net/url" - "strings" -) - -// FlashData is a tools to maintain data when using across request. -type FlashData struct { - Data map[string]string -} - -// NewFlash return a new empty FlashData struct. -func NewFlash() *FlashData { - return &FlashData{ - Data: make(map[string]string), - } -} - -// Set message to flash -func (fd *FlashData) Set(key string, msg string, args ...interface{}) { - if len(args) == 0 { - fd.Data[key] = msg - } else { - fd.Data[key] = fmt.Sprintf(msg, args...) - } -} - -// Success writes success message to flash. -func (fd *FlashData) Success(msg string, args ...interface{}) { - if len(args) == 0 { - fd.Data["success"] = msg - } else { - fd.Data["success"] = fmt.Sprintf(msg, args...) - } -} - -// Notice writes notice message to flash. -func (fd *FlashData) Notice(msg string, args ...interface{}) { - if len(args) == 0 { - fd.Data["notice"] = msg - } else { - fd.Data["notice"] = fmt.Sprintf(msg, args...) - } -} - -// Warning writes warning message to flash. 
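// Illustrative sketch, not part of the original file: a FilterFunc from filter.go
// above runs before the matched handler; registration goes through the wider beego
// API, so InsertFilter and BeforeRouter are assumed here rather than defined in the
// code shown:
//
//	var requireAuth beego.FilterFunc = func(ctx *context.Context) {
//		if ctx.Input.Header("Authorization") == "" {
//			ctx.Abort(401, "unauthorized")
//		}
//	}
//	beego.InsertFilter("/v1beta/*", beego.BeforeRouter, requireAuth)
//
// The route pattern is hypothetical; context here is github.com/astaxie/beego/context.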
-func (fd *FlashData) Warning(msg string, args ...interface{}) { - if len(args) == 0 { - fd.Data["warning"] = msg - } else { - fd.Data["warning"] = fmt.Sprintf(msg, args...) - } -} - -// Error writes error message to flash. -func (fd *FlashData) Error(msg string, args ...interface{}) { - if len(args) == 0 { - fd.Data["error"] = msg - } else { - fd.Data["error"] = fmt.Sprintf(msg, args...) - } -} - -// Store does the saving operation of flash data. -// the data are encoded and saved in cookie. -func (fd *FlashData) Store(c *Controller) { - c.Data["flash"] = fd.Data - var flashValue string - for key, value := range fd.Data { - flashValue += "\x00" + key + "\x23" + BConfig.WebConfig.FlashSeparator + "\x23" + value + "\x00" - } - c.Ctx.SetCookie(BConfig.WebConfig.FlashName, url.QueryEscape(flashValue), 0, "/") -} - -// ReadFromRequest parsed flash data from encoded values in cookie. -func ReadFromRequest(c *Controller) *FlashData { - flash := NewFlash() - if cookie, err := c.Ctx.Request.Cookie(BConfig.WebConfig.FlashName); err == nil { - v, _ := url.QueryUnescape(cookie.Value) - vals := strings.Split(v, "\x00") - for _, v := range vals { - if len(v) > 0 { - kv := strings.Split(v, "\x23"+BConfig.WebConfig.FlashSeparator+"\x23") - if len(kv) == 2 { - flash.Data[kv[0]] = kv[1] - } - } - } - //read one time then delete it - c.Ctx.SetCookie(BConfig.WebConfig.FlashName, "", -1, "/") - } - c.Data["flash"] = flash.Data - return flash -} diff --git a/vendor/github.com/astaxie/beego/fs.go b/vendor/github.com/astaxie/beego/fs.go deleted file mode 100644 index bf7002ad0..000000000 --- a/vendor/github.com/astaxie/beego/fs.go +++ /dev/null @@ -1,74 +0,0 @@ -package beego - -import ( - "net/http" - "os" - "path/filepath" -) - -type FileSystem struct { -} - -func (d FileSystem) Open(name string) (http.File, error) { - return os.Open(name) -} - -// Walk walks the file tree rooted at root in filesystem, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. -func Walk(fs http.FileSystem, root string, walkFn filepath.WalkFunc) error { - - f, err := fs.Open(root) - if err != nil { - return err - } - info, err := f.Stat() - if err != nil { - err = walkFn(root, nil, err) - } else { - err = walk(fs, root, info, walkFn) - } - if err == filepath.SkipDir { - return nil - } - return err -} - -// walk recursively descends path, calling walkFn. -func walk(fs http.FileSystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - var err error - if !info.IsDir() { - return walkFn(path, info, nil) - } - - dir, err := fs.Open(path) - defer dir.Close() - if err != nil { - if err1 := walkFn(path, info, err); err1 != nil { - return err1 - } - return err - } - dirs, err := dir.Readdir(-1) - err1 := walkFn(path, info, err) - // If err != nil, walk can't walk into this directory. - // err1 != nil means walkFn want walk to skip this directory or stop walking. - // Therefore, if one of err and err1 isn't nil, walk will return. - if err != nil || err1 != nil { - // The caller's behavior is controlled by the return value, which is decided - // by walkFn. walkFn may ignore err and return nil. - // If walkFn returns SkipDir, it will be handled by the caller. - // So walk should return whatever walkFn returns. 
- return err1 - } - - for _, fileInfo := range dirs { - filename := filepath.Join(path, fileInfo.Name()) - if err = walk(fs, filename, fileInfo, walkFn); err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/astaxie/beego/go.mod b/vendor/github.com/astaxie/beego/go.mod deleted file mode 100644 index 9b3eb08e3..000000000 --- a/vendor/github.com/astaxie/beego/go.mod +++ /dev/null @@ -1,39 +0,0 @@ -module github.com/astaxie/beego - -require ( - github.com/Knetic/govaluate v3.0.0+incompatible // indirect - github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd - github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542 - github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff - github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 - github.com/casbin/casbin v1.7.0 - github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 - github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb - github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c // indirect - github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect - github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect - github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 // indirect - github.com/elazarl/go-bindata-assetfs v1.0.0 - github.com/go-redis/redis v6.14.2+incompatible - github.com/go-sql-driver/mysql v1.4.1 - github.com/gogo/protobuf v1.1.1 - github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect - github.com/gomodule/redigo v2.0.0+incompatible - github.com/lib/pq v1.0.0 - github.com/mattn/go-sqlite3 v1.10.0 - github.com/pelletier/go-toml v1.2.0 // indirect - github.com/pkg/errors v0.8.0 // indirect - github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect - github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373 - github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d // indirect - github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec - github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c // indirect - github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b // indirect - golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85 - golang.org/x/net v0.0.0-20181114220301-adae6a3d119a // indirect - gopkg.in/yaml.v2 v2.2.1 -) - -replace golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85 => github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85 - -replace gopkg.in/yaml.v2 v2.2.1 => github.com/go-yaml/yaml v0.0.0-20180328195020-5420a8b6744d diff --git a/vendor/github.com/astaxie/beego/go.sum b/vendor/github.com/astaxie/beego/go.sum deleted file mode 100644 index fbe3a8c32..000000000 --- a/vendor/github.com/astaxie/beego/go.sum +++ /dev/null @@ -1,67 +0,0 @@ -github.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg= -github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd h1:jZtX5jh5IOMu0fpOTC3ayh6QGSPJ/KWOv1lgPvbRw1M= -github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ= -github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542 h1:nYXb+3jF6Oq/j8R/y90XrKpreCxIalBWfeyeKymgOPk= -github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU= -github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff h1:/kO0p2RTGLB8R5gub7ps0GmYpB2O8LXEoPq8tzFDCUI= 
-github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff/go.mod h1:PhH1ZhyCzHKt4uAasyx+ljRCgoezetRNf59CUtwUkqY= -github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g= -github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= -github.com/casbin/casbin v1.7.0 h1:PuzlE8w0JBg/DhIqnkF1Dewf3z+qmUZMVN07PonvVUQ= -github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE= -github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= -github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= -github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb h1:w3RapLhkA5+km9Z8vUkC6VCaskduJXvXwJg5neKnfDU= -github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= -github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c h1:K4FIibkr4//ziZKOKmt4RL0YImuTjLLBtwElf+F2lSQ= -github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= -github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8= -github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= -github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ= -github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY= -github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8= -github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk= -github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= -github.com/go-redis/redis v6.14.2+incompatible h1:UE9pLhzmWf+xHNmZsoccjXosPicuiNaInPgym8nzfg0= -github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-yaml/yaml v0.0.0-20180328195020-5420a8b6744d h1:xy93KVe+KrIIwWDEAfQBdIfsiHJkepbYsDr+VY3g9/o= -github.com/go-yaml/yaml v0.0.0-20180328195020-5420a8b6744d/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85 h1:B7ZbAFz7NOmvpUE5RGtu3u0WIizy5GdvbNpEf4RPnWs= -github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:uZvAcrsnNaCxlh1HorK5dUQHGmEKPh2H/Rl1kehswPo= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= -github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= -github.com/lib/pq 
v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM= -github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= -github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373 h1:p6IxqQMjab30l4lb9mmkIkkcE1yv6o0SKbPhW5pxqHI= -github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg= -github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d h1:NVwnfyR3rENtlz62bcrkXME3INVUa4lcdGt+opvxExs= -github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA= -github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec h1:q6XVwXmKvCRHRqesF3cSv6lNqqHi0QWOvgDlSohg8UA= -github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec/go.mod h1:QBvMkMya+gXctz3kmljlUCu/yB3GZ6oee+dUozsezQE= -github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c h1:3eGShk3EQf5gJCYW+WzA0TEJQd37HLOmlYF7N0YJwv0= -github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= -github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b h1:0Ve0/CCjiAiyKddUMUn3RwIGlq2iTW4GuVzyoKBYO/8= -github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc= -golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85 h1:et7+NAX3lLIk5qUCTA9QelBjGE/NkhzYw/mhnr0s7nI= -golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/astaxie/beego/grace/conn.go b/vendor/github.com/astaxie/beego/grace/conn.go deleted file mode 100644 index e020f8507..000000000 --- a/vendor/github.com/astaxie/beego/grace/conn.go +++ /dev/null @@ -1,39 +0,0 @@ -package grace - -import ( - "errors" - "net" - "sync" -) - -type graceConn struct { - net.Conn - server *Server - m sync.Mutex - closed bool -} - -func (c *graceConn) Close() (err error) { - defer func() { - if r := recover(); r != nil { - switch x := r.(type) { - case string: - err = errors.New(x) - case error: - err = x - default: - err = errors.New("Unknown panic") - } - } - }() - - c.m.Lock() - if c.closed { - c.m.Unlock() - return - } - c.server.wg.Done() - c.closed = true - c.m.Unlock() - return c.Conn.Close() -} diff --git a/vendor/github.com/astaxie/beego/grace/grace.go 
b/vendor/github.com/astaxie/beego/grace/grace.go deleted file mode 100644 index 6ebf8455f..000000000 --- a/vendor/github.com/astaxie/beego/grace/grace.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package grace use to hot reload -// Description: http://grisha.org/blog/2014/06/03/graceful-restart-in-golang/ -// -// Usage: -// -// import( -// "log" -// "net/http" -// "os" -// -// "github.com/astaxie/beego/grace" -// ) -// -// func handler(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("WORLD!")) -// } -// -// func main() { -// mux := http.NewServeMux() -// mux.HandleFunc("/hello", handler) -// -// err := grace.ListenAndServe("localhost:8080", mux) -// if err != nil { -// log.Println(err) -// } -// log.Println("Server on 8080 stopped") -// os.Exit(0) -// } -package grace - -import ( - "flag" - "net/http" - "os" - "strings" - "sync" - "syscall" - "time" -) - -const ( - // PreSignal is the position to add filter before signal - PreSignal = iota - // PostSignal is the position to add filter after signal - PostSignal - // StateInit represent the application inited - StateInit - // StateRunning represent the application is running - StateRunning - // StateShuttingDown represent the application is shutting down - StateShuttingDown - // StateTerminate represent the application is killed - StateTerminate -) - -var ( - regLock *sync.Mutex - runningServers map[string]*Server - runningServersOrder []string - socketPtrOffsetMap map[string]uint - runningServersForked bool - - // DefaultReadTimeOut is the HTTP read timeout - DefaultReadTimeOut time.Duration - // DefaultWriteTimeOut is the HTTP Write timeout - DefaultWriteTimeOut time.Duration - // DefaultMaxHeaderBytes is the Max HTTP Herder size, default is 0, no limit - DefaultMaxHeaderBytes int - // DefaultTimeout is the shutdown server's timeout. default is 60s - DefaultTimeout = 60 * time.Second - - isChild bool - socketOrder string - - hookableSignals []os.Signal -) - -func init() { - flag.BoolVar(&isChild, "graceful", false, "listen on open fd (after forking)") - flag.StringVar(&socketOrder, "socketorder", "", "previous initialization order - used when more than one listener was started") - - regLock = &sync.Mutex{} - runningServers = make(map[string]*Server) - runningServersOrder = []string{} - socketPtrOffsetMap = make(map[string]uint) - - hookableSignals = []os.Signal{ - syscall.SIGHUP, - syscall.SIGINT, - syscall.SIGTERM, - } -} - -// NewServer returns a new graceServer. 
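The init() in the deleted grace.go registers SIGHUP, SIGINT and SIGTERM as its hookable signal set. A minimal stand-alone sketch of that wiring, using only the standard library (channel name and printed messages are illustrative, not taken from the vendored code):

```golang
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Same hookable set as grace: reload on SIGHUP, stop on SIGINT/SIGTERM.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

	switch sig := <-sigs; sig {
	case syscall.SIGHUP:
		fmt.Println("reload requested")
	default:
		fmt.Println("shutting down on", sig)
	}
}
```

Buffering the channel by one slot matters: signal.Notify never blocks when delivering, so an unbuffered channel can silently miss a signal that arrives before the receive starts.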
-func NewServer(addr string, handler http.Handler) (srv *Server) { - regLock.Lock() - defer regLock.Unlock() - - if !flag.Parsed() { - flag.Parse() - } - if len(socketOrder) > 0 { - for i, addr := range strings.Split(socketOrder, ",") { - socketPtrOffsetMap[addr] = uint(i) - } - } else { - socketPtrOffsetMap[addr] = uint(len(runningServersOrder)) - } - - srv = &Server{ - wg: sync.WaitGroup{}, - sigChan: make(chan os.Signal), - isChild: isChild, - SignalHooks: map[int]map[os.Signal][]func(){ - PreSignal: { - syscall.SIGHUP: {}, - syscall.SIGINT: {}, - syscall.SIGTERM: {}, - }, - PostSignal: { - syscall.SIGHUP: {}, - syscall.SIGINT: {}, - syscall.SIGTERM: {}, - }, - }, - state: StateInit, - Network: "tcp", - } - srv.Server = &http.Server{} - srv.Server.Addr = addr - srv.Server.ReadTimeout = DefaultReadTimeOut - srv.Server.WriteTimeout = DefaultWriteTimeOut - srv.Server.MaxHeaderBytes = DefaultMaxHeaderBytes - srv.Server.Handler = handler - - runningServersOrder = append(runningServersOrder, addr) - runningServers[addr] = srv - - return -} - -// ListenAndServe refer http.ListenAndServe -func ListenAndServe(addr string, handler http.Handler) error { - server := NewServer(addr, handler) - return server.ListenAndServe() -} - -// ListenAndServeTLS refer http.ListenAndServeTLS -func ListenAndServeTLS(addr string, certFile string, keyFile string, handler http.Handler) error { - server := NewServer(addr, handler) - return server.ListenAndServeTLS(certFile, keyFile) -} diff --git a/vendor/github.com/astaxie/beego/grace/listener.go b/vendor/github.com/astaxie/beego/grace/listener.go deleted file mode 100644 index 7ede63a30..000000000 --- a/vendor/github.com/astaxie/beego/grace/listener.go +++ /dev/null @@ -1,62 +0,0 @@ -package grace - -import ( - "net" - "os" - "syscall" - "time" -) - -type graceListener struct { - net.Listener - stop chan error - stopped bool - server *Server -} - -func newGraceListener(l net.Listener, srv *Server) (el *graceListener) { - el = &graceListener{ - Listener: l, - stop: make(chan error), - server: srv, - } - go func() { - <-el.stop - el.stopped = true - el.stop <- el.Listener.Close() - }() - return -} - -func (gl *graceListener) Accept() (c net.Conn, err error) { - tc, err := gl.Listener.(*net.TCPListener).AcceptTCP() - if err != nil { - return - } - - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - - c = &graceConn{ - Conn: tc, - server: gl.server, - } - - gl.server.wg.Add(1) - return -} - -func (gl *graceListener) Close() error { - if gl.stopped { - return syscall.EINVAL - } - gl.stop <- nil - return <-gl.stop -} - -func (gl *graceListener) File() *os.File { - // returns a dup(2) - FD_CLOEXEC flag *not* set - tl := gl.Listener.(*net.TCPListener) - fl, _ := tl.File() - return fl -} diff --git a/vendor/github.com/astaxie/beego/grace/server.go b/vendor/github.com/astaxie/beego/grace/server.go deleted file mode 100644 index 513a52a99..000000000 --- a/vendor/github.com/astaxie/beego/grace/server.go +++ /dev/null @@ -1,363 +0,0 @@ -package grace - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "os" - "os/exec" - "os/signal" - "strings" - "sync" - "syscall" - "time" -) - -// Server embedded http.Server -type Server struct { - *http.Server - GraceListener net.Listener - SignalHooks map[int]map[os.Signal][]func() - tlsInnerListener *graceListener - wg sync.WaitGroup - sigChan chan os.Signal - isChild bool - state uint8 - Network string -} - -// Serve accepts incoming connections on the Listener l, -// creating a 
new service goroutine for each. -// The service goroutines read requests and then call srv.Handler to reply to them. -func (srv *Server) Serve() (err error) { - srv.state = StateRunning - err = srv.Server.Serve(srv.GraceListener) - log.Println(syscall.Getpid(), "Waiting for connections to finish...") - srv.wg.Wait() - srv.state = StateTerminate - return -} - -// ListenAndServe listens on the TCP network address srv.Addr and then calls Serve -// to handle requests on incoming connections. If srv.Addr is blank, ":http" is -// used. -func (srv *Server) ListenAndServe() (err error) { - addr := srv.Addr - if addr == "" { - addr = ":http" - } - - go srv.handleSignals() - - l, err := srv.getListener(addr) - if err != nil { - log.Println(err) - return err - } - - srv.GraceListener = newGraceListener(l, srv) - - if srv.isChild { - process, err := os.FindProcess(os.Getppid()) - if err != nil { - log.Println(err) - return err - } - err = process.Signal(syscall.SIGTERM) - if err != nil { - return err - } - } - - log.Println(os.Getpid(), srv.Addr) - return srv.Serve() -} - -// ListenAndServeTLS listens on the TCP network address srv.Addr and then calls -// Serve to handle requests on incoming TLS connections. -// -// Filenames containing a certificate and matching private key for the server must -// be provided. If the certificate is signed by a certificate authority, the -// certFile should be the concatenation of the server's certificate followed by the -// CA's certificate. -// -// If srv.Addr is blank, ":https" is used. -func (srv *Server) ListenAndServeTLS(certFile, keyFile string) (err error) { - addr := srv.Addr - if addr == "" { - addr = ":https" - } - - if srv.TLSConfig == nil { - srv.TLSConfig = &tls.Config{} - } - if srv.TLSConfig.NextProtos == nil { - srv.TLSConfig.NextProtos = []string{"http/1.1"} - } - - srv.TLSConfig.Certificates = make([]tls.Certificate, 1) - srv.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return - } - - go srv.handleSignals() - - l, err := srv.getListener(addr) - if err != nil { - log.Println(err) - return err - } - - srv.tlsInnerListener = newGraceListener(l, srv) - srv.GraceListener = tls.NewListener(srv.tlsInnerListener, srv.TLSConfig) - - if srv.isChild { - process, err := os.FindProcess(os.Getppid()) - if err != nil { - log.Println(err) - return err - } - err = process.Signal(syscall.SIGTERM) - if err != nil { - return err - } - } - log.Println(os.Getpid(), srv.Addr) - return srv.Serve() -} - -// ListenAndServeMutualTLS listens on the TCP network address srv.Addr and then calls -// Serve to handle requests on incoming mutual TLS connections. 
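ListenAndServeMutualTLS, whose body follows, enforces client-certificate verification against a trust file. For reference, a minimal sketch of the same tls.Config setup on a plain net/http server; ca.pem, cert.pem and key.pem are placeholder file names:

```golang
package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"net/http"
	"os"
)

func main() {
	caPEM, err := os.ReadFile("ca.pem") // hypothetical CA trust file
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)

	srv := &http.Server{
		Addr: ":8443",
		TLSConfig: &tls.Config{
			ClientAuth: tls.RequireAndVerifyClientCert, // mutual TLS
			ClientCAs:  pool,
		},
	}
	// cert.pem / key.pem are hypothetical server credentials.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```

RequireAndVerifyClientCert is what turns ordinary HTTPS into mutual TLS: the handshake fails unless the client presents a certificate signed by one of the CAs in ClientCAs.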
-func (srv *Server) ListenAndServeMutualTLS(certFile, keyFile, trustFile string) (err error) { - addr := srv.Addr - if addr == "" { - addr = ":https" - } - - if srv.TLSConfig == nil { - srv.TLSConfig = &tls.Config{} - } - if srv.TLSConfig.NextProtos == nil { - srv.TLSConfig.NextProtos = []string{"http/1.1"} - } - - srv.TLSConfig.Certificates = make([]tls.Certificate, 1) - srv.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return - } - srv.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert - pool := x509.NewCertPool() - data, err := ioutil.ReadFile(trustFile) - if err != nil { - log.Println(err) - return err - } - pool.AppendCertsFromPEM(data) - srv.TLSConfig.ClientCAs = pool - log.Println("Mutual HTTPS") - go srv.handleSignals() - - l, err := srv.getListener(addr) - if err != nil { - log.Println(err) - return err - } - - srv.tlsInnerListener = newGraceListener(l, srv) - srv.GraceListener = tls.NewListener(srv.tlsInnerListener, srv.TLSConfig) - - if srv.isChild { - process, err := os.FindProcess(os.Getppid()) - if err != nil { - log.Println(err) - return err - } - err = process.Kill() - if err != nil { - return err - } - } - log.Println(os.Getpid(), srv.Addr) - return srv.Serve() -} - -// getListener either opens a new socket to listen on, or takes the acceptor socket -// it got passed when restarted. -func (srv *Server) getListener(laddr string) (l net.Listener, err error) { - if srv.isChild { - var ptrOffset uint - if len(socketPtrOffsetMap) > 0 { - ptrOffset = socketPtrOffsetMap[laddr] - log.Println("laddr", laddr, "ptr offset", socketPtrOffsetMap[laddr]) - } - - f := os.NewFile(uintptr(3+ptrOffset), "") - l, err = net.FileListener(f) - if err != nil { - err = fmt.Errorf("net.FileListener error: %v", err) - return - } - } else { - l, err = net.Listen(srv.Network, laddr) - if err != nil { - err = fmt.Errorf("net.Listen error: %v", err) - return - } - } - return -} - -// handleSignals listens for os Signals and calls any hooked in function that the -// user had registered with the signal. -func (srv *Server) handleSignals() { - var sig os.Signal - - signal.Notify( - srv.sigChan, - hookableSignals..., - ) - - pid := syscall.Getpid() - for { - sig = <-srv.sigChan - srv.signalHooks(PreSignal, sig) - switch sig { - case syscall.SIGHUP: - log.Println(pid, "Received SIGHUP. forking.") - err := srv.fork() - if err != nil { - log.Println("Fork err:", err) - } - case syscall.SIGINT: - log.Println(pid, "Received SIGINT.") - srv.shutdown() - case syscall.SIGTERM: - log.Println(pid, "Received SIGTERM.") - srv.shutdown() - default: - log.Printf("Received %v: nothing i care about...\n", sig) - } - srv.signalHooks(PostSignal, sig) - } -} - -func (srv *Server) signalHooks(ppFlag int, sig os.Signal) { - if _, notSet := srv.SignalHooks[ppFlag][sig]; !notSet { - return - } - for _, f := range srv.SignalHooks[ppFlag][sig] { - f() - } -} - -// shutdown closes the listener so that no new connections are accepted. it also -// starts a goroutine that will serverTimeout (stop all running requests) the server -// after DefaultTimeout. 
-func (srv *Server) shutdown() { - if srv.state != StateRunning { - return - } - - srv.state = StateShuttingDown - if DefaultTimeout >= 0 { - go srv.serverTimeout(DefaultTimeout) - } - err := srv.GraceListener.Close() - if err != nil { - log.Println(syscall.Getpid(), "Listener.Close() error:", err) - } else { - log.Println(syscall.Getpid(), srv.GraceListener.Addr(), "Listener closed.") - } -} - -// serverTimeout forces the server to shutdown in a given timeout - whether it -// finished outstanding requests or not. if Read/WriteTimeout are not set or the -// max header size is very big a connection could hang -func (srv *Server) serverTimeout(d time.Duration) { - defer func() { - if r := recover(); r != nil { - log.Println("WaitGroup at 0", r) - } - }() - if srv.state != StateShuttingDown { - return - } - time.Sleep(d) - log.Println("[STOP - Hammer Time] Forcefully shutting down parent") - for { - if srv.state == StateTerminate { - break - } - srv.wg.Done() - } -} - -func (srv *Server) fork() (err error) { - regLock.Lock() - defer regLock.Unlock() - if runningServersForked { - return - } - runningServersForked = true - - var files = make([]*os.File, len(runningServers)) - var orderArgs = make([]string, len(runningServers)) - for _, srvPtr := range runningServers { - switch srvPtr.GraceListener.(type) { - case *graceListener: - files[socketPtrOffsetMap[srvPtr.Server.Addr]] = srvPtr.GraceListener.(*graceListener).File() - default: - files[socketPtrOffsetMap[srvPtr.Server.Addr]] = srvPtr.tlsInnerListener.File() - } - orderArgs[socketPtrOffsetMap[srvPtr.Server.Addr]] = srvPtr.Server.Addr - } - - log.Println(files) - path := os.Args[0] - var args []string - if len(os.Args) > 1 { - for _, arg := range os.Args[1:] { - if arg == "-graceful" { - break - } - args = append(args, arg) - } - } - args = append(args, "-graceful") - if len(runningServers) > 1 { - args = append(args, fmt.Sprintf(`-socketorder=%s`, strings.Join(orderArgs, ","))) - log.Println(args) - } - cmd := exec.Command(path, args...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.ExtraFiles = files - err = cmd.Start() - if err != nil { - log.Fatalf("Restart: Failed to launch, error: %v", err) - } - - return -} - -// RegisterSignalHook registers a function to be run PreSignal or PostSignal for a given signal. -func (srv *Server) RegisterSignalHook(ppFlag int, sig os.Signal, f func()) (err error) { - if ppFlag != PreSignal && ppFlag != PostSignal { - err = fmt.Errorf("Invalid ppFlag argument. Must be either grace.PreSignal or grace.PostSignal") - return - } - for _, s := range hookableSignals { - if s == sig { - srv.SignalHooks[ppFlag][sig] = append(srv.SignalHooks[ppFlag][sig], f) - return - } - } - err = fmt.Errorf("Signal '%v' is not supported", sig) - return -} diff --git a/vendor/github.com/astaxie/beego/hooks.go b/vendor/github.com/astaxie/beego/hooks.go deleted file mode 100644 index b8671d353..000000000 --- a/vendor/github.com/astaxie/beego/hooks.go +++ /dev/null @@ -1,103 +0,0 @@ -package beego - -import ( - "encoding/json" - "mime" - "net/http" - "path/filepath" - - "github.com/astaxie/beego/context" - "github.com/astaxie/beego/logs" - "github.com/astaxie/beego/session" -) - -// register MIME type with content type -func registerMime() error { - for k, v := range mimemaps { - mime.AddExtensionType(k, v) - } - return nil -} - -// register default error http handlers, 404,401,403,500 and 503. 
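The grace package deleted above predates the graceful-shutdown support added to net/http in Go 1.8; the drain-then-exit behaviour it implements by hand is now available as http.Server.Shutdown. A minimal sketch, not part of this change:

```golang
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("WORLD!"))
	})

	srv := &http.Server{Addr: ":8080", Handler: mux}

	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	// Block until SIGINT/SIGTERM, then drain in-flight requests.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)
	<-stop

	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Println("shutdown:", err)
	}
}
```

Shutdown stops accepting new connections and waits for outstanding requests to finish, bounded here by the 60-second context, which mirrors grace's DefaultTimeout.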
-func registerDefaultErrorHandler() error { - m := map[string]func(http.ResponseWriter, *http.Request){ - "401": unauthorized, - "402": paymentRequired, - "403": forbidden, - "404": notFound, - "405": methodNotAllowed, - "500": internalServerError, - "501": notImplemented, - "502": badGateway, - "503": serviceUnavailable, - "504": gatewayTimeout, - "417": invalidxsrf, - "422": missingxsrf, - } - for e, h := range m { - if _, ok := ErrorMaps[e]; !ok { - ErrorHandler(e, h) - } - } - return nil -} - -func registerSession() error { - if BConfig.WebConfig.Session.SessionOn { - var err error - sessionConfig := AppConfig.String("sessionConfig") - conf := new(session.ManagerConfig) - if sessionConfig == "" { - conf.CookieName = BConfig.WebConfig.Session.SessionName - conf.EnableSetCookie = BConfig.WebConfig.Session.SessionAutoSetCookie - conf.Gclifetime = BConfig.WebConfig.Session.SessionGCMaxLifetime - conf.Secure = BConfig.Listen.EnableHTTPS - conf.CookieLifeTime = BConfig.WebConfig.Session.SessionCookieLifeTime - conf.ProviderConfig = filepath.ToSlash(BConfig.WebConfig.Session.SessionProviderConfig) - conf.DisableHTTPOnly = BConfig.WebConfig.Session.SessionDisableHTTPOnly - conf.Domain = BConfig.WebConfig.Session.SessionDomain - conf.EnableSidInHTTPHeader = BConfig.WebConfig.Session.SessionEnableSidInHTTPHeader - conf.SessionNameInHTTPHeader = BConfig.WebConfig.Session.SessionNameInHTTPHeader - conf.EnableSidInURLQuery = BConfig.WebConfig.Session.SessionEnableSidInURLQuery - } else { - if err = json.Unmarshal([]byte(sessionConfig), conf); err != nil { - return err - } - } - if GlobalSessions, err = session.NewManager(BConfig.WebConfig.Session.SessionProvider, conf); err != nil { - return err - } - go GlobalSessions.GC() - } - return nil -} - -func registerTemplate() error { - defer lockViewPaths() - if err := AddViewPath(BConfig.WebConfig.ViewsPath); err != nil { - if BConfig.RunMode == DEV { - logs.Warn(err) - } - return err - } - return nil -} - -func registerAdmin() error { - if BConfig.Listen.EnableAdmin { - go beeAdminApp.Run() - } - return nil -} - -func registerGzip() error { - if BConfig.EnableGzip { - context.InitGzip( - AppConfig.DefaultInt("gzipMinLength", -1), - AppConfig.DefaultInt("gzipCompressLevel", -1), - AppConfig.DefaultStrings("includedMethods", []string{"GET"}), - ) - } - return nil -} diff --git a/vendor/github.com/astaxie/beego/httplib/README.md b/vendor/github.com/astaxie/beego/httplib/README.md deleted file mode 100644 index 97df8e6b9..000000000 --- a/vendor/github.com/astaxie/beego/httplib/README.md +++ /dev/null @@ -1,97 +0,0 @@ -# httplib -httplib is an libs help you to curl remote url. - -# How to use? - -## GET -you can use Get to crawl data. 
- - import "github.com/astaxie/beego/httplib" - - str, err := httplib.Get("http://beego.me/").String() - if err != nil { - // error - } - fmt.Println(str) - -## POST -POST data to remote url - - req := httplib.Post("http://beego.me/") - req.Param("username","astaxie") - req.Param("password","123456") - str, err := req.String() - if err != nil { - // error - } - fmt.Println(str) - -## Set timeout - -The default timeout is `60` seconds, function prototype: - - SetTimeout(connectTimeout, readWriteTimeout time.Duration) - -Example: - - // GET - httplib.Get("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second) - - // POST - httplib.Post("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second) - - -## Debug - -If you want to debug the request info, set the debug on - - httplib.Get("http://beego.me/").Debug(true) - -## Set HTTP Basic Auth - - str, err := Get("http://beego.me/").SetBasicAuth("user", "passwd").String() - if err != nil { - // error - } - fmt.Println(str) - -## Set HTTPS - -If request url is https, You can set the client support TSL: - - httplib.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}) - -More info about the `tls.Config` please visit http://golang.org/pkg/crypto/tls/#Config - -## Set HTTP Version - -some servers need to specify the protocol version of HTTP - - httplib.Get("http://beego.me/").SetProtocolVersion("HTTP/1.1") - -## Set Cookie - -some http request need setcookie. So set it like this: - - cookie := &http.Cookie{} - cookie.Name = "username" - cookie.Value = "astaxie" - httplib.Get("http://beego.me/").SetCookie(cookie) - -## Upload file - -httplib support mutil file upload, use `req.PostFile()` - - req := httplib.Post("http://beego.me/") - req.Param("username","astaxie") - req.PostFile("uploadfile1", "httplib.pdf") - str, err := req.String() - if err != nil { - // error - } - fmt.Println(str) - - -See godoc for further documentation and examples. - -* [godoc.org/github.com/astaxie/beego/httplib](https://godoc.org/github.com/astaxie/beego/httplib) diff --git a/vendor/github.com/astaxie/beego/httplib/httplib.go b/vendor/github.com/astaxie/beego/httplib/httplib.go deleted file mode 100644 index 074cf6615..000000000 --- a/vendor/github.com/astaxie/beego/httplib/httplib.go +++ /dev/null @@ -1,624 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
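The httplib README above builds a POST by chaining Param calls; the equivalent request with the standard library, reusing the README's placeholder endpoint and credentials, is a one-liner:

```golang
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Mirrors the README's Param/Post example with the stdlib.
	resp, err := http.PostForm("http://beego.me/", url.Values{
		"username": {"astaxie"},
		"password": {"123456"},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```

http.PostForm encodes the values and sets the application/x-www-form-urlencoded content type for you.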
- -// Package httplib is used as http.Client -// Usage: -// -// import "github.com/astaxie/beego/httplib" -// -// b := httplib.Post("http://beego.me/") -// b.Param("username","astaxie") -// b.Param("password","123456") -// b.PostFile("uploadfile1", "httplib.pdf") -// b.PostFile("uploadfile2", "httplib.txt") -// str, err := b.String() -// if err != nil { -// t.Fatal(err) -// } -// fmt.Println(str) -// -// more docs http://beego.me/docs/module/httplib.md -package httplib - -import ( - "bytes" - "compress/gzip" - "crypto/tls" - "encoding/json" - "encoding/xml" - "io" - "io/ioutil" - "log" - "mime/multipart" - "net" - "net/http" - "net/http/cookiejar" - "net/http/httputil" - "net/url" - "os" - "strings" - "sync" - "time" - "gopkg.in/yaml.v2" -) - -var defaultSetting = BeegoHTTPSettings{ - UserAgent: "beegoServer", - ConnectTimeout: 60 * time.Second, - ReadWriteTimeout: 60 * time.Second, - Gzip: true, - DumpBody: true, -} - -var defaultCookieJar http.CookieJar -var settingMutex sync.Mutex - -// createDefaultCookie creates a global cookiejar to store cookies. -func createDefaultCookie() { - settingMutex.Lock() - defer settingMutex.Unlock() - defaultCookieJar, _ = cookiejar.New(nil) -} - -// SetDefaultSetting Overwrite default settings -func SetDefaultSetting(setting BeegoHTTPSettings) { - settingMutex.Lock() - defer settingMutex.Unlock() - defaultSetting = setting -} - -// NewBeegoRequest return *BeegoHttpRequest with specific method -func NewBeegoRequest(rawurl, method string) *BeegoHTTPRequest { - var resp http.Response - u, err := url.Parse(rawurl) - if err != nil { - log.Println("Httplib:", err) - } - req := http.Request{ - URL: u, - Method: method, - Header: make(http.Header), - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - } - return &BeegoHTTPRequest{ - url: rawurl, - req: &req, - params: map[string][]string{}, - files: map[string]string{}, - setting: defaultSetting, - resp: &resp, - } -} - -// Get returns *BeegoHttpRequest with GET method. -func Get(url string) *BeegoHTTPRequest { - return NewBeegoRequest(url, "GET") -} - -// Post returns *BeegoHttpRequest with POST method. -func Post(url string) *BeegoHTTPRequest { - return NewBeegoRequest(url, "POST") -} - -// Put returns *BeegoHttpRequest with PUT method. -func Put(url string) *BeegoHTTPRequest { - return NewBeegoRequest(url, "PUT") -} - -// Delete returns *BeegoHttpRequest DELETE method. -func Delete(url string) *BeegoHTTPRequest { - return NewBeegoRequest(url, "DELETE") -} - -// Head returns *BeegoHttpRequest with HEAD method. -func Head(url string) *BeegoHTTPRequest { - return NewBeegoRequest(url, "HEAD") -} - -// BeegoHTTPSettings is the http.Client setting -type BeegoHTTPSettings struct { - ShowDebug bool - UserAgent string - ConnectTimeout time.Duration - ReadWriteTimeout time.Duration - TLSClientConfig *tls.Config - Proxy func(*http.Request) (*url.URL, error) - Transport http.RoundTripper - CheckRedirect func(req *http.Request, via []*http.Request) error - EnableCookie bool - Gzip bool - DumpBody bool - Retries int // if set to -1 means will retry forever -} - -// BeegoHTTPRequest provides more useful methods for requesting one url than http.Request. 
-type BeegoHTTPRequest struct { - url string - req *http.Request - params map[string][]string - files map[string]string - setting BeegoHTTPSettings - resp *http.Response - body []byte - dump []byte -} - -// GetRequest return the request object -func (b *BeegoHTTPRequest) GetRequest() *http.Request { - return b.req -} - -// Setting Change request settings -func (b *BeegoHTTPRequest) Setting(setting BeegoHTTPSettings) *BeegoHTTPRequest { - b.setting = setting - return b -} - -// SetBasicAuth sets the request's Authorization header to use HTTP Basic Authentication with the provided username and password. -func (b *BeegoHTTPRequest) SetBasicAuth(username, password string) *BeegoHTTPRequest { - b.req.SetBasicAuth(username, password) - return b -} - -// SetEnableCookie sets enable/disable cookiejar -func (b *BeegoHTTPRequest) SetEnableCookie(enable bool) *BeegoHTTPRequest { - b.setting.EnableCookie = enable - return b -} - -// SetUserAgent sets User-Agent header field -func (b *BeegoHTTPRequest) SetUserAgent(useragent string) *BeegoHTTPRequest { - b.setting.UserAgent = useragent - return b -} - -// Debug sets show debug or not when executing request. -func (b *BeegoHTTPRequest) Debug(isdebug bool) *BeegoHTTPRequest { - b.setting.ShowDebug = isdebug - return b -} - -// Retries sets Retries times. -// default is 0 means no retried. -// -1 means retried forever. -// others means retried times. -func (b *BeegoHTTPRequest) Retries(times int) *BeegoHTTPRequest { - b.setting.Retries = times - return b -} - -// DumpBody setting whether need to Dump the Body. -func (b *BeegoHTTPRequest) DumpBody(isdump bool) *BeegoHTTPRequest { - b.setting.DumpBody = isdump - return b -} - -// DumpRequest return the DumpRequest -func (b *BeegoHTTPRequest) DumpRequest() []byte { - return b.dump -} - -// SetTimeout sets connect time out and read-write time out for BeegoRequest. -func (b *BeegoHTTPRequest) SetTimeout(connectTimeout, readWriteTimeout time.Duration) *BeegoHTTPRequest { - b.setting.ConnectTimeout = connectTimeout - b.setting.ReadWriteTimeout = readWriteTimeout - return b -} - -// SetTLSClientConfig sets tls connection configurations if visiting https url. -func (b *BeegoHTTPRequest) SetTLSClientConfig(config *tls.Config) *BeegoHTTPRequest { - b.setting.TLSClientConfig = config - return b -} - -// Header add header item string in request. -func (b *BeegoHTTPRequest) Header(key, value string) *BeegoHTTPRequest { - b.req.Header.Set(key, value) - return b -} - -// SetHost set the request host -func (b *BeegoHTTPRequest) SetHost(host string) *BeegoHTTPRequest { - b.req.Host = host - return b -} - -// SetProtocolVersion Set the protocol version for incoming requests. -// Client requests always use HTTP/1.1. -func (b *BeegoHTTPRequest) SetProtocolVersion(vers string) *BeegoHTTPRequest { - if len(vers) == 0 { - vers = "HTTP/1.1" - } - - major, minor, ok := http.ParseHTTPVersion(vers) - if ok { - b.req.Proto = vers - b.req.ProtoMajor = major - b.req.ProtoMinor = minor - } - - return b -} - -// SetCookie add cookie into request. 
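httplib's SetCookie (doc comment above, body below) serialises the cookie into a Cookie header by hand; net/http exposes the same operation directly on the request. A small sketch using the README's sample cookie:

```golang
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://beego.me/", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent of the vendored SetCookie: adds a "Cookie: username=astaxie" header.
	req.AddCookie(&http.Cookie{Name: "username", Value: "astaxie"})
	fmt.Println(req.Header.Get("Cookie"))
}
```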
-func (b *BeegoHTTPRequest) SetCookie(cookie *http.Cookie) *BeegoHTTPRequest { - b.req.Header.Add("Cookie", cookie.String()) - return b -} - -// SetTransport set the setting transport -func (b *BeegoHTTPRequest) SetTransport(transport http.RoundTripper) *BeegoHTTPRequest { - b.setting.Transport = transport - return b -} - -// SetProxy set the http proxy -// example: -// -// func(req *http.Request) (*url.URL, error) { -// u, _ := url.ParseRequestURI("http://127.0.0.1:8118") -// return u, nil -// } -func (b *BeegoHTTPRequest) SetProxy(proxy func(*http.Request) (*url.URL, error)) *BeegoHTTPRequest { - b.setting.Proxy = proxy - return b -} - -// SetCheckRedirect specifies the policy for handling redirects. -// -// If CheckRedirect is nil, the Client uses its default policy, -// which is to stop after 10 consecutive requests. -func (b *BeegoHTTPRequest) SetCheckRedirect(redirect func(req *http.Request, via []*http.Request) error) *BeegoHTTPRequest { - b.setting.CheckRedirect = redirect - return b -} - -// Param adds query param in to request. -// params build query string as ?key1=value1&key2=value2... -func (b *BeegoHTTPRequest) Param(key, value string) *BeegoHTTPRequest { - if param, ok := b.params[key]; ok { - b.params[key] = append(param, value) - } else { - b.params[key] = []string{value} - } - return b -} - -// PostFile add a post file to the request -func (b *BeegoHTTPRequest) PostFile(formname, filename string) *BeegoHTTPRequest { - b.files[formname] = filename - return b -} - -// Body adds request raw body. -// it supports string and []byte. -func (b *BeegoHTTPRequest) Body(data interface{}) *BeegoHTTPRequest { - switch t := data.(type) { - case string: - bf := bytes.NewBufferString(t) - b.req.Body = ioutil.NopCloser(bf) - b.req.ContentLength = int64(len(t)) - case []byte: - bf := bytes.NewBuffer(t) - b.req.Body = ioutil.NopCloser(bf) - b.req.ContentLength = int64(len(t)) - } - return b -} - -// XMLBody adds request raw body encoding by XML. -func (b *BeegoHTTPRequest) XMLBody(obj interface{}) (*BeegoHTTPRequest, error) { - if b.req.Body == nil && obj != nil { - byts, err := xml.Marshal(obj) - if err != nil { - return b, err - } - b.req.Body = ioutil.NopCloser(bytes.NewReader(byts)) - b.req.ContentLength = int64(len(byts)) - b.req.Header.Set("Content-Type", "application/xml") - } - return b, nil -} - -// YAMLBody adds request raw body encoding by YAML. -func (b *BeegoHTTPRequest) YAMLBody(obj interface{}) (*BeegoHTTPRequest, error) { - if b.req.Body == nil && obj != nil { - byts, err := yaml.Marshal(obj) - if err != nil { - return b, err - } - b.req.Body = ioutil.NopCloser(bytes.NewReader(byts)) - b.req.ContentLength = int64(len(byts)) - b.req.Header.Set("Content-Type", "application/x+yaml") - } - return b, nil -} - -// JSONBody adds request raw body encoding by JSON. -func (b *BeegoHTTPRequest) JSONBody(obj interface{}) (*BeegoHTTPRequest, error) { - if b.req.Body == nil && obj != nil { - byts, err := json.Marshal(obj) - if err != nil { - return b, err - } - b.req.Body = ioutil.NopCloser(bytes.NewReader(byts)) - b.req.ContentLength = int64(len(byts)) - b.req.Header.Set("Content-Type", "application/json") - } - return b, nil -} - -func (b *BeegoHTTPRequest) buildURL(paramBody string) { - // build GET url with query string - if b.req.Method == "GET" && len(paramBody) > 0 { - if strings.Contains(b.url, "?") { - b.url += "&" + paramBody - } else { - b.url = b.url + "?" 
+ paramBody - } - return - } - - // build POST/PUT/PATCH url and body - if (b.req.Method == "POST" || b.req.Method == "PUT" || b.req.Method == "PATCH" || b.req.Method == "DELETE") && b.req.Body == nil { - // with files - if len(b.files) > 0 { - pr, pw := io.Pipe() - bodyWriter := multipart.NewWriter(pw) - go func() { - for formname, filename := range b.files { - fileWriter, err := bodyWriter.CreateFormFile(formname, filename) - if err != nil { - log.Println("Httplib:", err) - } - fh, err := os.Open(filename) - if err != nil { - log.Println("Httplib:", err) - } - //iocopy - _, err = io.Copy(fileWriter, fh) - fh.Close() - if err != nil { - log.Println("Httplib:", err) - } - } - for k, v := range b.params { - for _, vv := range v { - bodyWriter.WriteField(k, vv) - } - } - bodyWriter.Close() - pw.Close() - }() - b.Header("Content-Type", bodyWriter.FormDataContentType()) - b.req.Body = ioutil.NopCloser(pr) - return - } - - // with params - if len(paramBody) > 0 { - b.Header("Content-Type", "application/x-www-form-urlencoded") - b.Body(paramBody) - } - } -} - -func (b *BeegoHTTPRequest) getResponse() (*http.Response, error) { - if b.resp.StatusCode != 0 { - return b.resp, nil - } - resp, err := b.DoRequest() - if err != nil { - return nil, err - } - b.resp = resp - return resp, nil -} - -// DoRequest will do the client.Do -func (b *BeegoHTTPRequest) DoRequest() (resp *http.Response, err error) { - var paramBody string - if len(b.params) > 0 { - var buf bytes.Buffer - for k, v := range b.params { - for _, vv := range v { - buf.WriteString(url.QueryEscape(k)) - buf.WriteByte('=') - buf.WriteString(url.QueryEscape(vv)) - buf.WriteByte('&') - } - } - paramBody = buf.String() - paramBody = paramBody[0 : len(paramBody)-1] - } - - b.buildURL(paramBody) - urlParsed, err := url.Parse(b.url) - if err != nil { - return nil, err - } - - b.req.URL = urlParsed - - trans := b.setting.Transport - - if trans == nil { - // create default transport - trans = &http.Transport{ - TLSClientConfig: b.setting.TLSClientConfig, - Proxy: b.setting.Proxy, - Dial: TimeoutDialer(b.setting.ConnectTimeout, b.setting.ReadWriteTimeout), - MaxIdleConnsPerHost: 100, - } - } else { - // if b.transport is *http.Transport then set the settings. - if t, ok := trans.(*http.Transport); ok { - if t.TLSClientConfig == nil { - t.TLSClientConfig = b.setting.TLSClientConfig - } - if t.Proxy == nil { - t.Proxy = b.setting.Proxy - } - if t.Dial == nil { - t.Dial = TimeoutDialer(b.setting.ConnectTimeout, b.setting.ReadWriteTimeout) - } - } - } - - var jar http.CookieJar - if b.setting.EnableCookie { - if defaultCookieJar == nil { - createDefaultCookie() - } - jar = defaultCookieJar - } - - client := &http.Client{ - Transport: trans, - Jar: jar, - } - - if b.setting.UserAgent != "" && b.req.Header.Get("User-Agent") == "" { - b.req.Header.Set("User-Agent", b.setting.UserAgent) - } - - if b.setting.CheckRedirect != nil { - client.CheckRedirect = b.setting.CheckRedirect - } - - if b.setting.ShowDebug { - dump, err := httputil.DumpRequest(b.req, b.setting.DumpBody) - if err != nil { - log.Println(err.Error()) - } - b.dump = dump - } - // retries default value is 0, it will run once. - // retries equal to -1, it will run forever until success - // retries is setted, it will retries fixed times. - for i := 0; b.setting.Retries == -1 || i <= b.setting.Retries; i++ { - resp, err = client.Do(b.req) - if err == nil { - break - } - } - return resp, err -} - -// String returns the body string in response. -// it calls Response inner. 
-func (b *BeegoHTTPRequest) String() (string, error) { - data, err := b.Bytes() - if err != nil { - return "", err - } - - return string(data), nil -} - -// Bytes returns the body []byte in response. -// it calls Response inner. -func (b *BeegoHTTPRequest) Bytes() ([]byte, error) { - if b.body != nil { - return b.body, nil - } - resp, err := b.getResponse() - if err != nil { - return nil, err - } - if resp.Body == nil { - return nil, nil - } - defer resp.Body.Close() - if b.setting.Gzip && resp.Header.Get("Content-Encoding") == "gzip" { - reader, err := gzip.NewReader(resp.Body) - if err != nil { - return nil, err - } - b.body, err = ioutil.ReadAll(reader) - return b.body, err - } - b.body, err = ioutil.ReadAll(resp.Body) - return b.body, err -} - -// ToFile saves the body data in response to one file. -// it calls Response inner. -func (b *BeegoHTTPRequest) ToFile(filename string) error { - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - resp, err := b.getResponse() - if err != nil { - return err - } - if resp.Body == nil { - return nil - } - defer resp.Body.Close() - _, err = io.Copy(f, resp.Body) - return err -} - -// ToJSON returns the map that marshals from the body bytes as json in response . -// it calls Response inner. -func (b *BeegoHTTPRequest) ToJSON(v interface{}) error { - data, err := b.Bytes() - if err != nil { - return err - } - return json.Unmarshal(data, v) -} - -// ToXML returns the map that marshals from the body bytes as xml in response . -// it calls Response inner. -func (b *BeegoHTTPRequest) ToXML(v interface{}) error { - data, err := b.Bytes() - if err != nil { - return err - } - return xml.Unmarshal(data, v) -} - -// ToYAML returns the map that marshals from the body bytes as yaml in response . -// it calls Response inner. -func (b *BeegoHTTPRequest) ToYAML(v interface{}) error { - data, err := b.Bytes() - if err != nil { - return err - } - return yaml.Unmarshal(data, v) -} - -// Response executes request client gets response mannually. -func (b *BeegoHTTPRequest) Response() (*http.Response, error) { - return b.getResponse() -} - -// TimeoutDialer returns functions of connection dialer with timeout settings for http.Transport Dial field. -func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net.Conn, err error) { - return func(netw, addr string) (net.Conn, error) { - conn, err := net.DialTimeout(netw, addr, cTimeout) - if err != nil { - return nil, err - } - err = conn.SetDeadline(time.Now().Add(rwTimeout)) - return conn, err - } -} diff --git a/vendor/github.com/astaxie/beego/log.go b/vendor/github.com/astaxie/beego/log.go deleted file mode 100644 index e9412f920..000000000 --- a/vendor/github.com/astaxie/beego/log.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "strings" - - "github.com/astaxie/beego/logs" -) - -// Log levels to control the logging output. 
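The TimeoutDialer at the end of httplib.go pairs a connect timeout with an absolute read/write deadline on the raw connection. A self-contained sketch of that pattern plugged into an http.Client; the URL and durations are arbitrary:

```golang
package main

import (
	"fmt"
	"io"
	"net"
	"net/http"
	"time"
)

// dialer mirrors TimeoutDialer: a connect timeout plus an absolute
// read/write deadline set on the raw connection.
func dialer(connectTimeout, rwTimeout time.Duration) func(netw, addr string) (net.Conn, error) {
	return func(netw, addr string) (net.Conn, error) {
		conn, err := net.DialTimeout(netw, addr, connectTimeout)
		if err != nil {
			return nil, err
		}
		return conn, conn.SetDeadline(time.Now().Add(rwTimeout))
	}
}

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// Transport.Dial is deprecated in favour of DialContext; kept to mirror the vendored code.
			Dial:                dialer(10*time.Second, 30*time.Second),
			MaxIdleConnsPerHost: 100,
		},
	}
	resp, err := client.Get("http://example.com/") // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(len(body), "bytes")
}
```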
-const ( - LevelEmergency = iota - LevelAlert - LevelCritical - LevelError - LevelWarning - LevelNotice - LevelInformational - LevelDebug -) - -// BeeLogger references the used application logger. -var BeeLogger = logs.GetBeeLogger() - -// SetLevel sets the global log level used by the simple logger. -func SetLevel(l int) { - logs.SetLevel(l) -} - -// SetLogFuncCall set the CallDepth, default is 3 -func SetLogFuncCall(b bool) { - logs.SetLogFuncCall(b) -} - -// SetLogger sets a new logger. -func SetLogger(adaptername string, config string) error { - return logs.SetLogger(adaptername, config) -} - -// Emergency logs a message at emergency level. -func Emergency(v ...interface{}) { - logs.Emergency(generateFmtStr(len(v)), v...) -} - -// Alert logs a message at alert level. -func Alert(v ...interface{}) { - logs.Alert(generateFmtStr(len(v)), v...) -} - -// Critical logs a message at critical level. -func Critical(v ...interface{}) { - logs.Critical(generateFmtStr(len(v)), v...) -} - -// Error logs a message at error level. -func Error(v ...interface{}) { - logs.Error(generateFmtStr(len(v)), v...) -} - -// Warning logs a message at warning level. -func Warning(v ...interface{}) { - logs.Warning(generateFmtStr(len(v)), v...) -} - -// Warn compatibility alias for Warning() -func Warn(v ...interface{}) { - logs.Warn(generateFmtStr(len(v)), v...) -} - -// Notice logs a message at notice level. -func Notice(v ...interface{}) { - logs.Notice(generateFmtStr(len(v)), v...) -} - -// Informational logs a message at info level. -func Informational(v ...interface{}) { - logs.Informational(generateFmtStr(len(v)), v...) -} - -// Info compatibility alias for Warning() -func Info(v ...interface{}) { - logs.Info(generateFmtStr(len(v)), v...) -} - -// Debug logs a message at debug level. -func Debug(v ...interface{}) { - logs.Debug(generateFmtStr(len(v)), v...) -} - -// Trace logs a message at trace level. -// compatibility alias for Warning() -func Trace(v ...interface{}) { - logs.Trace(generateFmtStr(len(v)), v...) -} - -func generateFmtStr(n int) string { - return strings.Repeat("%v ", n) -} diff --git a/vendor/github.com/astaxie/beego/logs/README.md b/vendor/github.com/astaxie/beego/logs/README.md deleted file mode 100644 index c05bcc044..000000000 --- a/vendor/github.com/astaxie/beego/logs/README.md +++ /dev/null @@ -1,72 +0,0 @@ -## logs -logs is a Go logs manager. It can use many logs adapters. The repo is inspired by `database/sql` . - - -## How to install? - - go get github.com/astaxie/beego/logs - - -## What adapters are supported? - -As of now this logs support console, file,smtp and conn. - - -## How to use it? 
- -First you must import it - -```golang -import ( - "github.com/astaxie/beego/logs" -) -``` - -Then init a Log (example with console adapter) - -```golang -log := logs.NewLogger(10000) -log.SetLogger("console", "") -``` - -> the first params stand for how many channel - -Use it like this: - -```golang -log.Trace("trace") -log.Info("info") -log.Warn("warning") -log.Debug("debug") -log.Critical("critical") -``` - -## File adapter - -Configure file adapter like this: - -```golang -log := NewLogger(10000) -log.SetLogger("file", `{"filename":"test.log"}`) -``` - -## Conn adapter - -Configure like this: - -```golang -log := NewLogger(1000) -log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`) -log.Info("info") -``` - -## Smtp adapter - -Configure like this: - -```golang -log := NewLogger(10000) -log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`) -log.Critical("sendmail critical") -time.Sleep(time.Second * 30) -``` diff --git a/vendor/github.com/astaxie/beego/logs/accesslog.go b/vendor/github.com/astaxie/beego/logs/accesslog.go deleted file mode 100644 index 3ff9e20fc..000000000 --- a/vendor/github.com/astaxie/beego/logs/accesslog.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "bytes" - "strings" - "encoding/json" - "fmt" - "time" -) - -const ( - apacheFormatPattern = "%s - - [%s] \"%s %d %d\" %f %s %s" - apacheFormat = "APACHE_FORMAT" - jsonFormat = "JSON_FORMAT" -) - -// AccessLogRecord struct for holding access log data. -type AccessLogRecord struct { - RemoteAddr string `json:"remote_addr"` - RequestTime time.Time `json:"request_time"` - RequestMethod string `json:"request_method"` - Request string `json:"request"` - ServerProtocol string `json:"server_protocol"` - Host string `json:"host"` - Status int `json:"status"` - BodyBytesSent int64 `json:"body_bytes_sent"` - ElapsedTime time.Duration `json:"elapsed_time"` - HTTPReferrer string `json:"http_referrer"` - HTTPUserAgent string `json:"http_user_agent"` - RemoteUser string `json:"remote_user"` -} - -func (r *AccessLogRecord) json() ([]byte, error) { - buffer := &bytes.Buffer{} - encoder := json.NewEncoder(buffer) - disableEscapeHTML(encoder) - - err := encoder.Encode(r) - return buffer.Bytes(), err -} - -func disableEscapeHTML(i interface{}) { - if e, ok := i.(interface { - SetEscapeHTML(bool) - }); ok { - e.SetEscapeHTML(false) - } -} - -// AccessLog - Format and print access log. 
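disableEscapeHTML above flips json.Encoder's SetEscapeHTML switch so that characters such as & and < survive verbatim in access-log lines. A tiny sketch of the effect on a made-up record:

```golang
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	rec := map[string]string{"request": "GET /search?q=a&b HTTP/1.1"} // sample record
	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false) // keep & < > literal instead of \u0026 etc.
	if err := enc.Encode(rec); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(buf.String())
}
```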
-func AccessLog(r *AccessLogRecord, format string) { - var msg string - switch format { - case apacheFormat: - timeFormatted := r.RequestTime.Format("02/Jan/2006 03:04:05") - msg = fmt.Sprintf(apacheFormatPattern, r.RemoteAddr, timeFormatted, r.Request, r.Status, r.BodyBytesSent, - r.ElapsedTime.Seconds(), r.HTTPReferrer, r.HTTPUserAgent) - case jsonFormat: - fallthrough - default: - jsonData, err := r.json() - if err != nil { - msg = fmt.Sprintf(`{"Error": "%s"}`, err) - } else { - msg = string(jsonData) - } - } - beeLogger.writeMsg(levelLoggerImpl, strings.TrimSpace(msg)) -} diff --git a/vendor/github.com/astaxie/beego/logs/color.go b/vendor/github.com/astaxie/beego/logs/color.go deleted file mode 100644 index 41d23638a..000000000 --- a/vendor/github.com/astaxie/beego/logs/color.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package logs - -import "io" - -type ansiColorWriter struct { - w io.Writer - mode outputMode -} - -func (cw *ansiColorWriter) Write(p []byte) (int, error) { - return cw.w.Write(p) -} diff --git a/vendor/github.com/astaxie/beego/logs/color_windows.go b/vendor/github.com/astaxie/beego/logs/color_windows.go deleted file mode 100644 index 4e28f1888..000000000 --- a/vendor/github.com/astaxie/beego/logs/color_windows.go +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build windows - -package logs - -import ( - "bytes" - "io" - "strings" - "syscall" - "unsafe" -) - -type ( - csiState int - parseResult int -) - -const ( - outsideCsiCode csiState = iota - firstCsiCode - secondCsiCode -) - -const ( - noConsole parseResult = iota - changedColor - unknown -) - -type ansiColorWriter struct { - w io.Writer - mode outputMode - state csiState - paramStartBuf bytes.Buffer - paramBuf bytes.Buffer -} - -const ( - firstCsiChar byte = '\x1b' - secondeCsiChar byte = '[' - separatorChar byte = ';' - sgrCode byte = 'm' -) - -const ( - foregroundBlue = uint16(0x0001) - foregroundGreen = uint16(0x0002) - foregroundRed = uint16(0x0004) - foregroundIntensity = uint16(0x0008) - backgroundBlue = uint16(0x0010) - backgroundGreen = uint16(0x0020) - backgroundRed = uint16(0x0040) - backgroundIntensity = uint16(0x0080) - underscore = uint16(0x8000) - - foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity - backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity -) - -const ( - ansiReset = "0" - ansiIntensityOn = "1" - ansiIntensityOff = "21" - ansiUnderlineOn = "4" - ansiUnderlineOff = "24" - ansiBlinkOn = "5" - ansiBlinkOff = "25" - - ansiForegroundBlack = "30" - ansiForegroundRed = "31" - ansiForegroundGreen = "32" - ansiForegroundYellow = "33" - ansiForegroundBlue = "34" - ansiForegroundMagenta = "35" - ansiForegroundCyan = "36" - ansiForegroundWhite = "37" - ansiForegroundDefault = "39" - - ansiBackgroundBlack = "40" - ansiBackgroundRed = "41" - ansiBackgroundGreen = "42" - ansiBackgroundYellow = "43" - ansiBackgroundBlue = "44" - ansiBackgroundMagenta = "45" - ansiBackgroundCyan = "46" - ansiBackgroundWhite = "47" - ansiBackgroundDefault = "49" - - ansiLightForegroundGray = "90" - ansiLightForegroundRed = "91" - ansiLightForegroundGreen = "92" - ansiLightForegroundYellow = "93" - ansiLightForegroundBlue = "94" - ansiLightForegroundMagenta = "95" - ansiLightForegroundCyan = "96" - ansiLightForegroundWhite = "97" - - ansiLightBackgroundGray = "100" - ansiLightBackgroundRed = "101" - ansiLightBackgroundGreen = "102" - ansiLightBackgroundYellow = "103" - ansiLightBackgroundBlue = "104" - ansiLightBackgroundMagenta = "105" - ansiLightBackgroundCyan = "106" - ansiLightBackgroundWhite = "107" -) - -type drawType int - -const ( - foreground drawType = iota - background -) - -type winColor struct { - code uint16 - drawType drawType -} - -var colorMap = map[string]winColor{ - ansiForegroundBlack: {0, foreground}, - ansiForegroundRed: {foregroundRed, foreground}, - ansiForegroundGreen: {foregroundGreen, foreground}, - ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground}, - ansiForegroundBlue: {foregroundBlue, foreground}, - ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground}, - ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground}, - ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, - ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, - - ansiBackgroundBlack: {0, background}, - ansiBackgroundRed: {backgroundRed, background}, - ansiBackgroundGreen: {backgroundGreen, background}, - ansiBackgroundYellow: {backgroundRed | backgroundGreen, background}, - ansiBackgroundBlue: {backgroundBlue, background}, - ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background}, - ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background}, - ansiBackgroundWhite: {backgroundRed | backgroundGreen | 
backgroundBlue, background}, - ansiBackgroundDefault: {0, background}, - - ansiLightForegroundGray: {foregroundIntensity, foreground}, - ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground}, - ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground}, - ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground}, - ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground}, - ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground}, - ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground}, - ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground}, - - ansiLightBackgroundGray: {backgroundIntensity, background}, - ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background}, - ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background}, - ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background}, - ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background}, - ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background}, - ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background}, - ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background}, -} - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - defaultAttr *textAttributes -) - -func init() { - screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) - if screenInfo != nil { - colorMap[ansiForegroundDefault] = winColor{ - screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue), - foreground, - } - colorMap[ansiBackgroundDefault] = winColor{ - screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue), - background, - } - defaultAttr = convertTextAttr(screenInfo.WAttributes) - } -} - -type coord struct { - X, Y int16 -} - -type smallRect struct { - Left, Top, Right, Bottom int16 -} - -type consoleScreenBufferInfo struct { - DwSize coord - DwCursorPosition coord - WAttributes uint16 - SrWindow smallRect - DwMaximumWindowSize coord -} - -func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo { - var csbi consoleScreenBufferInfo - ret, _, _ := procGetConsoleScreenBufferInfo.Call( - hConsoleOutput, - uintptr(unsafe.Pointer(&csbi))) - if ret == 0 { - return nil - } - return &csbi -} - -func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool { - ret, _, _ := procSetConsoleTextAttribute.Call( - hConsoleOutput, - uintptr(wAttributes)) - return ret != 0 -} - -type textAttributes struct { - foregroundColor uint16 - backgroundColor uint16 - foregroundIntensity uint16 - backgroundIntensity uint16 - underscore uint16 - otherAttributes uint16 -} - -func convertTextAttr(winAttr uint16) *textAttributes { - fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue) - bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue) - fgIntensity := winAttr & foregroundIntensity - bgIntensity := winAttr & backgroundIntensity - underline := winAttr & underscore - otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore) - return 
&textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes} -} - -func convertWinAttr(textAttr *textAttributes) uint16 { - var winAttr uint16 - winAttr |= textAttr.foregroundColor - winAttr |= textAttr.backgroundColor - winAttr |= textAttr.foregroundIntensity - winAttr |= textAttr.backgroundIntensity - winAttr |= textAttr.underscore - winAttr |= textAttr.otherAttributes - return winAttr -} - -func changeColor(param []byte) parseResult { - screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) - if screenInfo == nil { - return noConsole - } - - winAttr := convertTextAttr(screenInfo.WAttributes) - strParam := string(param) - if len(strParam) <= 0 { - strParam = "0" - } - csiParam := strings.Split(strParam, string(separatorChar)) - for _, p := range csiParam { - c, ok := colorMap[p] - switch { - case !ok: - switch p { - case ansiReset: - winAttr.foregroundColor = defaultAttr.foregroundColor - winAttr.backgroundColor = defaultAttr.backgroundColor - winAttr.foregroundIntensity = defaultAttr.foregroundIntensity - winAttr.backgroundIntensity = defaultAttr.backgroundIntensity - winAttr.underscore = 0 - winAttr.otherAttributes = 0 - case ansiIntensityOn: - winAttr.foregroundIntensity = foregroundIntensity - case ansiIntensityOff: - winAttr.foregroundIntensity = 0 - case ansiUnderlineOn: - winAttr.underscore = underscore - case ansiUnderlineOff: - winAttr.underscore = 0 - case ansiBlinkOn: - winAttr.backgroundIntensity = backgroundIntensity - case ansiBlinkOff: - winAttr.backgroundIntensity = 0 - default: - // unknown code - } - case c.drawType == foreground: - winAttr.foregroundColor = c.code - case c.drawType == background: - winAttr.backgroundColor = c.code - } - } - winTextAttribute := convertWinAttr(winAttr) - setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute) - - return changedColor -} - -func parseEscapeSequence(command byte, param []byte) parseResult { - if defaultAttr == nil { - return noConsole - } - - switch command { - case sgrCode: - return changeColor(param) - default: - return unknown - } -} - -func (cw *ansiColorWriter) flushBuffer() (int, error) { - return cw.flushTo(cw.w) -} - -func (cw *ansiColorWriter) resetBuffer() (int, error) { - return cw.flushTo(nil) -} - -func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) { - var n1, n2 int - var err error - - startBytes := cw.paramStartBuf.Bytes() - cw.paramStartBuf.Reset() - if w != nil { - n1, err = cw.w.Write(startBytes) - if err != nil { - return n1, err - } - } else { - n1 = len(startBytes) - } - paramBytes := cw.paramBuf.Bytes() - cw.paramBuf.Reset() - if w != nil { - n2, err = cw.w.Write(paramBytes) - if err != nil { - return n1 + n2, err - } - } else { - n2 = len(paramBytes) - } - return n1 + n2, nil -} - -func isParameterChar(b byte) bool { - return ('0' <= b && b <= '9') || b == separatorChar -} - -func (cw *ansiColorWriter) Write(p []byte) (int, error) { - var r, nw, first, last int - if cw.mode != DiscardNonColorEscSeq { - cw.state = outsideCsiCode - cw.resetBuffer() - } - - var err error - for i, ch := range p { - switch cw.state { - case outsideCsiCode: - if ch == firstCsiChar { - cw.paramStartBuf.WriteByte(ch) - cw.state = firstCsiCode - } - case firstCsiCode: - switch ch { - case firstCsiChar: - cw.paramStartBuf.WriteByte(ch) - break - case secondeCsiChar: - cw.paramStartBuf.WriteByte(ch) - cw.state = secondCsiCode - last = i - 1 - default: - cw.resetBuffer() - cw.state = outsideCsiCode - } - case secondCsiCode: - if isParameterChar(ch) { - 
cw.paramBuf.WriteByte(ch) - } else { - nw, err = cw.w.Write(p[first:last]) - r += nw - if err != nil { - return r, err - } - first = i + 1 - result := parseEscapeSequence(ch, cw.paramBuf.Bytes()) - if result == noConsole || (cw.mode == OutputNonColorEscSeq && result == unknown) { - cw.paramBuf.WriteByte(ch) - nw, err := cw.flushBuffer() - if err != nil { - return r, err - } - r += nw - } else { - n, _ := cw.resetBuffer() - // Add one more to the size of the buffer for the last ch - r += n + 1 - } - - cw.state = outsideCsiCode - } - default: - cw.state = outsideCsiCode - } - } - - if cw.mode != DiscardNonColorEscSeq || cw.state == outsideCsiCode { - nw, err = cw.w.Write(p[first:]) - r += nw - } - - return r, err -} diff --git a/vendor/github.com/astaxie/beego/logs/conn.go b/vendor/github.com/astaxie/beego/logs/conn.go deleted file mode 100644 index 6d5bf6bfc..000000000 --- a/vendor/github.com/astaxie/beego/logs/conn.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "encoding/json" - "io" - "net" - "time" -) - -// connWriter implements LoggerInterface. -// it writes messages in keep-live tcp connection. -type connWriter struct { - lg *logWriter - innerWriter io.WriteCloser - ReconnectOnMsg bool `json:"reconnectOnMsg"` - Reconnect bool `json:"reconnect"` - Net string `json:"net"` - Addr string `json:"addr"` - Level int `json:"level"` -} - -// NewConn create new ConnWrite returning as LoggerInterface. -func NewConn() Logger { - conn := new(connWriter) - conn.Level = LevelTrace - return conn -} - -// Init init connection writer with json config. -// json config only need key "level". -func (c *connWriter) Init(jsonConfig string) error { - return json.Unmarshal([]byte(jsonConfig), c) -} - -// WriteMsg write message in connection. -// if connection is down, try to re-connect. -func (c *connWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > c.Level { - return nil - } - if c.needToConnectOnMsg() { - err := c.connect() - if err != nil { - return err - } - } - - if c.ReconnectOnMsg { - defer c.innerWriter.Close() - } - - c.lg.println(when, msg) - return nil -} - -// Flush implementing method. empty. -func (c *connWriter) Flush() { - -} - -// Destroy destroy connection writer and close tcp listener. 
-func (c *connWriter) Destroy() { - if c.innerWriter != nil { - c.innerWriter.Close() - } -} - -func (c *connWriter) connect() error { - if c.innerWriter != nil { - c.innerWriter.Close() - c.innerWriter = nil - } - - conn, err := net.Dial(c.Net, c.Addr) - if err != nil { - return err - } - - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - } - - c.innerWriter = conn - c.lg = newLogWriter(conn) - return nil -} - -func (c *connWriter) needToConnectOnMsg() bool { - if c.Reconnect { - c.Reconnect = false - return true - } - - if c.innerWriter == nil { - return true - } - - return c.ReconnectOnMsg -} - -func init() { - Register(AdapterConn, NewConn) -} diff --git a/vendor/github.com/astaxie/beego/logs/console.go b/vendor/github.com/astaxie/beego/logs/console.go deleted file mode 100644 index e75f2a1b1..000000000 --- a/vendor/github.com/astaxie/beego/logs/console.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "encoding/json" - "os" - "runtime" - "time" -) - -// brush is a color join function -type brush func(string) string - -// newBrush return a fix color Brush -func newBrush(color string) brush { - pre := "\033[" - reset := "\033[0m" - return func(text string) string { - return pre + color + "m" + text + reset - } -} - -var colors = []brush{ - newBrush("1;37"), // Emergency white - newBrush("1;36"), // Alert cyan - newBrush("1;35"), // Critical magenta - newBrush("1;31"), // Error red - newBrush("1;33"), // Warning yellow - newBrush("1;32"), // Notice green - newBrush("1;34"), // Informational blue - newBrush("1;44"), // Debug Background blue -} - -// consoleWriter implements LoggerInterface and writes messages to terminal. -type consoleWriter struct { - lg *logWriter - Level int `json:"level"` - Colorful bool `json:"color"` //this filed is useful only when system's terminal supports color -} - -// NewConsole create ConsoleWriter returning as LoggerInterface. -func NewConsole() Logger { - cw := &consoleWriter{ - lg: newLogWriter(os.Stdout), - Level: LevelDebug, - Colorful: runtime.GOOS != "windows", - } - return cw -} - -// Init init console logger. -// jsonConfig like '{"level":LevelTrace}'. -func (c *consoleWriter) Init(jsonConfig string) error { - if len(jsonConfig) == 0 { - return nil - } - err := json.Unmarshal([]byte(jsonConfig), c) - if runtime.GOOS == "windows" { - c.Colorful = false - } - return err -} - -// WriteMsg write message in console. -func (c *consoleWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > c.Level { - return nil - } - if c.Colorful { - msg = colors[level](msg) - } - c.lg.println(when, msg) - return nil -} - -// Destroy implementing method. empty. -func (c *consoleWriter) Destroy() { - -} - -// Flush implementing method. empty. 
-func (c *consoleWriter) Flush() { - -} - -func init() { - Register(AdapterConsole, NewConsole) -} diff --git a/vendor/github.com/astaxie/beego/logs/file.go b/vendor/github.com/astaxie/beego/logs/file.go deleted file mode 100644 index 588f78603..000000000 --- a/vendor/github.com/astaxie/beego/logs/file.go +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "time" -) - -// fileLogWriter implements LoggerInterface. -// It writes messages by lines limit, file size limit, or time frequency. -type fileLogWriter struct { - sync.RWMutex // write log order by order and atomic incr maxLinesCurLines and maxSizeCurSize - // The opened file - Filename string `json:"filename"` - fileWriter *os.File - - // Rotate at line - MaxLines int `json:"maxlines"` - maxLinesCurLines int - - MaxFiles int `json:"maxfiles"` - MaxFilesCurFiles int - - // Rotate at size - MaxSize int `json:"maxsize"` - maxSizeCurSize int - - // Rotate daily - Daily bool `json:"daily"` - MaxDays int64 `json:"maxdays"` - dailyOpenDate int - dailyOpenTime time.Time - - // Rotate hourly - Hourly bool `json:"hourly"` - MaxHours int64 `json:"maxhours"` - hourlyOpenDate int - hourlyOpenTime time.Time - - Rotate bool `json:"rotate"` - - Level int `json:"level"` - - Perm string `json:"perm"` - - RotatePerm string `json:"rotateperm"` - - fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix -} - -// newFileWriter create a FileLogWriter returning as LoggerInterface. -func newFileWriter() Logger { - w := &fileLogWriter{ - Daily: true, - MaxDays: 7, - Hourly: false, - MaxHours: 168, - Rotate: true, - RotatePerm: "0440", - Level: LevelTrace, - Perm: "0660", - MaxLines: 10000000, - MaxFiles: 999, - MaxSize: 1 << 28, - } - return w -} - -// Init file logger with json config. -// jsonConfig like: -// { -// "filename":"logs/beego.log", -// "maxLines":10000, -// "maxsize":1024, -// "daily":true, -// "maxDays":15, -// "rotate":true, -// "perm":"0600" -// } -func (w *fileLogWriter) Init(jsonConfig string) error { - err := json.Unmarshal([]byte(jsonConfig), w) - if err != nil { - return err - } - if len(w.Filename) == 0 { - return errors.New("jsonconfig must have filename") - } - w.suffix = filepath.Ext(w.Filename) - w.fileNameOnly = strings.TrimSuffix(w.Filename, w.suffix) - if w.suffix == "" { - w.suffix = ".log" - } - err = w.startLogger() - return err -} - -// start file logger. create log file and set to locker-inside file writer. 
-func (w *fileLogWriter) startLogger() error { - file, err := w.createLogFile() - if err != nil { - return err - } - if w.fileWriter != nil { - w.fileWriter.Close() - } - w.fileWriter = file - return w.initFd() -} - -func (w *fileLogWriter) needRotateDaily(size int, day int) bool { - return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) || - (w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) || - (w.Daily && day != w.dailyOpenDate) -} - -func (w *fileLogWriter) needRotateHourly(size int, hour int) bool { - return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) || - (w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) || - (w.Hourly && hour != w.hourlyOpenDate) - -} - -// WriteMsg write logger message into file. -func (w *fileLogWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > w.Level { - return nil - } - hd, d, h := formatTimeHeader(when) - msg = string(hd) + msg + "\n" - if w.Rotate { - w.RLock() - if w.needRotateHourly(len(msg), h) { - w.RUnlock() - w.Lock() - if w.needRotateHourly(len(msg), h) { - if err := w.doRotate(when); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) - } - } - w.Unlock() - } else if w.needRotateDaily(len(msg), d) { - w.RUnlock() - w.Lock() - if w.needRotateDaily(len(msg), d) { - if err := w.doRotate(when); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) - } - } - w.Unlock() - } else { - w.RUnlock() - } - } - - w.Lock() - _, err := w.fileWriter.Write([]byte(msg)) - if err == nil { - w.maxLinesCurLines++ - w.maxSizeCurSize += len(msg) - } - w.Unlock() - return err -} - -func (w *fileLogWriter) createLogFile() (*os.File, error) { - // Open the log file - perm, err := strconv.ParseInt(w.Perm, 8, 64) - if err != nil { - return nil, err - } - - filepath := path.Dir(w.Filename) - os.MkdirAll(filepath, os.FileMode(perm)) - - fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(perm)) - if err == nil { - // Make sure file perm is user set perm cause of `os.OpenFile` will obey umask - os.Chmod(w.Filename, os.FileMode(perm)) - } - return fd, err -} - -func (w *fileLogWriter) initFd() error { - fd := w.fileWriter - fInfo, err := fd.Stat() - if err != nil { - return fmt.Errorf("get stat err: %s", err) - } - w.maxSizeCurSize = int(fInfo.Size()) - w.dailyOpenTime = time.Now() - w.dailyOpenDate = w.dailyOpenTime.Day() - w.hourlyOpenTime = time.Now() - w.hourlyOpenDate = w.hourlyOpenTime.Hour() - w.maxLinesCurLines = 0 - if w.Hourly { - go w.hourlyRotate(w.hourlyOpenTime) - } else if w.Daily { - go w.dailyRotate(w.dailyOpenTime) - } - if fInfo.Size() > 0 && w.MaxLines > 0 { - count, err := w.lines() - if err != nil { - return err - } - w.maxLinesCurLines = count - } - return nil -} - -func (w *fileLogWriter) dailyRotate(openTime time.Time) { - y, m, d := openTime.Add(24 * time.Hour).Date() - nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location()) - tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100)) - <-tm.C - w.Lock() - if w.needRotateDaily(0, time.Now().Day()) { - if err := w.doRotate(time.Now()); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) - } - } - w.Unlock() -} - -func (w *fileLogWriter) hourlyRotate(openTime time.Time) { - y, m, d := openTime.Add(1 * time.Hour).Date() - h, _, _ := openTime.Add(1 * time.Hour).Clock() - nextHour := time.Date(y, m, d, h, 0, 0, 0, openTime.Location()) - tm := time.NewTimer(time.Duration(nextHour.UnixNano() - openTime.UnixNano() + 100)) - 
<-tm.C - w.Lock() - if w.needRotateHourly(0, time.Now().Hour()) { - if err := w.doRotate(time.Now()); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) - } - } - w.Unlock() -} - -func (w *fileLogWriter) lines() (int, error) { - fd, err := os.Open(w.Filename) - if err != nil { - return 0, err - } - defer fd.Close() - - buf := make([]byte, 32768) // 32k - count := 0 - lineSep := []byte{'\n'} - - for { - c, err := fd.Read(buf) - if err != nil && err != io.EOF { - return count, err - } - - count += bytes.Count(buf[:c], lineSep) - - if err == io.EOF { - break - } - } - - return count, nil -} - -// DoRotate means it need to write file in new file. -// new file name like xx.2013-01-01.log (daily) or xx.001.log (by line or size) -func (w *fileLogWriter) doRotate(logTime time.Time) error { - // file exists - // Find the next available number - num := w.MaxFilesCurFiles + 1 - fName := "" - format := "" - var openTime time.Time - rotatePerm, err := strconv.ParseInt(w.RotatePerm, 8, 64) - if err != nil { - return err - } - - _, err = os.Lstat(w.Filename) - if err != nil { - //even if the file is not exist or other ,we should RESTART the logger - goto RESTART_LOGGER - } - - if w.Hourly { - format = "2006010215" - openTime = w.hourlyOpenTime - } else if w.Daily { - format = "2006-01-02" - openTime = w.dailyOpenTime - } - - // only when one of them be setted, then the file would be splited - if w.MaxLines > 0 || w.MaxSize > 0 { - for ; err == nil && num <= w.MaxFiles; num++ { - fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format(format), num, w.suffix) - _, err = os.Lstat(fName) - } - } else { - fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", openTime.Format(format), num, w.suffix) - _, err = os.Lstat(fName) - w.MaxFilesCurFiles = num - } - - // return error if the last file checked still existed - if err == nil { - return fmt.Errorf("Rotate: Cannot find free log number to rename %s", w.Filename) - } - - // close fileWriter before rename - w.fileWriter.Close() - - // Rename the file to its new found name - // even if occurs error,we MUST guarantee to restart new logger - err = os.Rename(w.Filename, fName) - if err != nil { - goto RESTART_LOGGER - } - - err = os.Chmod(fName, os.FileMode(rotatePerm)) - -RESTART_LOGGER: - - startLoggerErr := w.startLogger() - go w.deleteOldLog() - - if startLoggerErr != nil { - return fmt.Errorf("Rotate StartLogger: %s", startLoggerErr) - } - if err != nil { - return fmt.Errorf("Rotate: %s", err) - } - return nil -} - -func (w *fileLogWriter) deleteOldLog() { - dir := filepath.Dir(w.Filename) - filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) { - defer func() { - if r := recover(); r != nil { - fmt.Fprintf(os.Stderr, "Unable to delete old log '%s', error: %v\n", path, r) - } - }() - - if info == nil { - return - } - if w.Hourly { - if !info.IsDir() && info.ModTime().Add(1 * time.Hour * time.Duration(w.MaxHours)).Before(time.Now()) { - if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) && - strings.HasSuffix(filepath.Base(path), w.suffix) { - os.Remove(path) - } - } - } else if w.Daily { - if !info.IsDir() && info.ModTime().Add(24 * time.Hour * time.Duration(w.MaxDays)).Before(time.Now()) { - if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) && - strings.HasSuffix(filepath.Base(path), w.suffix) { - os.Remove(path) - } - } - } - return - }) -} - -// Destroy close the file description, close file writer. 
-func (w *fileLogWriter) Destroy() { - w.fileWriter.Close() -} - -// Flush flush file logger. -// there are no buffering messages in file logger in memory. -// flush file means sync file from disk. -func (w *fileLogWriter) Flush() { - w.fileWriter.Sync() -} - -func init() { - Register(AdapterFile, newFileWriter) -} diff --git a/vendor/github.com/astaxie/beego/logs/jianliao.go b/vendor/github.com/astaxie/beego/logs/jianliao.go deleted file mode 100644 index 88ba0f9af..000000000 --- a/vendor/github.com/astaxie/beego/logs/jianliao.go +++ /dev/null @@ -1,72 +0,0 @@ -package logs - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "time" -) - -// JLWriter implements beego LoggerInterface and is used to send jiaoliao webhook -type JLWriter struct { - AuthorName string `json:"authorname"` - Title string `json:"title"` - WebhookURL string `json:"webhookurl"` - RedirectURL string `json:"redirecturl,omitempty"` - ImageURL string `json:"imageurl,omitempty"` - Level int `json:"level"` -} - -// newJLWriter create jiaoliao writer. -func newJLWriter() Logger { - return &JLWriter{Level: LevelTrace} -} - -// Init JLWriter with json config string -func (s *JLWriter) Init(jsonconfig string) error { - return json.Unmarshal([]byte(jsonconfig), s) -} - -// WriteMsg write message in smtp writer. -// it will send an email with subject and only this message. -func (s *JLWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > s.Level { - return nil - } - - text := fmt.Sprintf("%s %s", when.Format("2006-01-02 15:04:05"), msg) - - form := url.Values{} - form.Add("authorName", s.AuthorName) - form.Add("title", s.Title) - form.Add("text", text) - if s.RedirectURL != "" { - form.Add("redirectUrl", s.RedirectURL) - } - if s.ImageURL != "" { - form.Add("imageUrl", s.ImageURL) - } - - resp, err := http.PostForm(s.WebhookURL, form) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode) - } - return nil -} - -// Flush implementing method. empty. -func (s *JLWriter) Flush() { -} - -// Destroy implementing method. empty. -func (s *JLWriter) Destroy() { -} - -func init() { - Register(AdapterJianLiao, newJLWriter) -} diff --git a/vendor/github.com/astaxie/beego/logs/log.go b/vendor/github.com/astaxie/beego/logs/log.go deleted file mode 100644 index a36141657..000000000 --- a/vendor/github.com/astaxie/beego/logs/log.go +++ /dev/null @@ -1,665 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package logs provide a general log interface -// Usage: -// -// import "github.com/astaxie/beego/logs" -// -// log := NewLogger(10000) -// log.SetLogger("console", "") -// -// > the first params stand for how many channel -// -// Use it like this: -// -// log.Trace("trace") -// log.Info("info") -// log.Warn("warning") -// log.Debug("debug") -// log.Critical("critical") -// -// more docs http://beego.me/docs/module/logs.md -package logs - -import ( - "fmt" - "log" - "os" - "path" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -// RFC5424 log message levels. -const ( - LevelEmergency = iota - LevelAlert - LevelCritical - LevelError - LevelWarning - LevelNotice - LevelInformational - LevelDebug -) - -// levelLogLogger is defined to implement log.Logger -// the real log level will be LevelEmergency -const levelLoggerImpl = -1 - -// Name for adapter with beego official support -const ( - AdapterConsole = "console" - AdapterFile = "file" - AdapterMultiFile = "multifile" - AdapterMail = "smtp" - AdapterConn = "conn" - AdapterEs = "es" - AdapterJianLiao = "jianliao" - AdapterSlack = "slack" - AdapterAliLS = "alils" -) - -// Legacy log level constants to ensure backwards compatibility. -const ( - LevelInfo = LevelInformational - LevelTrace = LevelDebug - LevelWarn = LevelWarning -) - -type newLoggerFunc func() Logger - -// Logger defines the behavior of a log provider. -type Logger interface { - Init(config string) error - WriteMsg(when time.Time, msg string, level int) error - Destroy() - Flush() -} - -var adapters = make(map[string]newLoggerFunc) -var levelPrefix = [LevelDebug + 1]string{"[M] ", "[A] ", "[C] ", "[E] ", "[W] ", "[N] ", "[I] ", "[D] "} - -// Register makes a log provide available by the provided name. -// If Register is called twice with the same name or if driver is nil, -// it panics. -func Register(name string, log newLoggerFunc) { - if log == nil { - panic("logs: Register provide is nil") - } - if _, dup := adapters[name]; dup { - panic("logs: Register called twice for provider " + name) - } - adapters[name] = log -} - -// BeeLogger is default logger in beego application. -// it can contain several providers and log message into all providers. -type BeeLogger struct { - lock sync.Mutex - level int - init bool - enableFuncCallDepth bool - loggerFuncCallDepth int - asynchronous bool - prefix string - msgChanLen int64 - msgChan chan *logMsg - signalChan chan string - wg sync.WaitGroup - outputs []*nameLogger -} - -const defaultAsyncMsgLen = 1e3 - -type nameLogger struct { - Logger - name string -} - -type logMsg struct { - level int - msg string - when time.Time -} - -var logMsgPool *sync.Pool - -// NewLogger returns a new BeeLogger. -// channelLen means the number of messages in chan(used where asynchronous is true). -// if the buffering chan is full, logger adapters write to file or other way. 
-func NewLogger(channelLens ...int64) *BeeLogger { - bl := new(BeeLogger) - bl.level = LevelDebug - bl.loggerFuncCallDepth = 2 - bl.msgChanLen = append(channelLens, 0)[0] - if bl.msgChanLen <= 0 { - bl.msgChanLen = defaultAsyncMsgLen - } - bl.signalChan = make(chan string, 1) - bl.setLogger(AdapterConsole) - return bl -} - -// Async set the log to asynchronous and start the goroutine -func (bl *BeeLogger) Async(msgLen ...int64) *BeeLogger { - bl.lock.Lock() - defer bl.lock.Unlock() - if bl.asynchronous { - return bl - } - bl.asynchronous = true - if len(msgLen) > 0 && msgLen[0] > 0 { - bl.msgChanLen = msgLen[0] - } - bl.msgChan = make(chan *logMsg, bl.msgChanLen) - logMsgPool = &sync.Pool{ - New: func() interface{} { - return &logMsg{} - }, - } - bl.wg.Add(1) - go bl.startLogger() - return bl -} - -// SetLogger provides a given logger adapter into BeeLogger with config string. -// config need to be correct JSON as string: {"interval":360}. -func (bl *BeeLogger) setLogger(adapterName string, configs ...string) error { - config := append(configs, "{}")[0] - for _, l := range bl.outputs { - if l.name == adapterName { - return fmt.Errorf("logs: duplicate adaptername %q (you have set this logger before)", adapterName) - } - } - - log, ok := adapters[adapterName] - if !ok { - return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName) - } - - lg := log() - err := lg.Init(config) - if err != nil { - fmt.Fprintln(os.Stderr, "logs.BeeLogger.SetLogger: "+err.Error()) - return err - } - bl.outputs = append(bl.outputs, &nameLogger{name: adapterName, Logger: lg}) - return nil -} - -// SetLogger provides a given logger adapter into BeeLogger with config string. -// config need to be correct JSON as string: {"interval":360}. -func (bl *BeeLogger) SetLogger(adapterName string, configs ...string) error { - bl.lock.Lock() - defer bl.lock.Unlock() - if !bl.init { - bl.outputs = []*nameLogger{} - bl.init = true - } - return bl.setLogger(adapterName, configs...) -} - -// DelLogger remove a logger adapter in BeeLogger. -func (bl *BeeLogger) DelLogger(adapterName string) error { - bl.lock.Lock() - defer bl.lock.Unlock() - outputs := []*nameLogger{} - for _, lg := range bl.outputs { - if lg.name == adapterName { - lg.Destroy() - } else { - outputs = append(outputs, lg) - } - } - if len(outputs) == len(bl.outputs) { - return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName) - } - bl.outputs = outputs - return nil -} - -func (bl *BeeLogger) writeToLoggers(when time.Time, msg string, level int) { - for _, l := range bl.outputs { - err := l.WriteMsg(when, msg, level) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to WriteMsg to adapter:%v,error:%v\n", l.name, err) - } - } -} - -func (bl *BeeLogger) Write(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - // writeMsg will always add a '\n' character - if p[len(p)-1] == '\n' { - p = p[0: len(p)-1] - } - // set levelLoggerImpl to ensure all log message will be write out - err = bl.writeMsg(levelLoggerImpl, string(p)) - if err == nil { - return len(p), err - } - return 0, err -} - -func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error { - if !bl.init { - bl.lock.Lock() - bl.setLogger(AdapterConsole) - bl.lock.Unlock() - } - - if len(v) > 0 { - msg = fmt.Sprintf(msg, v...) - } - - msg = bl.prefix + " " + msg - - when := time.Now() - if bl.enableFuncCallDepth { - _, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth) - if !ok { - file = "???" 
- line = 0 - } - _, filename := path.Split(file) - msg = "[" + filename + ":" + strconv.Itoa(line) + "] " + msg - } - - //set level info in front of filename info - if logLevel == levelLoggerImpl { - // set to emergency to ensure all log will be print out correctly - logLevel = LevelEmergency - } else { - msg = levelPrefix[logLevel] + msg - } - - if bl.asynchronous { - lm := logMsgPool.Get().(*logMsg) - lm.level = logLevel - lm.msg = msg - lm.when = when - bl.msgChan <- lm - } else { - bl.writeToLoggers(when, msg, logLevel) - } - return nil -} - -// SetLevel Set log message level. -// If message level (such as LevelDebug) is higher than logger level (such as LevelWarning), -// log providers will not even be sent the message. -func (bl *BeeLogger) SetLevel(l int) { - bl.level = l -} - -// GetLevel Get Current log message level. -func (bl *BeeLogger) GetLevel() int { - return bl.level -} - -// SetLogFuncCallDepth set log funcCallDepth -func (bl *BeeLogger) SetLogFuncCallDepth(d int) { - bl.loggerFuncCallDepth = d -} - -// GetLogFuncCallDepth return log funcCallDepth for wrapper -func (bl *BeeLogger) GetLogFuncCallDepth() int { - return bl.loggerFuncCallDepth -} - -// EnableFuncCallDepth enable log funcCallDepth -func (bl *BeeLogger) EnableFuncCallDepth(b bool) { - bl.enableFuncCallDepth = b -} - -// set prefix -func (bl *BeeLogger) SetPrefix(s string) { - bl.prefix = s -} - -// start logger chan reading. -// when chan is not empty, write logs. -func (bl *BeeLogger) startLogger() { - gameOver := false - for { - select { - case bm := <-bl.msgChan: - bl.writeToLoggers(bm.when, bm.msg, bm.level) - logMsgPool.Put(bm) - case sg := <-bl.signalChan: - // Now should only send "flush" or "close" to bl.signalChan - bl.flush() - if sg == "close" { - for _, l := range bl.outputs { - l.Destroy() - } - bl.outputs = nil - gameOver = true - } - bl.wg.Done() - } - if gameOver { - break - } - } -} - -// Emergency Log EMERGENCY level message. -func (bl *BeeLogger) Emergency(format string, v ...interface{}) { - if LevelEmergency > bl.level { - return - } - bl.writeMsg(LevelEmergency, format, v...) -} - -// Alert Log ALERT level message. -func (bl *BeeLogger) Alert(format string, v ...interface{}) { - if LevelAlert > bl.level { - return - } - bl.writeMsg(LevelAlert, format, v...) -} - -// Critical Log CRITICAL level message. -func (bl *BeeLogger) Critical(format string, v ...interface{}) { - if LevelCritical > bl.level { - return - } - bl.writeMsg(LevelCritical, format, v...) -} - -// Error Log ERROR level message. -func (bl *BeeLogger) Error(format string, v ...interface{}) { - if LevelError > bl.level { - return - } - bl.writeMsg(LevelError, format, v...) -} - -// Warning Log WARNING level message. -func (bl *BeeLogger) Warning(format string, v ...interface{}) { - if LevelWarn > bl.level { - return - } - bl.writeMsg(LevelWarn, format, v...) -} - -// Notice Log NOTICE level message. -func (bl *BeeLogger) Notice(format string, v ...interface{}) { - if LevelNotice > bl.level { - return - } - bl.writeMsg(LevelNotice, format, v...) -} - -// Informational Log INFORMATIONAL level message. -func (bl *BeeLogger) Informational(format string, v ...interface{}) { - if LevelInfo > bl.level { - return - } - bl.writeMsg(LevelInfo, format, v...) -} - -// Debug Log DEBUG level message. -func (bl *BeeLogger) Debug(format string, v ...interface{}) { - if LevelDebug > bl.level { - return - } - bl.writeMsg(LevelDebug, format, v...) -} - -// Warn Log WARN level message. 
-// compatibility alias for Warning() -func (bl *BeeLogger) Warn(format string, v ...interface{}) { - if LevelWarn > bl.level { - return - } - bl.writeMsg(LevelWarn, format, v...) -} - -// Info Log INFO level message. -// compatibility alias for Informational() -func (bl *BeeLogger) Info(format string, v ...interface{}) { - if LevelInfo > bl.level { - return - } - bl.writeMsg(LevelInfo, format, v...) -} - -// Trace Log TRACE level message. -// compatibility alias for Debug() -func (bl *BeeLogger) Trace(format string, v ...interface{}) { - if LevelDebug > bl.level { - return - } - bl.writeMsg(LevelDebug, format, v...) -} - -// Flush flush all chan data. -func (bl *BeeLogger) Flush() { - if bl.asynchronous { - bl.signalChan <- "flush" - bl.wg.Wait() - bl.wg.Add(1) - return - } - bl.flush() -} - -// Close close logger, flush all chan data and destroy all adapters in BeeLogger. -func (bl *BeeLogger) Close() { - if bl.asynchronous { - bl.signalChan <- "close" - bl.wg.Wait() - close(bl.msgChan) - } else { - bl.flush() - for _, l := range bl.outputs { - l.Destroy() - } - bl.outputs = nil - } - close(bl.signalChan) -} - -// Reset close all outputs, and set bl.outputs to nil -func (bl *BeeLogger) Reset() { - bl.Flush() - for _, l := range bl.outputs { - l.Destroy() - } - bl.outputs = nil -} - -func (bl *BeeLogger) flush() { - if bl.asynchronous { - for { - if len(bl.msgChan) > 0 { - bm := <-bl.msgChan - bl.writeToLoggers(bm.when, bm.msg, bm.level) - logMsgPool.Put(bm) - continue - } - break - } - } - for _, l := range bl.outputs { - l.Flush() - } -} - -// beeLogger references the used application logger. -var beeLogger = NewLogger() - -// GetBeeLogger returns the default BeeLogger -func GetBeeLogger() *BeeLogger { - return beeLogger -} - -var beeLoggerMap = struct { - sync.RWMutex - logs map[string]*log.Logger -}{ - logs: map[string]*log.Logger{}, -} - -// GetLogger returns the default BeeLogger -func GetLogger(prefixes ...string) *log.Logger { - prefix := append(prefixes, "")[0] - if prefix != "" { - prefix = fmt.Sprintf(`[%s] `, strings.ToUpper(prefix)) - } - beeLoggerMap.RLock() - l, ok := beeLoggerMap.logs[prefix] - if ok { - beeLoggerMap.RUnlock() - return l - } - beeLoggerMap.RUnlock() - beeLoggerMap.Lock() - defer beeLoggerMap.Unlock() - l, ok = beeLoggerMap.logs[prefix] - if !ok { - l = log.New(beeLogger, prefix, 0) - beeLoggerMap.logs[prefix] = l - } - return l -} - -// Reset will remove all the adapter -func Reset() { - beeLogger.Reset() -} - -// Async set the beelogger with Async mode and hold msglen messages -func Async(msgLen ...int64) *BeeLogger { - return beeLogger.Async(msgLen...) -} - -// SetLevel sets the global log level used by the simple logger. -func SetLevel(l int) { - beeLogger.SetLevel(l) -} - -// SetPrefix sets the prefix -func SetPrefix(s string) { - beeLogger.SetPrefix(s) -} - -// EnableFuncCallDepth enable log funcCallDepth -func EnableFuncCallDepth(b bool) { - beeLogger.enableFuncCallDepth = b -} - -// SetLogFuncCall set the CallDepth, default is 4 -func SetLogFuncCall(b bool) { - beeLogger.EnableFuncCallDepth(b) - beeLogger.SetLogFuncCallDepth(4) -} - -// SetLogFuncCallDepth set log funcCallDepth -func SetLogFuncCallDepth(d int) { - beeLogger.loggerFuncCallDepth = d -} - -// SetLogger sets a new logger. -func SetLogger(adapter string, config ...string) error { - return beeLogger.SetLogger(adapter, config...) -} - -// Emergency logs a message at emergency level. 
-func Emergency(f interface{}, v ...interface{}) { - beeLogger.Emergency(formatLog(f, v...)) -} - -// Alert logs a message at alert level. -func Alert(f interface{}, v ...interface{}) { - beeLogger.Alert(formatLog(f, v...)) -} - -// Critical logs a message at critical level. -func Critical(f interface{}, v ...interface{}) { - beeLogger.Critical(formatLog(f, v...)) -} - -// Error logs a message at error level. -func Error(f interface{}, v ...interface{}) { - beeLogger.Error(formatLog(f, v...)) -} - -// Warning logs a message at warning level. -func Warning(f interface{}, v ...interface{}) { - beeLogger.Warn(formatLog(f, v...)) -} - -// Warn compatibility alias for Warning() -func Warn(f interface{}, v ...interface{}) { - beeLogger.Warn(formatLog(f, v...)) -} - -// Notice logs a message at notice level. -func Notice(f interface{}, v ...interface{}) { - beeLogger.Notice(formatLog(f, v...)) -} - -// Informational logs a message at info level. -func Informational(f interface{}, v ...interface{}) { - beeLogger.Info(formatLog(f, v...)) -} - -// Info compatibility alias for Warning() -func Info(f interface{}, v ...interface{}) { - beeLogger.Info(formatLog(f, v...)) -} - -// Debug logs a message at debug level. -func Debug(f interface{}, v ...interface{}) { - beeLogger.Debug(formatLog(f, v...)) -} - -// Trace logs a message at trace level. -// compatibility alias for Warning() -func Trace(f interface{}, v ...interface{}) { - beeLogger.Trace(formatLog(f, v...)) -} - -func formatLog(f interface{}, v ...interface{}) string { - var msg string - switch f.(type) { - case string: - msg = f.(string) - if len(v) == 0 { - return msg - } - if strings.Contains(msg, "%") && !strings.Contains(msg, "%%") { - //format string - } else { - //do not contain format char - msg += strings.Repeat(" %v", len(v)) - } - default: - msg = fmt.Sprint(f) - if len(v) == 0 { - return msg - } - msg += strings.Repeat(" %v", len(v)) - } - return fmt.Sprintf(msg, v...) -} diff --git a/vendor/github.com/astaxie/beego/logs/logger.go b/vendor/github.com/astaxie/beego/logs/logger.go deleted file mode 100644 index 428d3aa06..000000000 --- a/vendor/github.com/astaxie/beego/logs/logger.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "fmt" - "io" - "os" - "sync" - "time" -) - -type logWriter struct { - sync.Mutex - writer io.Writer -} - -func newLogWriter(wr io.Writer) *logWriter { - return &logWriter{writer: wr} -} - -func (lg *logWriter) println(when time.Time, msg string) { - lg.Lock() - h, _, _:= formatTimeHeader(when) - lg.writer.Write(append(append(h, msg...), '\n')) - lg.Unlock() -} - -type outputMode int - -// DiscardNonColorEscSeq supports the divided color escape sequence. -// But non-color escape sequence is not output. -// Please use the OutputNonColorEscSeq If you want to output a non-color -// escape sequences such as ncurses. However, it does not support the divided -// color escape sequence. 
-const ( - _ outputMode = iota - DiscardNonColorEscSeq - OutputNonColorEscSeq -) - -// NewAnsiColorWriter creates and initializes a new ansiColorWriter -// using io.Writer w as its initial contents. -// In the console of Windows, which change the foreground and background -// colors of the text by the escape sequence. -// In the console of other systems, which writes to w all text. -func NewAnsiColorWriter(w io.Writer) io.Writer { - return NewModeAnsiColorWriter(w, DiscardNonColorEscSeq) -} - -// NewModeAnsiColorWriter create and initializes a new ansiColorWriter -// by specifying the outputMode. -func NewModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer { - if _, ok := w.(*ansiColorWriter); !ok { - return &ansiColorWriter{ - w: w, - mode: mode, - } - } - return w -} - -const ( - y1 = `0123456789` - y2 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789` - y3 = `0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999` - y4 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789` - mo1 = `000000000111` - mo2 = `123456789012` - d1 = `0000000001111111111222222222233` - d2 = `1234567890123456789012345678901` - h1 = `000000000011111111112222` - h2 = `012345678901234567890123` - mi1 = `000000000011111111112222222222333333333344444444445555555555` - mi2 = `012345678901234567890123456789012345678901234567890123456789` - s1 = `000000000011111111112222222222333333333344444444445555555555` - s2 = `012345678901234567890123456789012345678901234567890123456789` - ns1 = `0123456789` -) - -func formatTimeHeader(when time.Time) ([]byte, int, int) { - y, mo, d := when.Date() - h, mi, s := when.Clock() - ns := when.Nanosecond() / 1000000 - //len("2006/01/02 15:04:05.123 ")==24 - var buf [24]byte - - buf[0] = y1[y/1000%10] - buf[1] = y2[y/100] - buf[2] = y3[y-y/100*100] - buf[3] = y4[y-y/100*100] - buf[4] = '/' - buf[5] = mo1[mo-1] - buf[6] = mo2[mo-1] - buf[7] = '/' - buf[8] = d1[d-1] - buf[9] = d2[d-1] - buf[10] = ' ' - buf[11] = h1[h] - buf[12] = h2[h] - buf[13] = ':' - buf[14] = mi1[mi] - buf[15] = mi2[mi] - buf[16] = ':' - buf[17] = s1[s] - buf[18] = s2[s] - buf[19] = '.' 
- buf[20] = ns1[ns/100] - buf[21] = ns1[ns%100/10] - buf[22] = ns1[ns%10] - - buf[23] = ' ' - - return buf[0:], d, h -} - -var ( - green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109}) - white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109}) - yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109}) - red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109}) - blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109}) - magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109}) - cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109}) - - w32Green = string([]byte{27, 91, 52, 50, 109}) - w32White = string([]byte{27, 91, 52, 55, 109}) - w32Yellow = string([]byte{27, 91, 52, 51, 109}) - w32Red = string([]byte{27, 91, 52, 49, 109}) - w32Blue = string([]byte{27, 91, 52, 52, 109}) - w32Magenta = string([]byte{27, 91, 52, 53, 109}) - w32Cyan = string([]byte{27, 91, 52, 54, 109}) - - reset = string([]byte{27, 91, 48, 109}) -) - -// ColorByStatus return color by http code -// 2xx return Green -// 3xx return White -// 4xx return Yellow -// 5xx return Red -func ColorByStatus(cond bool, code int) string { - switch { - case code >= 200 && code < 300: - return map[bool]string{true: green, false: w32Green}[cond] - case code >= 300 && code < 400: - return map[bool]string{true: white, false: w32White}[cond] - case code >= 400 && code < 500: - return map[bool]string{true: yellow, false: w32Yellow}[cond] - default: - return map[bool]string{true: red, false: w32Red}[cond] - } -} - -// ColorByMethod return color by http code -// GET return Blue -// POST return Cyan -// PUT return Yellow -// DELETE return Red -// PATCH return Green -// HEAD return Magenta -// OPTIONS return WHITE -func ColorByMethod(cond bool, method string) string { - switch method { - case "GET": - return map[bool]string{true: blue, false: w32Blue}[cond] - case "POST": - return map[bool]string{true: cyan, false: w32Cyan}[cond] - case "PUT": - return map[bool]string{true: yellow, false: w32Yellow}[cond] - case "DELETE": - return map[bool]string{true: red, false: w32Red}[cond] - case "PATCH": - return map[bool]string{true: green, false: w32Green}[cond] - case "HEAD": - return map[bool]string{true: magenta, false: w32Magenta}[cond] - case "OPTIONS": - return map[bool]string{true: white, false: w32White}[cond] - default: - return reset - } -} - -// Guard Mutex to guarantee atomic of W32Debug(string) function -var mu sync.Mutex - -// W32Debug Helper method to output colored logs in Windows terminals -func W32Debug(msg string) { - mu.Lock() - defer mu.Unlock() - - current := time.Now() - w := NewAnsiColorWriter(os.Stdout) - - fmt.Fprintf(w, "[beego] %v %s\n", current.Format("2006/01/02 - 15:04:05"), msg) -} diff --git a/vendor/github.com/astaxie/beego/logs/multifile.go b/vendor/github.com/astaxie/beego/logs/multifile.go deleted file mode 100644 index 901682743..000000000 --- a/vendor/github.com/astaxie/beego/logs/multifile.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "encoding/json" - "time" -) - -// A filesLogWriter manages several fileLogWriter -// filesLogWriter will write logs to the file in json configuration and write the same level log to correspond file -// means if the file name in configuration is project.log filesLogWriter will create project.error.log/project.debug.log -// and write the error-level logs to project.error.log and write the debug-level logs to project.debug.log -// the rotate attribute also acts like fileLogWriter -type multiFileLogWriter struct { - writers [LevelDebug + 1 + 1]*fileLogWriter // the last one for fullLogWriter - fullLogWriter *fileLogWriter - Separate []string `json:"separate"` -} - -var levelNames = [...]string{"emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"} - -// Init file logger with json config. -// jsonConfig like: -// { -// "filename":"logs/beego.log", -// "maxLines":0, -// "maxsize":0, -// "daily":true, -// "maxDays":15, -// "rotate":true, -// "perm":0600, -// "separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"], -// } - -func (f *multiFileLogWriter) Init(config string) error { - writer := newFileWriter().(*fileLogWriter) - err := writer.Init(config) - if err != nil { - return err - } - f.fullLogWriter = writer - f.writers[LevelDebug+1] = writer - - //unmarshal "separate" field to f.Separate - json.Unmarshal([]byte(config), f) - - jsonMap := map[string]interface{}{} - json.Unmarshal([]byte(config), &jsonMap) - - for i := LevelEmergency; i < LevelDebug+1; i++ { - for _, v := range f.Separate { - if v == levelNames[i] { - jsonMap["filename"] = f.fullLogWriter.fileNameOnly + "." + levelNames[i] + f.fullLogWriter.suffix - jsonMap["level"] = i - bs, _ := json.Marshal(jsonMap) - writer = newFileWriter().(*fileLogWriter) - err := writer.Init(string(bs)) - if err != nil { - return err - } - f.writers[i] = writer - } - } - } - - return nil -} - -func (f *multiFileLogWriter) Destroy() { - for i := 0; i < len(f.writers); i++ { - if f.writers[i] != nil { - f.writers[i].Destroy() - } - } -} - -func (f *multiFileLogWriter) WriteMsg(when time.Time, msg string, level int) error { - if f.fullLogWriter != nil { - f.fullLogWriter.WriteMsg(when, msg, level) - } - for i := 0; i < len(f.writers)-1; i++ { - if f.writers[i] != nil { - if level == f.writers[i].Level { - f.writers[i].WriteMsg(when, msg, level) - } - } - } - return nil -} - -func (f *multiFileLogWriter) Flush() { - for i := 0; i < len(f.writers); i++ { - if f.writers[i] != nil { - f.writers[i].Flush() - } - } -} - -// newFilesWriter create a FileLogWriter returning as LoggerInterface. -func newFilesWriter() Logger { - return &multiFileLogWriter{} -} - -func init() { - Register(AdapterMultiFile, newFilesWriter) -} diff --git a/vendor/github.com/astaxie/beego/logs/slack.go b/vendor/github.com/astaxie/beego/logs/slack.go deleted file mode 100644 index 1cd2e5aee..000000000 --- a/vendor/github.com/astaxie/beego/logs/slack.go +++ /dev/null @@ -1,60 +0,0 @@ -package logs - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "time" -) - -// SLACKWriter implements beego LoggerInterface and is used to send jiaoliao webhook -type SLACKWriter struct { - WebhookURL string `json:"webhookurl"` - Level int `json:"level"` -} - -// newSLACKWriter create jiaoliao writer. 
-func newSLACKWriter() Logger { - return &SLACKWriter{Level: LevelTrace} -} - -// Init SLACKWriter with json config string -func (s *SLACKWriter) Init(jsonconfig string) error { - return json.Unmarshal([]byte(jsonconfig), s) -} - -// WriteMsg write message in smtp writer. -// it will send an email with subject and only this message. -func (s *SLACKWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > s.Level { - return nil - } - - text := fmt.Sprintf("{\"text\": \"%s %s\"}", when.Format("2006-01-02 15:04:05"), msg) - - form := url.Values{} - form.Add("payload", text) - - resp, err := http.PostForm(s.WebhookURL, form) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode) - } - return nil -} - -// Flush implementing method. empty. -func (s *SLACKWriter) Flush() { -} - -// Destroy implementing method. empty. -func (s *SLACKWriter) Destroy() { -} - -func init() { - Register(AdapterSlack, newSLACKWriter) -} diff --git a/vendor/github.com/astaxie/beego/logs/smtp.go b/vendor/github.com/astaxie/beego/logs/smtp.go deleted file mode 100644 index 6208d7b85..000000000 --- a/vendor/github.com/astaxie/beego/logs/smtp.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "net" - "net/smtp" - "strings" - "time" -) - -// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server. -type SMTPWriter struct { - Username string `json:"username"` - Password string `json:"password"` - Host string `json:"host"` - Subject string `json:"subject"` - FromAddress string `json:"fromAddress"` - RecipientAddresses []string `json:"sendTos"` - Level int `json:"level"` -} - -// NewSMTPWriter create smtp writer. -func newSMTPWriter() Logger { - return &SMTPWriter{Level: LevelTrace} -} - -// Init smtp writer with json config. 
-// config like: -// { -// "username":"example@gmail.com", -// "password:"password", -// "host":"smtp.gmail.com:465", -// "subject":"email title", -// "fromAddress":"from@example.com", -// "sendTos":["email1","email2"], -// "level":LevelError -// } -func (s *SMTPWriter) Init(jsonconfig string) error { - return json.Unmarshal([]byte(jsonconfig), s) -} - -func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth { - if len(strings.Trim(s.Username, " ")) == 0 && len(strings.Trim(s.Password, " ")) == 0 { - return nil - } - return smtp.PlainAuth( - "", - s.Username, - s.Password, - host, - ) -} - -func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAddress string, recipients []string, msgContent []byte) error { - client, err := smtp.Dial(hostAddressWithPort) - if err != nil { - return err - } - - host, _, _ := net.SplitHostPort(hostAddressWithPort) - tlsConn := &tls.Config{ - InsecureSkipVerify: true, - ServerName: host, - } - if err = client.StartTLS(tlsConn); err != nil { - return err - } - - if auth != nil { - if err = client.Auth(auth); err != nil { - return err - } - } - - if err = client.Mail(fromAddress); err != nil { - return err - } - - for _, rec := range recipients { - if err = client.Rcpt(rec); err != nil { - return err - } - } - - w, err := client.Data() - if err != nil { - return err - } - _, err = w.Write(msgContent) - if err != nil { - return err - } - - err = w.Close() - if err != nil { - return err - } - - return client.Quit() -} - -// WriteMsg write message in smtp writer. -// it will send an email with subject and only this message. -func (s *SMTPWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > s.Level { - return nil - } - - hp := strings.Split(s.Host, ":") - - // Set up authentication information. - auth := s.getSMTPAuth(hp[0]) - - // Connect to the server, authenticate, set the sender and recipient, - // and send the email all in one step. - contentType := "Content-Type: text/plain" + "; charset=UTF-8" - mailmsg := []byte("To: " + strings.Join(s.RecipientAddresses, ";") + "\r\nFrom: " + s.FromAddress + "<" + s.FromAddress + - ">\r\nSubject: " + s.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", when.Format("2006-01-02 15:04:05")) + msg) - - return s.sendMail(s.Host, auth, s.FromAddress, s.RecipientAddresses, mailmsg) -} - -// Flush implementing method. empty. -func (s *SMTPWriter) Flush() { -} - -// Destroy implementing method. empty. -func (s *SMTPWriter) Destroy() { -} - -func init() { - Register(AdapterMail, newSMTPWriter) -} diff --git a/vendor/github.com/astaxie/beego/mime.go b/vendor/github.com/astaxie/beego/mime.go deleted file mode 100644 index ca2878ab2..000000000 --- a/vendor/github.com/astaxie/beego/mime.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package beego - -var mimemaps = map[string]string{ - ".3dm": "x-world/x-3dmf", - ".3dmf": "x-world/x-3dmf", - ".7z": "application/x-7z-compressed", - ".a": "application/octet-stream", - ".aab": "application/x-authorware-bin", - ".aam": "application/x-authorware-map", - ".aas": "application/x-authorware-seg", - ".abc": "text/vndabc", - ".ace": "application/x-ace-compressed", - ".acgi": "text/html", - ".afl": "video/animaflex", - ".ai": "application/postscript", - ".aif": "audio/aiff", - ".aifc": "audio/aiff", - ".aiff": "audio/aiff", - ".aim": "application/x-aim", - ".aip": "text/x-audiosoft-intra", - ".alz": "application/x-alz-compressed", - ".ani": "application/x-navi-animation", - ".aos": "application/x-nokia-9000-communicator-add-on-software", - ".aps": "application/mime", - ".apk": "application/vnd.android.package-archive", - ".arc": "application/x-arc-compressed", - ".arj": "application/arj", - ".art": "image/x-jg", - ".asf": "video/x-ms-asf", - ".asm": "text/x-asm", - ".asp": "text/asp", - ".asx": "application/x-mplayer2", - ".au": "audio/basic", - ".avi": "video/x-msvideo", - ".avs": "video/avs-video", - ".bcpio": "application/x-bcpio", - ".bin": "application/mac-binary", - ".bmp": "image/bmp", - ".boo": "application/book", - ".book": "application/book", - ".boz": "application/x-bzip2", - ".bsh": "application/x-bsh", - ".bz2": "application/x-bzip2", - ".bz": "application/x-bzip", - ".c++": "text/plain", - ".c": "text/x-c", - ".cab": "application/vnd.ms-cab-compressed", - ".cat": "application/vndms-pkiseccat", - ".cc": "text/x-c", - ".ccad": "application/clariscad", - ".cco": "application/x-cocoa", - ".cdf": "application/cdf", - ".cer": "application/pkix-cert", - ".cha": "application/x-chat", - ".chat": "application/x-chat", - ".chrt": "application/vnd.kde.kchart", - ".class": "application/java", - ".com": "text/plain", - ".conf": "text/plain", - ".cpio": "application/x-cpio", - ".cpp": "text/x-c", - ".cpt": "application/mac-compactpro", - ".crl": "application/pkcs-crl", - ".crt": "application/pkix-cert", - ".crx": "application/x-chrome-extension", - ".csh": "text/x-scriptcsh", - ".css": "text/css", - ".csv": "text/csv", - ".cxx": "text/plain", - ".dar": "application/x-dar", - ".dcr": "application/x-director", - ".deb": "application/x-debian-package", - ".deepv": "application/x-deepv", - ".def": "text/plain", - ".der": "application/x-x509-ca-cert", - ".dif": "video/x-dv", - ".dir": "application/x-director", - ".divx": "video/divx", - ".dl": "video/dl", - ".dmg": "application/x-apple-diskimage", - ".doc": "application/msword", - ".dot": "application/msword", - ".dp": "application/commonground", - ".drw": "application/drafting", - ".dump": "application/octet-stream", - ".dv": "video/x-dv", - ".dvi": "application/x-dvi", - ".dwf": "drawing/x-dwf=(old)", - ".dwg": "application/acad", - ".dxf": "application/dxf", - ".dxr": "application/x-director", - ".el": "text/x-scriptelisp", - ".elc": "application/x-bytecodeelisp=(compiled=elisp)", - ".eml": "message/rfc822", - ".env": "application/x-envoy", - ".eps": "application/postscript", - ".es": "application/x-esrehber", - ".etx": "text/x-setext", - ".evy": "application/envoy", - ".exe": "application/octet-stream", - ".f77": "text/x-fortran", - ".f90": "text/x-fortran", - ".f": "text/x-fortran", - ".fdf": "application/vndfdf", - ".fif": "application/fractals", - ".fli": "video/fli", - ".flo": "image/florian", - ".flv": "video/x-flv", - ".flx": "text/vndfmiflexstor", - ".fmf": "video/x-atomic3d-feature", - ".for": "text/x-fortran", - ".fpx": 
"image/vndfpx", - ".frl": "application/freeloader", - ".funk": "audio/make", - ".g3": "image/g3fax", - ".g": "text/plain", - ".gif": "image/gif", - ".gl": "video/gl", - ".gsd": "audio/x-gsm", - ".gsm": "audio/x-gsm", - ".gsp": "application/x-gsp", - ".gss": "application/x-gss", - ".gtar": "application/x-gtar", - ".gz": "application/x-compressed", - ".gzip": "application/x-gzip", - ".h": "text/x-h", - ".hdf": "application/x-hdf", - ".help": "application/x-helpfile", - ".hgl": "application/vndhp-hpgl", - ".hh": "text/x-h", - ".hlb": "text/x-script", - ".hlp": "application/hlp", - ".hpg": "application/vndhp-hpgl", - ".hpgl": "application/vndhp-hpgl", - ".hqx": "application/binhex", - ".hta": "application/hta", - ".htc": "text/x-component", - ".htm": "text/html", - ".html": "text/html", - ".htmls": "text/html", - ".htt": "text/webviewhtml", - ".htx": "text/html", - ".ice": "x-conference/x-cooltalk", - ".ico": "image/x-icon", - ".ics": "text/calendar", - ".icz": "text/calendar", - ".idc": "text/plain", - ".ief": "image/ief", - ".iefs": "image/ief", - ".iges": "application/iges", - ".igs": "application/iges", - ".ima": "application/x-ima", - ".imap": "application/x-httpd-imap", - ".inf": "application/inf", - ".ins": "application/x-internett-signup", - ".ip": "application/x-ip2", - ".isu": "video/x-isvideo", - ".it": "audio/it", - ".iv": "application/x-inventor", - ".ivr": "i-world/i-vrml", - ".ivy": "application/x-livescreen", - ".jam": "audio/x-jam", - ".jav": "text/x-java-source", - ".java": "text/x-java-source", - ".jcm": "application/x-java-commerce", - ".jfif-tbnl": "image/jpeg", - ".jfif": "image/jpeg", - ".jnlp": "application/x-java-jnlp-file", - ".jpe": "image/jpeg", - ".jpeg": "image/jpeg", - ".jpg": "image/jpeg", - ".jps": "image/x-jps", - ".js": "application/javascript", - ".json": "application/json", - ".jut": "image/jutvision", - ".kar": "audio/midi", - ".karbon": "application/vnd.kde.karbon", - ".kfo": "application/vnd.kde.kformula", - ".flw": "application/vnd.kde.kivio", - ".kml": "application/vnd.google-earth.kml+xml", - ".kmz": "application/vnd.google-earth.kmz", - ".kon": "application/vnd.kde.kontour", - ".kpr": "application/vnd.kde.kpresenter", - ".kpt": "application/vnd.kde.kpresenter", - ".ksp": "application/vnd.kde.kspread", - ".kwd": "application/vnd.kde.kword", - ".kwt": "application/vnd.kde.kword", - ".ksh": "text/x-scriptksh", - ".la": "audio/nspaudio", - ".lam": "audio/x-liveaudio", - ".latex": "application/x-latex", - ".lha": "application/lha", - ".lhx": "application/octet-stream", - ".list": "text/plain", - ".lma": "audio/nspaudio", - ".log": "text/plain", - ".lsp": "text/x-scriptlisp", - ".lst": "text/plain", - ".lsx": "text/x-la-asf", - ".ltx": "application/x-latex", - ".lzh": "application/octet-stream", - ".lzx": "application/lzx", - ".m1v": "video/mpeg", - ".m2a": "audio/mpeg", - ".m2v": "video/mpeg", - ".m3u": "audio/x-mpegurl", - ".m": "text/x-m", - ".man": "application/x-troff-man", - ".manifest": "text/cache-manifest", - ".map": "application/x-navimap", - ".mar": "text/plain", - ".mbd": "application/mbedlet", - ".mc$": "application/x-magic-cap-package-10", - ".mcd": "application/mcad", - ".mcf": "text/mcf", - ".mcp": "application/netmc", - ".me": "application/x-troff-me", - ".mht": "message/rfc822", - ".mhtml": "message/rfc822", - ".mid": "application/x-midi", - ".midi": "application/x-midi", - ".mif": "application/x-frame", - ".mime": "message/rfc822", - ".mjf": "audio/x-vndaudioexplosionmjuicemediafile", - ".mjpg": "video/x-motion-jpeg", - ".mm": 
"application/base64", - ".mme": "application/base64", - ".mod": "audio/mod", - ".moov": "video/quicktime", - ".mov": "video/quicktime", - ".movie": "video/x-sgi-movie", - ".mp2": "audio/mpeg", - ".mp3": "audio/mpeg3", - ".mp4": "video/mp4", - ".mpa": "audio/mpeg", - ".mpc": "application/x-project", - ".mpe": "video/mpeg", - ".mpeg": "video/mpeg", - ".mpg": "video/mpeg", - ".mpga": "audio/mpeg", - ".mpp": "application/vndms-project", - ".mpt": "application/x-project", - ".mpv": "application/x-project", - ".mpx": "application/x-project", - ".mrc": "application/marc", - ".ms": "application/x-troff-ms", - ".mv": "video/x-sgi-movie", - ".my": "audio/make", - ".mzz": "application/x-vndaudioexplosionmzz", - ".nap": "image/naplps", - ".naplps": "image/naplps", - ".nc": "application/x-netcdf", - ".ncm": "application/vndnokiaconfiguration-message", - ".nif": "image/x-niff", - ".niff": "image/x-niff", - ".nix": "application/x-mix-transfer", - ".nsc": "application/x-conference", - ".nvd": "application/x-navidoc", - ".o": "application/octet-stream", - ".oda": "application/oda", - ".odb": "application/vnd.oasis.opendocument.database", - ".odc": "application/vnd.oasis.opendocument.chart", - ".odf": "application/vnd.oasis.opendocument.formula", - ".odg": "application/vnd.oasis.opendocument.graphics", - ".odi": "application/vnd.oasis.opendocument.image", - ".odm": "application/vnd.oasis.opendocument.text-master", - ".odp": "application/vnd.oasis.opendocument.presentation", - ".ods": "application/vnd.oasis.opendocument.spreadsheet", - ".odt": "application/vnd.oasis.opendocument.text", - ".oga": "audio/ogg", - ".ogg": "audio/ogg", - ".ogv": "video/ogg", - ".omc": "application/x-omc", - ".omcd": "application/x-omcdatamaker", - ".omcr": "application/x-omcregerator", - ".otc": "application/vnd.oasis.opendocument.chart-template", - ".otf": "application/vnd.oasis.opendocument.formula-template", - ".otg": "application/vnd.oasis.opendocument.graphics-template", - ".oth": "application/vnd.oasis.opendocument.text-web", - ".oti": "application/vnd.oasis.opendocument.image-template", - ".otm": "application/vnd.oasis.opendocument.text-master", - ".otp": "application/vnd.oasis.opendocument.presentation-template", - ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", - ".ott": "application/vnd.oasis.opendocument.text-template", - ".p10": "application/pkcs10", - ".p12": "application/pkcs-12", - ".p7a": "application/x-pkcs7-signature", - ".p7c": "application/pkcs7-mime", - ".p7m": "application/pkcs7-mime", - ".p7r": "application/x-pkcs7-certreqresp", - ".p7s": "application/pkcs7-signature", - ".p": "text/x-pascal", - ".part": "application/pro_eng", - ".pas": "text/pascal", - ".pbm": "image/x-portable-bitmap", - ".pcl": "application/vndhp-pcl", - ".pct": "image/x-pict", - ".pcx": "image/x-pcx", - ".pdb": "chemical/x-pdb", - ".pdf": "application/pdf", - ".pfunk": "audio/make", - ".pgm": "image/x-portable-graymap", - ".pic": "image/pict", - ".pict": "image/pict", - ".pkg": "application/x-newton-compatible-pkg", - ".pko": "application/vndms-pkipko", - ".pl": "text/x-scriptperl", - ".plx": "application/x-pixclscript", - ".pm4": "application/x-pagemaker", - ".pm5": "application/x-pagemaker", - ".pm": "text/x-scriptperl-module", - ".png": "image/png", - ".pnm": "application/x-portable-anymap", - ".pot": "application/mspowerpoint", - ".pov": "model/x-pov", - ".ppa": "application/vndms-powerpoint", - ".ppm": "image/x-portable-pixmap", - ".pps": "application/mspowerpoint", - ".ppt": "application/mspowerpoint", - ".ppz": 
"application/mspowerpoint", - ".pre": "application/x-freelance", - ".prt": "application/pro_eng", - ".ps": "application/postscript", - ".psd": "application/octet-stream", - ".pvu": "paleovu/x-pv", - ".pwz": "application/vndms-powerpoint", - ".py": "text/x-scriptphyton", - ".pyc": "application/x-bytecodepython", - ".qcp": "audio/vndqcelp", - ".qd3": "x-world/x-3dmf", - ".qd3d": "x-world/x-3dmf", - ".qif": "image/x-quicktime", - ".qt": "video/quicktime", - ".qtc": "video/x-qtc", - ".qti": "image/x-quicktime", - ".qtif": "image/x-quicktime", - ".ra": "audio/x-pn-realaudio", - ".ram": "audio/x-pn-realaudio", - ".rar": "application/x-rar-compressed", - ".ras": "application/x-cmu-raster", - ".rast": "image/cmu-raster", - ".rexx": "text/x-scriptrexx", - ".rf": "image/vndrn-realflash", - ".rgb": "image/x-rgb", - ".rm": "application/vndrn-realmedia", - ".rmi": "audio/mid", - ".rmm": "audio/x-pn-realaudio", - ".rmp": "audio/x-pn-realaudio", - ".rng": "application/ringing-tones", - ".rnx": "application/vndrn-realplayer", - ".roff": "application/x-troff", - ".rp": "image/vndrn-realpix", - ".rpm": "audio/x-pn-realaudio-plugin", - ".rt": "text/vndrn-realtext", - ".rtf": "text/richtext", - ".rtx": "text/richtext", - ".rv": "video/vndrn-realvideo", - ".s": "text/x-asm", - ".s3m": "audio/s3m", - ".s7z": "application/x-7z-compressed", - ".saveme": "application/octet-stream", - ".sbk": "application/x-tbook", - ".scm": "text/x-scriptscheme", - ".sdml": "text/plain", - ".sdp": "application/sdp", - ".sdr": "application/sounder", - ".sea": "application/sea", - ".set": "application/set", - ".sgm": "text/x-sgml", - ".sgml": "text/x-sgml", - ".sh": "text/x-scriptsh", - ".shar": "application/x-bsh", - ".shtml": "text/x-server-parsed-html", - ".sid": "audio/x-psid", - ".skd": "application/x-koan", - ".skm": "application/x-koan", - ".skp": "application/x-koan", - ".skt": "application/x-koan", - ".sit": "application/x-stuffit", - ".sitx": "application/x-stuffitx", - ".sl": "application/x-seelogo", - ".smi": "application/smil", - ".smil": "application/smil", - ".snd": "audio/basic", - ".sol": "application/solids", - ".spc": "text/x-speech", - ".spl": "application/futuresplash", - ".spr": "application/x-sprite", - ".sprite": "application/x-sprite", - ".spx": "audio/ogg", - ".src": "application/x-wais-source", - ".ssi": "text/x-server-parsed-html", - ".ssm": "application/streamingmedia", - ".sst": "application/vndms-pkicertstore", - ".step": "application/step", - ".stl": "application/sla", - ".stp": "application/step", - ".sv4cpio": "application/x-sv4cpio", - ".sv4crc": "application/x-sv4crc", - ".svf": "image/vnddwg", - ".svg": "image/svg+xml", - ".svr": "application/x-world", - ".swf": "application/x-shockwave-flash", - ".t": "application/x-troff", - ".talk": "text/x-speech", - ".tar": "application/x-tar", - ".tbk": "application/toolbook", - ".tcl": "text/x-scripttcl", - ".tcsh": "text/x-scripttcsh", - ".tex": "application/x-tex", - ".texi": "application/x-texinfo", - ".texinfo": "application/x-texinfo", - ".text": "text/plain", - ".tgz": "application/gnutar", - ".tif": "image/tiff", - ".tiff": "image/tiff", - ".tr": "application/x-troff", - ".tsi": "audio/tsp-audio", - ".tsp": "application/dsptype", - ".tsv": "text/tab-separated-values", - ".turbot": "image/florian", - ".txt": "text/plain", - ".uil": "text/x-uil", - ".uni": "text/uri-list", - ".unis": "text/uri-list", - ".unv": "application/i-deas", - ".uri": "text/uri-list", - ".uris": "text/uri-list", - ".ustar": "application/x-ustar", - ".uu": "text/x-uuencode", - 
".uue": "text/x-uuencode", - ".vcd": "application/x-cdlink", - ".vcf": "text/x-vcard", - ".vcard": "text/x-vcard", - ".vcs": "text/x-vcalendar", - ".vda": "application/vda", - ".vdo": "video/vdo", - ".vew": "application/groupwise", - ".viv": "video/vivo", - ".vivo": "video/vivo", - ".vmd": "application/vocaltec-media-desc", - ".vmf": "application/vocaltec-media-file", - ".voc": "audio/voc", - ".vos": "video/vosaic", - ".vox": "audio/voxware", - ".vqe": "audio/x-twinvq-plugin", - ".vqf": "audio/x-twinvq", - ".vql": "audio/x-twinvq-plugin", - ".vrml": "application/x-vrml", - ".vrt": "x-world/x-vrt", - ".vsd": "application/x-visio", - ".vst": "application/x-visio", - ".vsw": "application/x-visio", - ".w60": "application/wordperfect60", - ".w61": "application/wordperfect61", - ".w6w": "application/msword", - ".wav": "audio/wav", - ".wb1": "application/x-qpro", - ".wbmp": "image/vnd.wap.wbmp", - ".web": "application/vndxara", - ".wiz": "application/msword", - ".wk1": "application/x-123", - ".wmf": "windows/metafile", - ".wml": "text/vnd.wap.wml", - ".wmlc": "application/vnd.wap.wmlc", - ".wmls": "text/vnd.wap.wmlscript", - ".wmlsc": "application/vnd.wap.wmlscriptc", - ".word": "application/msword", - ".wp5": "application/wordperfect", - ".wp6": "application/wordperfect", - ".wp": "application/wordperfect", - ".wpd": "application/wordperfect", - ".wq1": "application/x-lotus", - ".wri": "application/mswrite", - ".wrl": "application/x-world", - ".wrz": "model/vrml", - ".wsc": "text/scriplet", - ".wsrc": "application/x-wais-source", - ".wtk": "application/x-wintalk", - ".x-png": "image/png", - ".xbm": "image/x-xbitmap", - ".xdr": "video/x-amt-demorun", - ".xgz": "xgl/drawing", - ".xif": "image/vndxiff", - ".xl": "application/excel", - ".xla": "application/excel", - ".xlb": "application/excel", - ".xlc": "application/excel", - ".xld": "application/excel", - ".xlk": "application/excel", - ".xll": "application/excel", - ".xlm": "application/excel", - ".xls": "application/excel", - ".xlt": "application/excel", - ".xlv": "application/excel", - ".xlw": "application/excel", - ".xm": "audio/xm", - ".xml": "text/xml", - ".xmz": "xgl/movie", - ".xpix": "application/x-vndls-xpix", - ".xpm": "image/x-xpixmap", - ".xsr": "video/x-amt-showrun", - ".xwd": "image/x-xwd", - ".xyz": "chemical/x-pdb", - ".z": "application/x-compress", - ".zip": "application/zip", - ".zoo": "application/octet-stream", - ".zsh": "text/x-scriptzsh", - ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", - ".docm": "application/vnd.ms-word.document.macroEnabled.12", - ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", - ".dotm": "application/vnd.ms-word.template.macroEnabled.12", - ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", - ".xlsm": "application/vnd.ms-excel.sheet.macroEnabled.12", - ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", - ".xltm": "application/vnd.ms-excel.template.macroEnabled.12", - ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", - ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", - ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", - ".pptm": "application/vnd.ms-powerpoint.presentation.macroEnabled.12", - ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", - ".ppsm": "application/vnd.ms-powerpoint.slideshow.macroEnabled.12", - ".potx": 
"application/vnd.openxmlformats-officedocument.presentationml.template", - ".potm": "application/vnd.ms-powerpoint.template.macroEnabled.12", - ".ppam": "application/vnd.ms-powerpoint.addin.macroEnabled.12", - ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", - ".sldm": "application/vnd.ms-powerpoint.slide.macroEnabled.12", - ".thmx": "application/vnd.ms-officetheme", - ".onetoc": "application/onenote", - ".onetoc2": "application/onenote", - ".onetmp": "application/onenote", - ".onepkg": "application/onenote", - ".key": "application/x-iwork-keynote-sffkey", - ".kth": "application/x-iwork-keynote-sffkth", - ".nmbtemplate": "application/x-iwork-numbers-sfftemplate", - ".numbers": "application/x-iwork-numbers-sffnumbers", - ".pages": "application/x-iwork-pages-sffpages", - ".template": "application/x-iwork-pages-sfftemplate", - ".xpi": "application/x-xpinstall", - ".oex": "application/x-opera-extension", - ".mustache": "text/html", -} diff --git a/vendor/github.com/astaxie/beego/namespace.go b/vendor/github.com/astaxie/beego/namespace.go deleted file mode 100644 index 72f22a720..000000000 --- a/vendor/github.com/astaxie/beego/namespace.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "net/http" - "strings" - - beecontext "github.com/astaxie/beego/context" -) - -type namespaceCond func(*beecontext.Context) bool - -// LinkNamespace used as link action -type LinkNamespace func(*Namespace) - -// Namespace is store all the info -type Namespace struct { - prefix string - handlers *ControllerRegister -} - -// NewNamespace get new Namespace -func NewNamespace(prefix string, params ...LinkNamespace) *Namespace { - ns := &Namespace{ - prefix: prefix, - handlers: NewControllerRegister(), - } - for _, p := range params { - p(ns) - } - return ns -} - -// Cond set condition function -// if cond return true can run this namespace, else can't -// usage: -// ns.Cond(func (ctx *context.Context) bool{ -// if ctx.Input.Domain() == "api.beego.me" { -// return true -// } -// return false -// }) -// Cond as the first filter -func (n *Namespace) Cond(cond namespaceCond) *Namespace { - fn := func(ctx *beecontext.Context) { - if !cond(ctx) { - exception("405", ctx) - } - } - if v := n.handlers.filters[BeforeRouter]; len(v) > 0 { - mr := new(FilterRouter) - mr.tree = NewTree() - mr.pattern = "*" - mr.filterFunc = fn - mr.tree.AddRouter("*", true) - n.handlers.filters[BeforeRouter] = append([]*FilterRouter{mr}, v...) 
- } else { - n.handlers.InsertFilter("*", BeforeRouter, fn) - } - return n -} - -// Filter add filter in the Namespace -// action has before & after -// FilterFunc -// usage: -// Filter("before", func (ctx *context.Context){ -// _, ok := ctx.Input.Session("uid").(int) -// if !ok && ctx.Request.RequestURI != "/login" { -// ctx.Redirect(302, "/login") -// } -// }) -func (n *Namespace) Filter(action string, filter ...FilterFunc) *Namespace { - var a int - if action == "before" { - a = BeforeRouter - } else if action == "after" { - a = FinishRouter - } - for _, f := range filter { - n.handlers.InsertFilter("*", a, f) - } - return n -} - -// Router same as beego.Rourer -// refer: https://godoc.org/github.com/astaxie/beego#Router -func (n *Namespace) Router(rootpath string, c ControllerInterface, mappingMethods ...string) *Namespace { - n.handlers.Add(rootpath, c, mappingMethods...) - return n -} - -// AutoRouter same as beego.AutoRouter -// refer: https://godoc.org/github.com/astaxie/beego#AutoRouter -func (n *Namespace) AutoRouter(c ControllerInterface) *Namespace { - n.handlers.AddAuto(c) - return n -} - -// AutoPrefix same as beego.AutoPrefix -// refer: https://godoc.org/github.com/astaxie/beego#AutoPrefix -func (n *Namespace) AutoPrefix(prefix string, c ControllerInterface) *Namespace { - n.handlers.AddAutoPrefix(prefix, c) - return n -} - -// Get same as beego.Get -// refer: https://godoc.org/github.com/astaxie/beego#Get -func (n *Namespace) Get(rootpath string, f FilterFunc) *Namespace { - n.handlers.Get(rootpath, f) - return n -} - -// Post same as beego.Post -// refer: https://godoc.org/github.com/astaxie/beego#Post -func (n *Namespace) Post(rootpath string, f FilterFunc) *Namespace { - n.handlers.Post(rootpath, f) - return n -} - -// Delete same as beego.Delete -// refer: https://godoc.org/github.com/astaxie/beego#Delete -func (n *Namespace) Delete(rootpath string, f FilterFunc) *Namespace { - n.handlers.Delete(rootpath, f) - return n -} - -// Put same as beego.Put -// refer: https://godoc.org/github.com/astaxie/beego#Put -func (n *Namespace) Put(rootpath string, f FilterFunc) *Namespace { - n.handlers.Put(rootpath, f) - return n -} - -// Head same as beego.Head -// refer: https://godoc.org/github.com/astaxie/beego#Head -func (n *Namespace) Head(rootpath string, f FilterFunc) *Namespace { - n.handlers.Head(rootpath, f) - return n -} - -// Options same as beego.Options -// refer: https://godoc.org/github.com/astaxie/beego#Options -func (n *Namespace) Options(rootpath string, f FilterFunc) *Namespace { - n.handlers.Options(rootpath, f) - return n -} - -// Patch same as beego.Patch -// refer: https://godoc.org/github.com/astaxie/beego#Patch -func (n *Namespace) Patch(rootpath string, f FilterFunc) *Namespace { - n.handlers.Patch(rootpath, f) - return n -} - -// Any same as beego.Any -// refer: https://godoc.org/github.com/astaxie/beego#Any -func (n *Namespace) Any(rootpath string, f FilterFunc) *Namespace { - n.handlers.Any(rootpath, f) - return n -} - -// Handler same as beego.Handler -// refer: https://godoc.org/github.com/astaxie/beego#Handler -func (n *Namespace) Handler(rootpath string, h http.Handler) *Namespace { - n.handlers.Handler(rootpath, h) - return n -} - -// Include add include class -// refer: https://godoc.org/github.com/astaxie/beego#Include -func (n *Namespace) Include(cList ...ControllerInterface) *Namespace { - n.handlers.Include(cList...) - return n -} - -// Namespace add nest Namespace -// usage: -//ns := beego.NewNamespace(“/v1”). 
-//Namespace( -// beego.NewNamespace("/shop"). -// Get("/:id", func(ctx *context.Context) { -// ctx.Output.Body([]byte("shopinfo")) -// }), -// beego.NewNamespace("/order"). -// Get("/:id", func(ctx *context.Context) { -// ctx.Output.Body([]byte("orderinfo")) -// }), -// beego.NewNamespace("/crm"). -// Get("/:id", func(ctx *context.Context) { -// ctx.Output.Body([]byte("crminfo")) -// }), -//) -func (n *Namespace) Namespace(ns ...*Namespace) *Namespace { - for _, ni := range ns { - for k, v := range ni.handlers.routers { - if t, ok := n.handlers.routers[k]; ok { - addPrefix(v, ni.prefix) - n.handlers.routers[k].AddTree(ni.prefix, v) - } else { - t = NewTree() - t.AddTree(ni.prefix, v) - addPrefix(t, ni.prefix) - n.handlers.routers[k] = t - } - } - if ni.handlers.enableFilter { - for pos, filterList := range ni.handlers.filters { - for _, mr := range filterList { - t := NewTree() - t.AddTree(ni.prefix, mr.tree) - mr.tree = t - n.handlers.insertFilterRouter(pos, mr) - } - } - } - } - return n -} - -// AddNamespace register Namespace into beego.Handler -// support multi Namespace -func AddNamespace(nl ...*Namespace) { - for _, n := range nl { - for k, v := range n.handlers.routers { - if t, ok := BeeApp.Handlers.routers[k]; ok { - addPrefix(v, n.prefix) - BeeApp.Handlers.routers[k].AddTree(n.prefix, v) - } else { - t = NewTree() - t.AddTree(n.prefix, v) - addPrefix(t, n.prefix) - BeeApp.Handlers.routers[k] = t - } - } - if n.handlers.enableFilter { - for pos, filterList := range n.handlers.filters { - for _, mr := range filterList { - t := NewTree() - t.AddTree(n.prefix, mr.tree) - mr.tree = t - BeeApp.Handlers.insertFilterRouter(pos, mr) - } - } - } - } -} - -func addPrefix(t *Tree, prefix string) { - for _, v := range t.fixrouters { - addPrefix(v, prefix) - } - if t.wildcard != nil { - addPrefix(t.wildcard, prefix) - } - for _, l := range t.leaves { - if c, ok := l.runObject.(*ControllerInfo); ok { - if !strings.HasPrefix(c.pattern, prefix) { - c.pattern = prefix + c.pattern - } - } - } -} - -// NSCond is Namespace Condition -func NSCond(cond namespaceCond) LinkNamespace { - return func(ns *Namespace) { - ns.Cond(cond) - } -} - -// NSBefore Namespace BeforeRouter filter -func NSBefore(filterList ...FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Filter("before", filterList...) - } -} - -// NSAfter add Namespace FinishRouter filter -func NSAfter(filterList ...FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Filter("after", filterList...) - } -} - -// NSInclude Namespace Include ControllerInterface -func NSInclude(cList ...ControllerInterface) LinkNamespace { - return func(ns *Namespace) { - ns.Include(cList...) - } -} - -// NSRouter call Namespace Router -func NSRouter(rootpath string, c ControllerInterface, mappingMethods ...string) LinkNamespace { - return func(ns *Namespace) { - ns.Router(rootpath, c, mappingMethods...) 
- } -} - -// NSGet call Namespace Get -func NSGet(rootpath string, f FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Get(rootpath, f) - } -} - -// NSPost call Namespace Post -func NSPost(rootpath string, f FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Post(rootpath, f) - } -} - -// NSHead call Namespace Head -func NSHead(rootpath string, f FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Head(rootpath, f) - } -} - -// NSPut call Namespace Put -func NSPut(rootpath string, f FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Put(rootpath, f) - } -} - -// NSDelete call Namespace Delete -func NSDelete(rootpath string, f FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Delete(rootpath, f) - } -} - -// NSAny call Namespace Any -func NSAny(rootpath string, f FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Any(rootpath, f) - } -} - -// NSOptions call Namespace Options -func NSOptions(rootpath string, f FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Options(rootpath, f) - } -} - -// NSPatch call Namespace Patch -func NSPatch(rootpath string, f FilterFunc) LinkNamespace { - return func(ns *Namespace) { - ns.Patch(rootpath, f) - } -} - -// NSAutoRouter call Namespace AutoRouter -func NSAutoRouter(c ControllerInterface) LinkNamespace { - return func(ns *Namespace) { - ns.AutoRouter(c) - } -} - -// NSAutoPrefix call Namespace AutoPrefix -func NSAutoPrefix(prefix string, c ControllerInterface) LinkNamespace { - return func(ns *Namespace) { - ns.AutoPrefix(prefix, c) - } -} - -// NSNamespace add sub Namespace -func NSNamespace(prefix string, params ...LinkNamespace) LinkNamespace { - return func(ns *Namespace) { - n := NewNamespace(prefix, params...) - ns.Namespace(n) - } -} - -// NSHandler add handler -func NSHandler(rootpath string, h http.Handler) LinkNamespace { - return func(ns *Namespace) { - ns.Handler(rootpath, h) - } -} diff --git a/vendor/github.com/astaxie/beego/parser.go b/vendor/github.com/astaxie/beego/parser.go deleted file mode 100644 index a86902747..000000000 --- a/vendor/github.com/astaxie/beego/parser.go +++ /dev/null @@ -1,584 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
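(Reference note on the namespace.go removal above: the sketch below restates the usage example embedded in the deleted file's own comments, assuming the beego v1.x API; the /v1 and /shop paths are illustrative only.)

    package routers

    import (
        "github.com/astaxie/beego"
        "github.com/astaxie/beego/context"
    )

    func init() {
        // NSNamespace and NSGet are the LinkNamespace helpers defined in the
        // removed namespace.go; AddNamespace mounts the tree on BeeApp.Handlers.
        ns := beego.NewNamespace("/v1",
            beego.NSNamespace("/shop",
                beego.NSGet("/:id", func(ctx *context.Context) {
                    ctx.Output.Body([]byte("shopinfo"))
                }),
            ),
        )
        beego.AddNamespace(ns)
    }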
- -package beego - -import ( - "encoding/json" - "errors" - "fmt" - "go/ast" - "go/parser" - "go/token" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "unicode" - - "github.com/astaxie/beego/context/param" - "github.com/astaxie/beego/logs" - "github.com/astaxie/beego/utils" -) - -var globalRouterTemplate = `package routers - -import ( - "github.com/astaxie/beego" - "github.com/astaxie/beego/context/param"{{.globalimport}} -) - -func init() { -{{.globalinfo}} -} -` - -var ( - lastupdateFilename = "lastupdate.tmp" - commentFilename string - pkgLastupdate map[string]int64 - genInfoList map[string][]ControllerComments - - routerHooks = map[string]int{ - "beego.BeforeStatic": BeforeStatic, - "beego.BeforeRouter": BeforeRouter, - "beego.BeforeExec": BeforeExec, - "beego.AfterExec": AfterExec, - "beego.FinishRouter": FinishRouter, - } - - routerHooksMapping = map[int]string{ - BeforeStatic: "beego.BeforeStatic", - BeforeRouter: "beego.BeforeRouter", - BeforeExec: "beego.BeforeExec", - AfterExec: "beego.AfterExec", - FinishRouter: "beego.FinishRouter", - } -) - -const commentPrefix = "commentsRouter_" - -func init() { - pkgLastupdate = make(map[string]int64) -} - -func parserPkg(pkgRealpath, pkgpath string) error { - rep := strings.NewReplacer("\\", "_", "/", "_", ".", "_") - commentFilename, _ = filepath.Rel(AppPath, pkgRealpath) - commentFilename = commentPrefix + rep.Replace(commentFilename) + ".go" - if !compareFile(pkgRealpath) { - logs.Info(pkgRealpath + " no changed") - return nil - } - genInfoList = make(map[string][]ControllerComments) - fileSet := token.NewFileSet() - astPkgs, err := parser.ParseDir(fileSet, pkgRealpath, func(info os.FileInfo) bool { - name := info.Name() - return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") - }, parser.ParseComments) - - if err != nil { - return err - } - for _, pkg := range astPkgs { - for _, fl := range pkg.Files { - for _, d := range fl.Decls { - switch specDecl := d.(type) { - case *ast.FuncDecl: - if specDecl.Recv != nil { - exp, ok := specDecl.Recv.List[0].Type.(*ast.StarExpr) // Check that the type is correct first beforing throwing to parser - if ok { - parserComments(specDecl, fmt.Sprint(exp.X), pkgpath) - } - } - } - } - } - } - genRouterCode(pkgRealpath) - savetoFile(pkgRealpath) - return nil -} - -type parsedComment struct { - routerPath string - methods []string - params map[string]parsedParam - filters []parsedFilter - imports []parsedImport -} - -type parsedImport struct { - importPath string - importAlias string -} - -type parsedFilter struct { - pattern string - pos int - filter string - params []bool -} - -type parsedParam struct { - name string - datatype string - location string - defValue string - required bool -} - -func parserComments(f *ast.FuncDecl, controllerName, pkgpath string) error { - if f.Doc != nil { - parsedComments, err := parseComment(f.Doc.List) - if err != nil { - return err - } - for _, parsedComment := range parsedComments { - if parsedComment.routerPath != "" { - key := pkgpath + ":" + controllerName - cc := ControllerComments{} - cc.Method = f.Name.String() - cc.Router = parsedComment.routerPath - cc.AllowHTTPMethods = parsedComment.methods - cc.MethodParams = buildMethodParams(f.Type.Params.List, parsedComment) - cc.FilterComments = buildFilters(parsedComment.filters) - cc.ImportComments = buildImports(parsedComment.imports) - genInfoList[key] = append(genInfoList[key], cc) - } - } - } - return nil -} - -func buildImports(pis 
[]parsedImport) []*ControllerImportComments { - var importComments []*ControllerImportComments - - for _, pi := range pis { - importComments = append(importComments, &ControllerImportComments{ - ImportPath: pi.importPath, - ImportAlias: pi.importAlias, - }) - } - - return importComments -} - -func buildFilters(pfs []parsedFilter) []*ControllerFilterComments { - var filterComments []*ControllerFilterComments - - for _, pf := range pfs { - var ( - returnOnOutput bool - resetParams bool - ) - - if len(pf.params) >= 1 { - returnOnOutput = pf.params[0] - } - - if len(pf.params) >= 2 { - resetParams = pf.params[1] - } - - filterComments = append(filterComments, &ControllerFilterComments{ - Filter: pf.filter, - Pattern: pf.pattern, - Pos: pf.pos, - ReturnOnOutput: returnOnOutput, - ResetParams: resetParams, - }) - } - - return filterComments -} - -func buildMethodParams(funcParams []*ast.Field, pc *parsedComment) []*param.MethodParam { - result := make([]*param.MethodParam, 0, len(funcParams)) - for _, fparam := range funcParams { - for _, pName := range fparam.Names { - methodParam := buildMethodParam(fparam, pName.Name, pc) - result = append(result, methodParam) - } - } - return result -} - -func buildMethodParam(fparam *ast.Field, name string, pc *parsedComment) *param.MethodParam { - options := []param.MethodParamOption{} - if cparam, ok := pc.params[name]; ok { - //Build param from comment info - name = cparam.name - if cparam.required { - options = append(options, param.IsRequired) - } - switch cparam.location { - case "body": - options = append(options, param.InBody) - case "header": - options = append(options, param.InHeader) - case "path": - options = append(options, param.InPath) - } - if cparam.defValue != "" { - options = append(options, param.Default(cparam.defValue)) - } - } else { - if paramInPath(name, pc.routerPath) { - options = append(options, param.InPath) - } - } - return param.New(name, options...) -} - -func paramInPath(name, route string) bool { - return strings.HasSuffix(route, ":"+name) || - strings.Contains(route, ":"+name+"/") -} - -var routeRegex = regexp.MustCompile(`@router\s+(\S+)(?:\s+\[(\S+)\])?`) - -func parseComment(lines []*ast.Comment) (pcs []*parsedComment, err error) { - pcs = []*parsedComment{} - params := map[string]parsedParam{} - filters := []parsedFilter{} - imports := []parsedImport{} - - for _, c := range lines { - t := strings.TrimSpace(strings.TrimLeft(c.Text, "//")) - if strings.HasPrefix(t, "@Param") { - pv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Param"))) - if len(pv) < 4 { - logs.Error("Invalid @Param format. Needs at least 4 parameters") - } - p := parsedParam{} - names := strings.SplitN(pv[0], "=>", 2) - p.name = names[0] - funcParamName := p.name - if len(names) > 1 { - funcParamName = names[1] - } - p.location = pv[1] - p.datatype = pv[2] - switch len(pv) { - case 5: - p.required, _ = strconv.ParseBool(pv[3]) - case 6: - p.defValue = pv[3] - p.required, _ = strconv.ParseBool(pv[4]) - } - params[funcParamName] = p - } - } - - for _, c := range lines { - t := strings.TrimSpace(strings.TrimLeft(c.Text, "//")) - if strings.HasPrefix(t, "@Import") { - iv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Import"))) - if len(iv) == 0 || len(iv) > 2 { - logs.Error("Invalid @Import format. 
Only accepts 1 or 2 parameters") - continue - } - - p := parsedImport{} - p.importPath = iv[0] - - if len(iv) == 2 { - p.importAlias = iv[1] - } - - imports = append(imports, p) - } - } - -filterLoop: - for _, c := range lines { - t := strings.TrimSpace(strings.TrimLeft(c.Text, "//")) - if strings.HasPrefix(t, "@Filter") { - fv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Filter"))) - if len(fv) < 3 { - logs.Error("Invalid @Filter format. Needs at least 3 parameters") - continue filterLoop - } - - p := parsedFilter{} - p.pattern = fv[0] - posName := fv[1] - if pos, exists := routerHooks[posName]; exists { - p.pos = pos - } else { - logs.Error("Invalid @Filter pos: ", posName) - continue filterLoop - } - - p.filter = fv[2] - fvParams := fv[3:] - for _, fvParam := range fvParams { - switch fvParam { - case "true": - p.params = append(p.params, true) - case "false": - p.params = append(p.params, false) - default: - logs.Error("Invalid @Filter param: ", fvParam) - continue filterLoop - } - } - - filters = append(filters, p) - } - } - - for _, c := range lines { - var pc = &parsedComment{} - pc.params = params - pc.filters = filters - pc.imports = imports - - t := strings.TrimSpace(strings.TrimLeft(c.Text, "//")) - if strings.HasPrefix(t, "@router") { - t := strings.TrimSpace(strings.TrimLeft(c.Text, "//")) - matches := routeRegex.FindStringSubmatch(t) - if len(matches) == 3 { - pc.routerPath = matches[1] - methods := matches[2] - if methods == "" { - pc.methods = []string{"get"} - //pc.hasGet = true - } else { - pc.methods = strings.Split(methods, ",") - //pc.hasGet = strings.Contains(methods, "get") - } - pcs = append(pcs, pc) - } else { - return nil, errors.New("Router information is missing") - } - } - } - return -} - -// direct copy from bee\g_docs.go -// analysis params return []string -// @Param query form string true "The email for login" -// [query form string true "The email for login"] -func getparams(str string) []string { - var s []rune - var j int - var start bool - var r []string - var quoted int8 - for _, c := range str { - if unicode.IsSpace(c) && quoted == 0 { - if !start { - continue - } else { - start = false - j++ - r = append(r, string(s)) - s = make([]rune, 0) - continue - } - } - - start = true - if c == '"' { - quoted ^= 1 - continue - } - s = append(s, c) - } - if len(s) > 0 { - r = append(r, string(s)) - } - return r -} - -func genRouterCode(pkgRealpath string) { - os.Mkdir(getRouterDir(pkgRealpath), 0755) - logs.Info("generate router from comments") - var ( - globalinfo string - globalimport string - sortKey []string - ) - for k := range genInfoList { - sortKey = append(sortKey, k) - } - sort.Strings(sortKey) - for _, k := range sortKey { - cList := genInfoList[k] - sort.Sort(ControllerCommentsSlice(cList)) - for _, c := range cList { - allmethod := "nil" - if len(c.AllowHTTPMethods) > 0 { - allmethod = "[]string{" - for _, m := range c.AllowHTTPMethods { - allmethod += `"` + m + `",` - } - allmethod = strings.TrimRight(allmethod, ",") + "}" - } - - params := "nil" - if len(c.Params) > 0 { - params = "[]map[string]string{" - for _, p := range c.Params { - for k, v := range p { - params = params + `map[string]string{` + k + `:"` + v + `"},` - } - } - params = strings.TrimRight(params, ",") + "}" - } - - methodParams := "param.Make(" - if len(c.MethodParams) > 0 { - lines := make([]string, 0, len(c.MethodParams)) - for _, m := range c.MethodParams { - lines = append(lines, fmt.Sprint(m)) - } - methodParams += "\n " + - strings.Join(lines, ",\n ") + - ",\n " - } 
- methodParams += ")" - - imports := "" - if len(c.ImportComments) > 0 { - for _, i := range c.ImportComments { - if i.ImportAlias != "" { - imports += fmt.Sprintf(` - %s "%s"`, i.ImportAlias, i.ImportPath) - } else { - imports += fmt.Sprintf(` - "%s"`, i.ImportPath) - } - } - } - - filters := "" - if len(c.FilterComments) > 0 { - for _, f := range c.FilterComments { - filters += fmt.Sprintf(` &beego.ControllerFilter{ - Pattern: "%s", - Pos: %s, - Filter: %s, - ReturnOnOutput: %v, - ResetParams: %v, - },`, f.Pattern, routerHooksMapping[f.Pos], f.Filter, f.ReturnOnOutput, f.ResetParams) - } - } - - if filters == "" { - filters = "nil" - } else { - filters = fmt.Sprintf(`[]*beego.ControllerFilter{ -%s - }`, filters) - } - - globalimport = imports - - globalinfo = globalinfo + ` - beego.GlobalControllerRouter["` + k + `"] = append(beego.GlobalControllerRouter["` + k + `"], - beego.ControllerComments{ - Method: "` + strings.TrimSpace(c.Method) + `", - ` + "Router: `" + c.Router + "`" + `, - AllowHTTPMethods: ` + allmethod + `, - MethodParams: ` + methodParams + `, - Filters: ` + filters + `, - Params: ` + params + `}) -` - } - } - - if globalinfo != "" { - f, err := os.Create(filepath.Join(getRouterDir(pkgRealpath), commentFilename)) - if err != nil { - panic(err) - } - defer f.Close() - - content := strings.Replace(globalRouterTemplate, "{{.globalinfo}}", globalinfo, -1) - content = strings.Replace(content, "{{.globalimport}}", globalimport, -1) - f.WriteString(content) - } -} - -func compareFile(pkgRealpath string) bool { - if !utils.FileExists(filepath.Join(getRouterDir(pkgRealpath), commentFilename)) { - return true - } - if utils.FileExists(lastupdateFilename) { - content, err := ioutil.ReadFile(lastupdateFilename) - if err != nil { - return true - } - json.Unmarshal(content, &pkgLastupdate) - lastupdate, err := getpathTime(pkgRealpath) - if err != nil { - return true - } - if v, ok := pkgLastupdate[pkgRealpath]; ok { - if lastupdate <= v { - return false - } - } - } - return true -} - -func savetoFile(pkgRealpath string) { - lastupdate, err := getpathTime(pkgRealpath) - if err != nil { - return - } - pkgLastupdate[pkgRealpath] = lastupdate - d, err := json.Marshal(pkgLastupdate) - if err != nil { - return - } - ioutil.WriteFile(lastupdateFilename, d, os.ModePerm) -} - -func getpathTime(pkgRealpath string) (lastupdate int64, err error) { - fl, err := ioutil.ReadDir(pkgRealpath) - if err != nil { - return lastupdate, err - } - for _, f := range fl { - if lastupdate < f.ModTime().UnixNano() { - lastupdate = f.ModTime().UnixNano() - } - } - return lastupdate, nil -} - -func getRouterDir(pkgRealpath string) string { - dir := filepath.Dir(pkgRealpath) - for { - d := filepath.Join(dir, "routers") - if utils.FileExists(d) { - return d - } - - if r, _ := filepath.Rel(dir, AppPath); r == "." { - return d - } - // Parent dir. - dir = filepath.Dir(dir) - } -} diff --git a/vendor/github.com/astaxie/beego/policy.go b/vendor/github.com/astaxie/beego/policy.go deleted file mode 100644 index ab23f927a..000000000 --- a/vendor/github.com/astaxie/beego/policy.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2016 beego authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "strings" - - "github.com/astaxie/beego/context" -) - -// PolicyFunc defines a policy function which is invoked before the controller handler is executed. -type PolicyFunc func(*context.Context) - -// FindPolicy Find Router info for URL -func (p *ControllerRegister) FindPolicy(cont *context.Context) []PolicyFunc { - var urlPath = cont.Input.URL() - if !BConfig.RouterCaseSensitive { - urlPath = strings.ToLower(urlPath) - } - httpMethod := cont.Input.Method() - isWildcard := false - // Find policy for current method - t, ok := p.policies[httpMethod] - // If not found - find policy for whole controller - if !ok { - t, ok = p.policies["*"] - isWildcard = true - } - if ok { - runObjects := t.Match(urlPath, cont) - if r, ok := runObjects.([]PolicyFunc); ok { - return r - } else if !isWildcard { - // If no policies found and we checked not for "*" method - try to find it - t, ok = p.policies["*"] - if ok { - runObjects = t.Match(urlPath, cont) - if r, ok = runObjects.([]PolicyFunc); ok { - return r - } - } - } - } - return nil -} - -func (p *ControllerRegister) addToPolicy(method, pattern string, r ...PolicyFunc) { - method = strings.ToUpper(method) - p.enablePolicy = true - if !BConfig.RouterCaseSensitive { - pattern = strings.ToLower(pattern) - } - if t, ok := p.policies[method]; ok { - t.AddRouter(pattern, r) - } else { - t := NewTree() - t.AddRouter(pattern, r) - p.policies[method] = t - } -} - -// Policy Register new policy in beego -func Policy(pattern, method string, policy ...PolicyFunc) { - BeeApp.Handlers.addToPolicy(method, pattern, policy...) -} - -// Find policies and execute if were found -func (p *ControllerRegister) execPolicy(cont *context.Context, urlPath string) (started bool) { - if !p.enablePolicy { - return false - } - // Find Policy for method - policyList := p.FindPolicy(cont) - if len(policyList) > 0 { - // Run policies - for _, runPolicy := range policyList { - runPolicy(cont) - if cont.ResponseWriter.Started { - return true - } - } - return false - } - return false -} diff --git a/vendor/github.com/astaxie/beego/router.go b/vendor/github.com/astaxie/beego/router.go deleted file mode 100644 index 997b68542..000000000 --- a/vendor/github.com/astaxie/beego/router.go +++ /dev/null @@ -1,1016 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
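(Reference note on the policy.go removal above: its single public entry point was beego.Policy. A minimal sketch of how it was meant to be called, assuming the beego v1.x context API; the /v1/* pattern and header name are placeholders.)

    package routers

    import (
        "github.com/astaxie/beego"
        "github.com/astaxie/beego/context"
    )

    func init() {
        // A PolicyFunc runs before the matched controller; once it writes a
        // response, the removed execPolicy sees ResponseWriter.Started and
        // stops the request.
        beego.Policy("/v1/*", "GET", func(ctx *context.Context) {
            if ctx.Input.Header("X-Auth-Token") == "" {
                ctx.Output.SetStatus(401)
                ctx.Output.Body([]byte("missing auth token"))
            }
        })
    }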
- -package beego - -import ( - "fmt" - "net/http" - "path" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - beecontext "github.com/astaxie/beego/context" - "github.com/astaxie/beego/context/param" - "github.com/astaxie/beego/logs" - "github.com/astaxie/beego/toolbox" - "github.com/astaxie/beego/utils" -) - -// default filter execution points -const ( - BeforeStatic = iota - BeforeRouter - BeforeExec - AfterExec - FinishRouter -) - -const ( - routerTypeBeego = iota - routerTypeRESTFul - routerTypeHandler -) - -var ( - // HTTPMETHOD list the supported http methods. - HTTPMETHOD = map[string]bool{ - "GET": true, - "POST": true, - "PUT": true, - "DELETE": true, - "PATCH": true, - "OPTIONS": true, - "HEAD": true, - "TRACE": true, - "CONNECT": true, - "MKCOL": true, - "COPY": true, - "MOVE": true, - "PROPFIND": true, - "PROPPATCH": true, - "LOCK": true, - "UNLOCK": true, - } - // these beego.Controller's methods shouldn't reflect to AutoRouter - exceptMethod = []string{"Init", "Prepare", "Finish", "Render", "RenderString", - "RenderBytes", "Redirect", "Abort", "StopRun", "UrlFor", "ServeJSON", "ServeJSONP", - "ServeYAML", "ServeXML", "Input", "ParseForm", "GetString", "GetStrings", "GetInt", "GetBool", - "GetFloat", "GetFile", "SaveToFile", "StartSession", "SetSession", "GetSession", - "DelSession", "SessionRegenerateID", "DestroySession", "IsAjax", "GetSecureCookie", - "SetSecureCookie", "XsrfToken", "CheckXsrfCookie", "XsrfFormHtml", - "GetControllerAndAction", "ServeFormatted"} - - urlPlaceholder = "{{placeholder}}" - // DefaultAccessLogFilter will skip the accesslog if return true - DefaultAccessLogFilter FilterHandler = &logFilter{} -) - -// FilterHandler is an interface for -type FilterHandler interface { - Filter(*beecontext.Context) bool -} - -// default log filter static file will not show -type logFilter struct { -} - -func (l *logFilter) Filter(ctx *beecontext.Context) bool { - requestPath := path.Clean(ctx.Request.URL.Path) - if requestPath == "/favicon.ico" || requestPath == "/robots.txt" { - return true - } - for prefix := range BConfig.WebConfig.StaticDir { - if strings.HasPrefix(requestPath, prefix) { - return true - } - } - return false -} - -// ExceptMethodAppend to append a slice's value into "exceptMethod", for controller's methods shouldn't reflect to AutoRouter -func ExceptMethodAppend(action string) { - exceptMethod = append(exceptMethod, action) -} - -// ControllerInfo holds information about the controller. -type ControllerInfo struct { - pattern string - controllerType reflect.Type - methods map[string]string - handler http.Handler - runFunction FilterFunc - routerType int - initialize func() ControllerInterface - methodParams []*param.MethodParam -} - -// ControllerRegister containers registered router rules, controller handlers and filters. -type ControllerRegister struct { - routers map[string]*Tree - enablePolicy bool - policies map[string]*Tree - enableFilter bool - filters [FinishRouter + 1][]*FilterRouter - pool sync.Pool -} - -// NewControllerRegister returns a new ControllerRegister. -func NewControllerRegister() *ControllerRegister { - return &ControllerRegister{ - routers: make(map[string]*Tree), - policies: make(map[string]*Tree), - pool: sync.Pool{ - New: func() interface{} { - return beecontext.NewContext() - }, - }, - } -} - -// Add controller handler and pattern rules to ControllerRegister. 
-// usage: -// default methods is the same name as method -// Add("/user",&UserController{}) -// Add("/api/list",&RestController{},"*:ListFood") -// Add("/api/create",&RestController{},"post:CreateFood") -// Add("/api/update",&RestController{},"put:UpdateFood") -// Add("/api/delete",&RestController{},"delete:DeleteFood") -// Add("/api",&RestController{},"get,post:ApiFunc" -// Add("/simple",&SimpleController{},"get:GetFunc;post:PostFunc") -func (p *ControllerRegister) Add(pattern string, c ControllerInterface, mappingMethods ...string) { - p.addWithMethodParams(pattern, c, nil, mappingMethods...) -} - -func (p *ControllerRegister) addWithMethodParams(pattern string, c ControllerInterface, methodParams []*param.MethodParam, mappingMethods ...string) { - reflectVal := reflect.ValueOf(c) - t := reflect.Indirect(reflectVal).Type() - methods := make(map[string]string) - if len(mappingMethods) > 0 { - semi := strings.Split(mappingMethods[0], ";") - for _, v := range semi { - colon := strings.Split(v, ":") - if len(colon) != 2 { - panic("method mapping format is invalid") - } - comma := strings.Split(colon[0], ",") - for _, m := range comma { - if m == "*" || HTTPMETHOD[strings.ToUpper(m)] { - if val := reflectVal.MethodByName(colon[1]); val.IsValid() { - methods[strings.ToUpper(m)] = colon[1] - } else { - panic("'" + colon[1] + "' method doesn't exist in the controller " + t.Name()) - } - } else { - panic(v + " is an invalid method mapping. Method doesn't exist " + m) - } - } - } - } - - route := &ControllerInfo{} - route.pattern = pattern - route.methods = methods - route.routerType = routerTypeBeego - route.controllerType = t - route.initialize = func() ControllerInterface { - vc := reflect.New(route.controllerType) - execController, ok := vc.Interface().(ControllerInterface) - if !ok { - panic("controller is not ControllerInterface") - } - - elemVal := reflect.ValueOf(c).Elem() - elemType := reflect.TypeOf(c).Elem() - execElem := reflect.ValueOf(execController).Elem() - - numOfFields := elemVal.NumField() - for i := 0; i < numOfFields; i++ { - fieldType := elemType.Field(i) - elemField := execElem.FieldByName(fieldType.Name) - if elemField.CanSet() { - fieldVal := elemVal.Field(i) - elemField.Set(fieldVal) - } - } - - return execController - } - - route.methodParams = methodParams - if len(methods) == 0 { - for m := range HTTPMETHOD { - p.addToRouter(m, pattern, route) - } - } else { - for k := range methods { - if k == "*" { - for m := range HTTPMETHOD { - p.addToRouter(m, pattern, route) - } - } else { - p.addToRouter(k, pattern, route) - } - } - } -} - -func (p *ControllerRegister) addToRouter(method, pattern string, r *ControllerInfo) { - if !BConfig.RouterCaseSensitive { - pattern = strings.ToLower(pattern) - } - if t, ok := p.routers[method]; ok { - t.AddRouter(pattern, r) - } else { - t := NewTree() - t.AddRouter(pattern, r) - p.routers[method] = t - } -} - -// Include only when the Runmode is dev will generate router file in the router/auto.go from the controller -// Include(&BankAccount{}, &OrderController{},&RefundController{},&ReceiptController{}) -func (p *ControllerRegister) Include(cList ...ControllerInterface) { - if BConfig.RunMode == DEV { - skip := make(map[string]bool, 10) - for _, c := range cList { - reflectVal := reflect.ValueOf(c) - t := reflect.Indirect(reflectVal).Type() - wgopath := utils.GetGOPATHs() - if len(wgopath) == 0 { - panic("you are in dev mode. 
So please set gopath") - } - pkgpath := "" - for _, wg := range wgopath { - wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", t.PkgPath())) - if utils.FileExists(wg) { - pkgpath = wg - break - } - } - if pkgpath != "" { - if _, ok := skip[pkgpath]; !ok { - skip[pkgpath] = true - parserPkg(pkgpath, t.PkgPath()) - } - } - } - } - for _, c := range cList { - reflectVal := reflect.ValueOf(c) - t := reflect.Indirect(reflectVal).Type() - key := t.PkgPath() + ":" + t.Name() - if comm, ok := GlobalControllerRouter[key]; ok { - for _, a := range comm { - for _, f := range a.Filters { - p.InsertFilter(f.Pattern, f.Pos, f.Filter, f.ReturnOnOutput, f.ResetParams) - } - - p.addWithMethodParams(a.Router, c, a.MethodParams, strings.Join(a.AllowHTTPMethods, ",")+":"+a.Method) - } - } - } -} - -// Get add get method -// usage: -// Get("/", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func (p *ControllerRegister) Get(pattern string, f FilterFunc) { - p.AddMethod("get", pattern, f) -} - -// Post add post method -// usage: -// Post("/api", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func (p *ControllerRegister) Post(pattern string, f FilterFunc) { - p.AddMethod("post", pattern, f) -} - -// Put add put method -// usage: -// Put("/api/:id", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func (p *ControllerRegister) Put(pattern string, f FilterFunc) { - p.AddMethod("put", pattern, f) -} - -// Delete add delete method -// usage: -// Delete("/api/:id", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func (p *ControllerRegister) Delete(pattern string, f FilterFunc) { - p.AddMethod("delete", pattern, f) -} - -// Head add head method -// usage: -// Head("/api/:id", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func (p *ControllerRegister) Head(pattern string, f FilterFunc) { - p.AddMethod("head", pattern, f) -} - -// Patch add patch method -// usage: -// Patch("/api/:id", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func (p *ControllerRegister) Patch(pattern string, f FilterFunc) { - p.AddMethod("patch", pattern, f) -} - -// Options add options method -// usage: -// Options("/api/:id", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func (p *ControllerRegister) Options(pattern string, f FilterFunc) { - p.AddMethod("options", pattern, f) -} - -// Any add all method -// usage: -// Any("/api/:id", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func (p *ControllerRegister) Any(pattern string, f FilterFunc) { - p.AddMethod("*", pattern, f) -} - -// AddMethod add http method router -// usage: -// AddMethod("get","/api/:id", func(ctx *context.Context){ -// ctx.Output.Body("hello world") -// }) -func (p *ControllerRegister) AddMethod(method, pattern string, f FilterFunc) { - method = strings.ToUpper(method) - if method != "*" && !HTTPMETHOD[method] { - panic("not support http method: " + method) - } - route := &ControllerInfo{} - route.pattern = pattern - route.routerType = routerTypeRESTFul - route.runFunction = f - methods := make(map[string]string) - if method == "*" { - for val := range HTTPMETHOD { - methods[val] = val - } - } else { - methods[method] = method - } - route.methods = methods - for k := range methods { - if k == "*" { - for m := range HTTPMETHOD { - p.addToRouter(m, pattern, route) - } - } else { - p.addToRouter(k, pattern, route) - } - } -} - -// Handler add user defined Handler -func (p 
*ControllerRegister) Handler(pattern string, h http.Handler, options ...interface{}) { - route := &ControllerInfo{} - route.pattern = pattern - route.routerType = routerTypeHandler - route.handler = h - if len(options) > 0 { - if _, ok := options[0].(bool); ok { - pattern = path.Join(pattern, "?:all(.*)") - } - } - for m := range HTTPMETHOD { - p.addToRouter(m, pattern, route) - } -} - -// AddAuto router to ControllerRegister. -// example beego.AddAuto(&MainContorlller{}), -// MainController has method List and Page. -// visit the url /main/list to execute List function -// /main/page to execute Page function. -func (p *ControllerRegister) AddAuto(c ControllerInterface) { - p.AddAutoPrefix("/", c) -} - -// AddAutoPrefix Add auto router to ControllerRegister with prefix. -// example beego.AddAutoPrefix("/admin",&MainContorlller{}), -// MainController has method List and Page. -// visit the url /admin/main/list to execute List function -// /admin/main/page to execute Page function. -func (p *ControllerRegister) AddAutoPrefix(prefix string, c ControllerInterface) { - reflectVal := reflect.ValueOf(c) - rt := reflectVal.Type() - ct := reflect.Indirect(reflectVal).Type() - controllerName := strings.TrimSuffix(ct.Name(), "Controller") - for i := 0; i < rt.NumMethod(); i++ { - if !utils.InSlice(rt.Method(i).Name, exceptMethod) { - route := &ControllerInfo{} - route.routerType = routerTypeBeego - route.methods = map[string]string{"*": rt.Method(i).Name} - route.controllerType = ct - pattern := path.Join(prefix, strings.ToLower(controllerName), strings.ToLower(rt.Method(i).Name), "*") - patternInit := path.Join(prefix, controllerName, rt.Method(i).Name, "*") - patternFix := path.Join(prefix, strings.ToLower(controllerName), strings.ToLower(rt.Method(i).Name)) - patternFixInit := path.Join(prefix, controllerName, rt.Method(i).Name) - route.pattern = pattern - for m := range HTTPMETHOD { - p.addToRouter(m, pattern, route) - p.addToRouter(m, patternInit, route) - p.addToRouter(m, patternFix, route) - p.addToRouter(m, patternFixInit, route) - } - } - } -} - -// InsertFilter Add a FilterFunc with pattern rule and action constant. -// params is for: -// 1. setting the returnOnOutput value (false allows multiple filters to execute) -// 2. determining whether or not params need to be reset. -func (p *ControllerRegister) InsertFilter(pattern string, pos int, filter FilterFunc, params ...bool) error { - mr := &FilterRouter{ - tree: NewTree(), - pattern: pattern, - filterFunc: filter, - returnOnOutput: true, - } - if !BConfig.RouterCaseSensitive { - mr.pattern = strings.ToLower(pattern) - } - - paramsLen := len(params) - if paramsLen > 0 { - mr.returnOnOutput = params[0] - } - if paramsLen > 1 { - mr.resetParams = params[1] - } - mr.tree.AddRouter(pattern, true) - return p.insertFilterRouter(pos, mr) -} - -// add Filter into -func (p *ControllerRegister) insertFilterRouter(pos int, mr *FilterRouter) (err error) { - if pos < BeforeStatic || pos > FinishRouter { - err = fmt.Errorf("can not find your filter position") - return - } - p.enableFilter = true - p.filters[pos] = append(p.filters[pos], mr) - return nil -} - -// URLFor does another controller handler in this request function. -// it can access any controller method. 
-func (p *ControllerRegister) URLFor(endpoint string, values ...interface{}) string { - paths := strings.Split(endpoint, ".") - if len(paths) <= 1 { - logs.Warn("urlfor endpoint must like path.controller.method") - return "" - } - if len(values)%2 != 0 { - logs.Warn("urlfor params must key-value pair") - return "" - } - params := make(map[string]string) - if len(values) > 0 { - key := "" - for k, v := range values { - if k%2 == 0 { - key = fmt.Sprint(v) - } else { - params[key] = fmt.Sprint(v) - } - } - } - controllName := strings.Join(paths[:len(paths)-1], "/") - methodName := paths[len(paths)-1] - for m, t := range p.routers { - ok, url := p.geturl(t, "/", controllName, methodName, params, m) - if ok { - return url - } - } - return "" -} - -func (p *ControllerRegister) geturl(t *Tree, url, controllName, methodName string, params map[string]string, httpMethod string) (bool, string) { - for _, subtree := range t.fixrouters { - u := path.Join(url, subtree.prefix) - ok, u := p.geturl(subtree, u, controllName, methodName, params, httpMethod) - if ok { - return ok, u - } - } - if t.wildcard != nil { - u := path.Join(url, urlPlaceholder) - ok, u := p.geturl(t.wildcard, u, controllName, methodName, params, httpMethod) - if ok { - return ok, u - } - } - for _, l := range t.leaves { - if c, ok := l.runObject.(*ControllerInfo); ok { - if c.routerType == routerTypeBeego && - strings.HasSuffix(path.Join(c.controllerType.PkgPath(), c.controllerType.Name()), controllName) { - find := false - if HTTPMETHOD[strings.ToUpper(methodName)] { - if len(c.methods) == 0 { - find = true - } else if m, ok := c.methods[strings.ToUpper(methodName)]; ok && m == strings.ToUpper(methodName) { - find = true - } else if m, ok = c.methods["*"]; ok && m == methodName { - find = true - } - } - if !find { - for m, md := range c.methods { - if (m == "*" || m == httpMethod) && md == methodName { - find = true - } - } - } - if find { - if l.regexps == nil { - if len(l.wildcards) == 0 { - return true, strings.Replace(url, "/"+urlPlaceholder, "", 1) + toURL(params) - } - if len(l.wildcards) == 1 { - if v, ok := params[l.wildcards[0]]; ok { - delete(params, l.wildcards[0]) - return true, strings.Replace(url, urlPlaceholder, v, 1) + toURL(params) - } - return false, "" - } - if len(l.wildcards) == 3 && l.wildcards[0] == "." 
{ - if p, ok := params[":path"]; ok { - if e, isok := params[":ext"]; isok { - delete(params, ":path") - delete(params, ":ext") - return true, strings.Replace(url, urlPlaceholder, p+"."+e, -1) + toURL(params) - } - } - } - canskip := false - for _, v := range l.wildcards { - if v == ":" { - canskip = true - continue - } - if u, ok := params[v]; ok { - delete(params, v) - url = strings.Replace(url, urlPlaceholder, u, 1) - } else { - if canskip { - canskip = false - continue - } - return false, "" - } - } - return true, url + toURL(params) - } - var i int - var startreg bool - regurl := "" - for _, v := range strings.Trim(l.regexps.String(), "^$") { - if v == '(' { - startreg = true - continue - } else if v == ')' { - startreg = false - if v, ok := params[l.wildcards[i]]; ok { - delete(params, l.wildcards[i]) - regurl = regurl + v - i++ - } else { - break - } - } else if !startreg { - regurl = string(append([]rune(regurl), v)) - } - } - if l.regexps.MatchString(regurl) { - ps := strings.Split(regurl, "/") - for _, p := range ps { - url = strings.Replace(url, urlPlaceholder, p, 1) - } - return true, url + toURL(params) - } - } - } - } - } - - return false, "" -} - -func (p *ControllerRegister) execFilter(context *beecontext.Context, urlPath string, pos int) (started bool) { - var preFilterParams map[string]string - for _, filterR := range p.filters[pos] { - if filterR.returnOnOutput && context.ResponseWriter.Started { - return true - } - if filterR.resetParams { - preFilterParams = context.Input.Params() - } - if ok := filterR.ValidRouter(urlPath, context); ok { - filterR.filterFunc(context) - if filterR.resetParams { - context.Input.ResetParams() - for k, v := range preFilterParams { - context.Input.SetParam(k, v) - } - } - } - if filterR.returnOnOutput && context.ResponseWriter.Started { - return true - } - } - return false -} - -// Implement http.Handler interface. 
-func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - startTime := time.Now() - var ( - runRouter reflect.Type - findRouter bool - runMethod string - methodParams []*param.MethodParam - routerInfo *ControllerInfo - isRunnable bool - ) - context := p.pool.Get().(*beecontext.Context) - context.Reset(rw, r) - - defer p.pool.Put(context) - if BConfig.RecoverFunc != nil { - defer BConfig.RecoverFunc(context) - } - - context.Output.EnableGzip = BConfig.EnableGzip - - if BConfig.RunMode == DEV { - context.Output.Header("Server", BConfig.ServerName) - } - - var urlPath = r.URL.Path - - if !BConfig.RouterCaseSensitive { - urlPath = strings.ToLower(urlPath) - } - - // filter wrong http method - if !HTTPMETHOD[r.Method] { - http.Error(rw, "Method Not Allowed", 405) - goto Admin - } - - // filter for static file - if len(p.filters[BeforeStatic]) > 0 && p.execFilter(context, urlPath, BeforeStatic) { - goto Admin - } - - serverStaticRouter(context) - - if context.ResponseWriter.Started { - findRouter = true - goto Admin - } - - if r.Method != http.MethodGet && r.Method != http.MethodHead { - if BConfig.CopyRequestBody && !context.Input.IsUpload() { - context.Input.CopyBody(BConfig.MaxMemory) - } - context.Input.ParseFormOrMulitForm(BConfig.MaxMemory) - } - - // session init - if BConfig.WebConfig.Session.SessionOn { - var err error - context.Input.CruSession, err = GlobalSessions.SessionStart(rw, r) - if err != nil { - logs.Error(err) - exception("503", context) - goto Admin - } - defer func() { - if context.Input.CruSession != nil { - context.Input.CruSession.SessionRelease(rw) - } - }() - } - if len(p.filters[BeforeRouter]) > 0 && p.execFilter(context, urlPath, BeforeRouter) { - goto Admin - } - // User can define RunController and RunMethod in filter - if context.Input.RunController != nil && context.Input.RunMethod != "" { - findRouter = true - runMethod = context.Input.RunMethod - runRouter = context.Input.RunController - } else { - routerInfo, findRouter = p.FindRouter(context) - } - - //if no matches to url, throw a not found exception - if !findRouter { - exception("404", context) - goto Admin - } - if splat := context.Input.Param(":splat"); splat != "" { - for k, v := range strings.Split(splat, "/") { - context.Input.SetParam(strconv.Itoa(k), v) - } - } - - //execute middleware filters - if len(p.filters[BeforeExec]) > 0 && p.execFilter(context, urlPath, BeforeExec) { - goto Admin - } - - //check policies - if p.execPolicy(context, urlPath) { - goto Admin - } - - if routerInfo != nil { - //store router pattern into context - context.Input.SetData("RouterPattern", routerInfo.pattern) - if routerInfo.routerType == routerTypeRESTFul { - if _, ok := routerInfo.methods[r.Method]; ok { - isRunnable = true - routerInfo.runFunction(context) - } else { - exception("405", context) - goto Admin - } - } else if routerInfo.routerType == routerTypeHandler { - isRunnable = true - routerInfo.handler.ServeHTTP(rw, r) - } else { - runRouter = routerInfo.controllerType - methodParams = routerInfo.methodParams - method := r.Method - if r.Method == http.MethodPost && context.Input.Query("_method") == http.MethodPost { - method = http.MethodPut - } - if r.Method == http.MethodPost && context.Input.Query("_method") == http.MethodDelete { - method = http.MethodDelete - } - if m, ok := routerInfo.methods[method]; ok { - runMethod = m - } else if m, ok = routerInfo.methods["*"]; ok { - runMethod = m - } else { - runMethod = method - } - } - } - - // also defined runRouter & runMethod from 
filter - if !isRunnable { - //Invoke the request handler - var execController ControllerInterface - if routerInfo != nil && routerInfo.initialize != nil { - execController = routerInfo.initialize() - } else { - vc := reflect.New(runRouter) - var ok bool - execController, ok = vc.Interface().(ControllerInterface) - if !ok { - panic("controller is not ControllerInterface") - } - } - - //call the controller init function - execController.Init(context, runRouter.Name(), runMethod, execController) - - //call prepare function - execController.Prepare() - - //if XSRF is Enable then check cookie where there has any cookie in the request's cookie _csrf - if BConfig.WebConfig.EnableXSRF { - execController.XSRFToken() - if r.Method == http.MethodPost || r.Method == http.MethodDelete || r.Method == http.MethodPut || - (r.Method == http.MethodPost && (context.Input.Query("_method") == http.MethodDelete || context.Input.Query("_method") == http.MethodPut)) { - execController.CheckXSRFCookie() - } - } - - execController.URLMapping() - - if !context.ResponseWriter.Started { - //exec main logic - switch runMethod { - case http.MethodGet: - execController.Get() - case http.MethodPost: - execController.Post() - case http.MethodDelete: - execController.Delete() - case http.MethodPut: - execController.Put() - case http.MethodHead: - execController.Head() - case http.MethodPatch: - execController.Patch() - case http.MethodOptions: - execController.Options() - default: - if !execController.HandlerFunc(runMethod) { - vc := reflect.ValueOf(execController) - method := vc.MethodByName(runMethod) - in := param.ConvertParams(methodParams, method.Type(), context) - out := method.Call(in) - - //For backward compatibility we only handle response if we had incoming methodParams - if methodParams != nil { - p.handleParamResponse(context, execController, out) - } - } - } - - //render template - if !context.ResponseWriter.Started && context.Output.Status == 0 { - if BConfig.WebConfig.AutoRender { - if err := execController.Render(); err != nil { - logs.Error(err) - } - } - } - } - - // finish all runRouter. 
release resource - execController.Finish() - } - - //execute middleware filters - if len(p.filters[AfterExec]) > 0 && p.execFilter(context, urlPath, AfterExec) { - goto Admin - } - - if len(p.filters[FinishRouter]) > 0 && p.execFilter(context, urlPath, FinishRouter) { - goto Admin - } - -Admin: - //admin module record QPS - - statusCode := context.ResponseWriter.Status - if statusCode == 0 { - statusCode = 200 - } - - logAccess(context, &startTime, statusCode) - - timeDur := time.Since(startTime) - context.ResponseWriter.Elapsed = timeDur - if BConfig.Listen.EnableAdmin { - pattern := "" - if routerInfo != nil { - pattern = routerInfo.pattern - } - - if FilterMonitorFunc(r.Method, r.URL.Path, timeDur, pattern, statusCode) { - if runRouter != nil { - go toolbox.StatisticsMap.AddStatistics(r.Method, r.URL.Path, runRouter.Name(), timeDur) - } else { - go toolbox.StatisticsMap.AddStatistics(r.Method, r.URL.Path, "", timeDur) - } - } - } - - if BConfig.RunMode == DEV && !BConfig.Log.AccessLogs { - var devInfo string - iswin := (runtime.GOOS == "windows") - statusColor := logs.ColorByStatus(iswin, statusCode) - methodColor := logs.ColorByMethod(iswin, r.Method) - resetColor := logs.ColorByMethod(iswin, "") - if findRouter { - if routerInfo != nil { - devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s r:%s", context.Input.IP(), statusColor, statusCode, - resetColor, timeDur.String(), "match", methodColor, r.Method, resetColor, r.URL.Path, - routerInfo.pattern) - } else { - devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s", context.Input.IP(), statusColor, statusCode, resetColor, - timeDur.String(), "match", methodColor, r.Method, resetColor, r.URL.Path) - } - } else { - devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s", context.Input.IP(), statusColor, statusCode, resetColor, - timeDur.String(), "nomatch", methodColor, r.Method, resetColor, r.URL.Path) - } - if iswin { - logs.W32Debug(devInfo) - } else { - logs.Debug(devInfo) - } - } - // Call WriteHeader if status code has been set changed - if context.Output.Status != 0 { - context.ResponseWriter.WriteHeader(context.Output.Status) - } -} - -func (p *ControllerRegister) handleParamResponse(context *beecontext.Context, execController ControllerInterface, results []reflect.Value) { - //looping in reverse order for the case when both error and value are returned and error sets the response status code - for i := len(results) - 1; i >= 0; i-- { - result := results[i] - if result.Kind() != reflect.Interface || !result.IsNil() { - resultValue := result.Interface() - context.RenderMethodResult(resultValue) - } - } - if !context.ResponseWriter.Started && len(results) > 0 && context.Output.Status == 0 { - context.Output.SetStatus(200) - } -} - -// FindRouter Find Router info for URL -func (p *ControllerRegister) FindRouter(context *beecontext.Context) (routerInfo *ControllerInfo, isFind bool) { - var urlPath = context.Input.URL() - if !BConfig.RouterCaseSensitive { - urlPath = strings.ToLower(urlPath) - } - httpMethod := context.Input.Method() - if t, ok := p.routers[httpMethod]; ok { - runObject := t.Match(urlPath, context) - if r, ok := runObject.(*ControllerInfo); ok { - return r, true - } - } - return -} - -func toURL(params map[string]string) string { - if len(params) == 0 { - return "" - } - u := "?" 
- for k, v := range params { - u += k + "=" + v + "&" - } - return strings.TrimRight(u, "&") -} - -func logAccess(ctx *beecontext.Context, startTime *time.Time, statusCode int) { - //Skip logging if AccessLogs config is false - if !BConfig.Log.AccessLogs { - return - } - //Skip logging static requests unless EnableStaticLogs config is true - if !BConfig.Log.EnableStaticLogs && DefaultAccessLogFilter.Filter(ctx) { - return - } - var ( - requestTime time.Time - elapsedTime time.Duration - r = ctx.Request - ) - if startTime != nil { - requestTime = *startTime - elapsedTime = time.Since(*startTime) - } - record := &logs.AccessLogRecord{ - RemoteAddr: ctx.Input.IP(), - RequestTime: requestTime, - RequestMethod: r.Method, - Request: fmt.Sprintf("%s %s %s", r.Method, r.RequestURI, r.Proto), - ServerProtocol: r.Proto, - Host: r.Host, - Status: statusCode, - ElapsedTime: elapsedTime, - HTTPReferrer: r.Header.Get("Referer"), - HTTPUserAgent: r.Header.Get("User-Agent"), - RemoteUser: r.Header.Get("Remote-User"), - BodyBytesSent: 0, //@todo this one is missing! - } - logs.AccessLog(record, BConfig.Log.AccessLogsFormat) -} diff --git a/vendor/github.com/astaxie/beego/session/README.md b/vendor/github.com/astaxie/beego/session/README.md deleted file mode 100644 index 6d0a297e3..000000000 --- a/vendor/github.com/astaxie/beego/session/README.md +++ /dev/null @@ -1,114 +0,0 @@ -session -============== - -session is a Go session manager. It can use many session providers. Just like the `database/sql` and `database/sql/driver`. - -## How to install? - - go get github.com/astaxie/beego/session - - -## What providers are supported? - -As of now this session manager support memory, file, Redis and MySQL. - - -## How to use it? - -First you must import it - - import ( - "github.com/astaxie/beego/session" - ) - -Then in you web app init the global session manager - - var globalSessions *session.Manager - -* Use **memory** as provider: - - func init() { - globalSessions, _ = session.NewManager("memory", `{"cookieName":"gosessionid","gclifetime":3600}`) - go globalSessions.GC() - } - -* Use **file** as provider, the last param is the path where you want file to be stored: - - func init() { - globalSessions, _ = session.NewManager("file",`{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"./tmp"}`) - go globalSessions.GC() - } - -* Use **Redis** as provider, the last param is the Redis conn address,poolsize,password: - - func init() { - globalSessions, _ = session.NewManager("redis", `{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:6379,100,astaxie"}`) - go globalSessions.GC() - } - -* Use **MySQL** as provider, the last param is the DSN, learn more from [mysql](https://github.com/go-sql-driver/mysql#dsn-data-source-name): - - func init() { - globalSessions, _ = session.NewManager( - "mysql", `{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"username:password@protocol(address)/dbname?param=value"}`) - go globalSessions.GC() - } - -* Use **Cookie** as provider: - - func init() { - globalSessions, _ = session.NewManager( - "cookie", `{"cookieName":"gosessionid","enableSetCookie":false,"gclifetime":3600,"ProviderConfig":"{\"cookieName\":\"gosessionid\",\"securityKey\":\"beegocookiehashkey\"}"}`) - go globalSessions.GC() - } - - -Finally in the handlerfunc you can use it like this - - func login(w http.ResponseWriter, r *http.Request) { - sess := globalSessions.SessionStart(w, r) - defer sess.SessionRelease(w) - username := sess.Get("username") - 
fmt.Println(username) - if r.Method == "GET" { - t, _ := template.ParseFiles("login.gtpl") - t.Execute(w, nil) - } else { - fmt.Println("username:", r.Form["username"]) - sess.Set("username", r.Form["username"]) - fmt.Println("password:", r.Form["password"]) - } - } - - -## How to write own provider? - -When you develop a web app, maybe you want to write own provider because you must meet the requirements. - -Writing a provider is easy. You only need to define two struct types -(Session and Provider), which satisfy the interface definition. -Maybe you will find the **memory** provider is a good example. - - type SessionStore interface { - Set(key, value interface{}) error //set session value - Get(key interface{}) interface{} //get session value - Delete(key interface{}) error //delete session value - SessionID() string //back current sessionID - SessionRelease(w http.ResponseWriter) // release the resource & save data to provider & return the data - Flush() error //delete all data - } - - type Provider interface { - SessionInit(gclifetime int64, config string) error - SessionRead(sid string) (SessionStore, error) - SessionExist(sid string) bool - SessionRegenerate(oldsid, sid string) (SessionStore, error) - SessionDestroy(sid string) error - SessionAll() int //get all active session - SessionGC() - } - - -## LICENSE - -BSD License http://creativecommons.org/licenses/BSD/ diff --git a/vendor/github.com/astaxie/beego/session/sess_cookie.go b/vendor/github.com/astaxie/beego/session/sess_cookie.go deleted file mode 100644 index 145e53c9b..000000000 --- a/vendor/github.com/astaxie/beego/session/sess_cookie.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package session - -import ( - "crypto/aes" - "crypto/cipher" - "encoding/json" - "net/http" - "net/url" - "sync" -) - -var cookiepder = &CookieProvider{} - -// CookieSessionStore Cookie SessionStore -type CookieSessionStore struct { - sid string - values map[interface{}]interface{} // session data - lock sync.RWMutex -} - -// Set value to cookie session. -// the value are encoded as gob with hash block string. 
-func (st *CookieSessionStore) Set(key, value interface{}) error { - st.lock.Lock() - defer st.lock.Unlock() - st.values[key] = value - return nil -} - -// Get value from cookie session -func (st *CookieSessionStore) Get(key interface{}) interface{} { - st.lock.RLock() - defer st.lock.RUnlock() - if v, ok := st.values[key]; ok { - return v - } - return nil -} - -// Delete value in cookie session -func (st *CookieSessionStore) Delete(key interface{}) error { - st.lock.Lock() - defer st.lock.Unlock() - delete(st.values, key) - return nil -} - -// Flush Clean all values in cookie session -func (st *CookieSessionStore) Flush() error { - st.lock.Lock() - defer st.lock.Unlock() - st.values = make(map[interface{}]interface{}) - return nil -} - -// SessionID Return id of this cookie session -func (st *CookieSessionStore) SessionID() string { - return st.sid -} - -// SessionRelease Write cookie session to http response cookie -func (st *CookieSessionStore) SessionRelease(w http.ResponseWriter) { - encodedCookie, err := encodeCookie(cookiepder.block, cookiepder.config.SecurityKey, cookiepder.config.SecurityName, st.values) - if err == nil { - cookie := &http.Cookie{Name: cookiepder.config.CookieName, - Value: url.QueryEscape(encodedCookie), - Path: "/", - HttpOnly: true, - Secure: cookiepder.config.Secure, - MaxAge: cookiepder.config.Maxage} - http.SetCookie(w, cookie) - } -} - -type cookieConfig struct { - SecurityKey string `json:"securityKey"` - BlockKey string `json:"blockKey"` - SecurityName string `json:"securityName"` - CookieName string `json:"cookieName"` - Secure bool `json:"secure"` - Maxage int `json:"maxage"` -} - -// CookieProvider Cookie session provider -type CookieProvider struct { - maxlifetime int64 - config *cookieConfig - block cipher.Block -} - -// SessionInit Init cookie session provider with max lifetime and config json. -// maxlifetime is ignored. -// json config: -// securityKey - hash string -// blockKey - gob encode hash string. it's saved as aes crypto. -// securityName - recognized name in encoded cookie string -// cookieName - cookie name -// maxage - cookie max life time. -func (pder *CookieProvider) SessionInit(maxlifetime int64, config string) error { - pder.config = &cookieConfig{} - err := json.Unmarshal([]byte(config), pder.config) - if err != nil { - return err - } - if pder.config.BlockKey == "" { - pder.config.BlockKey = string(generateRandomKey(16)) - } - if pder.config.SecurityName == "" { - pder.config.SecurityName = string(generateRandomKey(20)) - } - pder.block, err = aes.NewCipher([]byte(pder.config.BlockKey)) - if err != nil { - return err - } - pder.maxlifetime = maxlifetime - return nil -} - -// SessionRead Get SessionStore in cooke. -// decode cooke string to map and put into SessionStore with sid. -func (pder *CookieProvider) SessionRead(sid string) (Store, error) { - maps, _ := decodeCookie(pder.block, - pder.config.SecurityKey, - pder.config.SecurityName, - sid, pder.maxlifetime) - if maps == nil { - maps = make(map[interface{}]interface{}) - } - rs := &CookieSessionStore{sid: sid, values: maps} - return rs, nil -} - -// SessionExist Cookie session is always existed -func (pder *CookieProvider) SessionExist(sid string) bool { - return true -} - -// SessionRegenerate Implement method, no used. -func (pder *CookieProvider) SessionRegenerate(oldsid, sid string) (Store, error) { - return nil, nil -} - -// SessionDestroy Implement method, no used. 
-func (pder *CookieProvider) SessionDestroy(sid string) error { - return nil -} - -// SessionGC Implement method, no used. -func (pder *CookieProvider) SessionGC() { -} - -// SessionAll Implement method, return 0. -func (pder *CookieProvider) SessionAll() int { - return 0 -} - -// SessionUpdate Implement method, no used. -func (pder *CookieProvider) SessionUpdate(sid string) error { - return nil -} - -func init() { - Register("cookie", cookiepder) -} diff --git a/vendor/github.com/astaxie/beego/session/sess_file.go b/vendor/github.com/astaxie/beego/session/sess_file.go deleted file mode 100644 index c089dade0..000000000 --- a/vendor/github.com/astaxie/beego/session/sess_file.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package session - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "sync" - "time" -) - -var ( - filepder = &FileProvider{} - gcmaxlifetime int64 -) - -// FileSessionStore File session store -type FileSessionStore struct { - sid string - lock sync.RWMutex - values map[interface{}]interface{} -} - -// Set value to file session -func (fs *FileSessionStore) Set(key, value interface{}) error { - fs.lock.Lock() - defer fs.lock.Unlock() - fs.values[key] = value - return nil -} - -// Get value from file session -func (fs *FileSessionStore) Get(key interface{}) interface{} { - fs.lock.RLock() - defer fs.lock.RUnlock() - if v, ok := fs.values[key]; ok { - return v - } - return nil -} - -// Delete value in file session by given key -func (fs *FileSessionStore) Delete(key interface{}) error { - fs.lock.Lock() - defer fs.lock.Unlock() - delete(fs.values, key) - return nil -} - -// Flush Clean all values in file session -func (fs *FileSessionStore) Flush() error { - fs.lock.Lock() - defer fs.lock.Unlock() - fs.values = make(map[interface{}]interface{}) - return nil -} - -// SessionID Get file session store id -func (fs *FileSessionStore) SessionID() string { - return fs.sid -} - -// SessionRelease Write file session to local file with Gob string -func (fs *FileSessionStore) SessionRelease(w http.ResponseWriter) { - filepder.lock.Lock() - defer filepder.lock.Unlock() - b, err := EncodeGob(fs.values) - if err != nil { - SLogger.Println(err) - return - } - _, err = os.Stat(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid)) - var f *os.File - if err == nil { - f, err = os.OpenFile(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid), os.O_RDWR, 0777) - if err != nil { - SLogger.Println(err) - return - } - } else if os.IsNotExist(err) { - f, err = os.Create(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid)) - if err != nil { - SLogger.Println(err) - return - } - } else { - return - } - f.Truncate(0) - f.Seek(0, 0) - f.Write(b) - f.Close() -} - -// FileProvider File session provider -type FileProvider struct { - lock sync.RWMutex - maxlifetime int64 - savePath string -} - -// 
SessionInit Init file session provider. -// savePath sets the session files path. -func (fp *FileProvider) SessionInit(maxlifetime int64, savePath string) error { - fp.maxlifetime = maxlifetime - fp.savePath = savePath - return nil -} - -// SessionRead Read file session by sid. -// if file is not exist, create it. -// the file path is generated from sid string. -func (fp *FileProvider) SessionRead(sid string) (Store, error) { - if strings.ContainsAny(sid, "./") { - return nil, nil - } - filepder.lock.Lock() - defer filepder.lock.Unlock() - - err := os.MkdirAll(path.Join(fp.savePath, string(sid[0]), string(sid[1])), 0777) - if err != nil { - SLogger.Println(err.Error()) - } - _, err = os.Stat(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) - var f *os.File - if err == nil { - f, err = os.OpenFile(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid), os.O_RDWR, 0777) - } else if os.IsNotExist(err) { - f, err = os.Create(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) - } else { - return nil, err - } - - defer f.Close() - - os.Chtimes(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid), time.Now(), time.Now()) - var kv map[interface{}]interface{} - b, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - if len(b) == 0 { - kv = make(map[interface{}]interface{}) - } else { - kv, err = DecodeGob(b) - if err != nil { - return nil, err - } - } - - ss := &FileSessionStore{sid: sid, values: kv} - return ss, nil -} - -// SessionExist Check file session exist. -// it checks the file named from sid exist or not. -func (fp *FileProvider) SessionExist(sid string) bool { - filepder.lock.Lock() - defer filepder.lock.Unlock() - - _, err := os.Stat(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) - return err == nil -} - -// SessionDestroy Remove all files in this save path -func (fp *FileProvider) SessionDestroy(sid string) error { - filepder.lock.Lock() - defer filepder.lock.Unlock() - os.Remove(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) - return nil -} - -// SessionGC Recycle files in save path -func (fp *FileProvider) SessionGC() { - filepder.lock.Lock() - defer filepder.lock.Unlock() - - gcmaxlifetime = fp.maxlifetime - filepath.Walk(fp.savePath, gcpath) -} - -// SessionAll Get active file session number. -// it walks save path to count files. -func (fp *FileProvider) SessionAll() int { - a := &activeSession{} - err := filepath.Walk(fp.savePath, func(path string, f os.FileInfo, err error) error { - return a.visit(path, f, err) - }) - if err != nil { - SLogger.Printf("filepath.Walk() returned %v\n", err) - return 0 - } - return a.total -} - -// SessionRegenerate Generate new sid for file session. -// it delete old file and create new file named from new sid. 
-func (fp *FileProvider) SessionRegenerate(oldsid, sid string) (Store, error) { - filepder.lock.Lock() - defer filepder.lock.Unlock() - - oldPath := path.Join(fp.savePath, string(oldsid[0]), string(oldsid[1])) - oldSidFile := path.Join(oldPath, oldsid) - newPath := path.Join(fp.savePath, string(sid[0]), string(sid[1])) - newSidFile := path.Join(newPath, sid) - - // new sid file is exist - _, err := os.Stat(newSidFile) - if err == nil { - return nil, fmt.Errorf("newsid %s exist", newSidFile) - } - - err = os.MkdirAll(newPath, 0777) - if err != nil { - SLogger.Println(err.Error()) - } - - // if old sid file exist - // 1.read and parse file content - // 2.write content to new sid file - // 3.remove old sid file, change new sid file atime and ctime - // 4.return FileSessionStore - _, err = os.Stat(oldSidFile) - if err == nil { - b, err := ioutil.ReadFile(oldSidFile) - if err != nil { - return nil, err - } - - var kv map[interface{}]interface{} - if len(b) == 0 { - kv = make(map[interface{}]interface{}) - } else { - kv, err = DecodeGob(b) - if err != nil { - return nil, err - } - } - - ioutil.WriteFile(newSidFile, b, 0777) - os.Remove(oldSidFile) - os.Chtimes(newSidFile, time.Now(), time.Now()) - ss := &FileSessionStore{sid: sid, values: kv} - return ss, nil - } - - // if old sid file not exist, just create new sid file and return - newf, err := os.Create(newSidFile) - if err != nil { - return nil, err - } - newf.Close() - ss := &FileSessionStore{sid: sid, values: make(map[interface{}]interface{})} - return ss, nil -} - -// remove file in save path if expired -func gcpath(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - return nil - } - if (info.ModTime().Unix() + gcmaxlifetime) < time.Now().Unix() { - os.Remove(path) - } - return nil -} - -type activeSession struct { - total int -} - -func (as *activeSession) visit(paths string, f os.FileInfo, err error) error { - if err != nil { - return err - } - if f.IsDir() { - return nil - } - as.total = as.total + 1 - return nil -} - -func init() { - Register("file", filepder) -} diff --git a/vendor/github.com/astaxie/beego/session/sess_mem.go b/vendor/github.com/astaxie/beego/session/sess_mem.go deleted file mode 100644 index 64d8b0561..000000000 --- a/vendor/github.com/astaxie/beego/session/sess_mem.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package session - -import ( - "container/list" - "net/http" - "sync" - "time" -) - -var mempder = &MemProvider{list: list.New(), sessions: make(map[string]*list.Element)} - -// MemSessionStore memory session store. -// it saved sessions in a map in memory. 
-type MemSessionStore struct { - sid string //session id - timeAccessed time.Time //last access time - value map[interface{}]interface{} //session store - lock sync.RWMutex -} - -// Set value to memory session -func (st *MemSessionStore) Set(key, value interface{}) error { - st.lock.Lock() - defer st.lock.Unlock() - st.value[key] = value - return nil -} - -// Get value from memory session by key -func (st *MemSessionStore) Get(key interface{}) interface{} { - st.lock.RLock() - defer st.lock.RUnlock() - if v, ok := st.value[key]; ok { - return v - } - return nil -} - -// Delete in memory session by key -func (st *MemSessionStore) Delete(key interface{}) error { - st.lock.Lock() - defer st.lock.Unlock() - delete(st.value, key) - return nil -} - -// Flush clear all values in memory session -func (st *MemSessionStore) Flush() error { - st.lock.Lock() - defer st.lock.Unlock() - st.value = make(map[interface{}]interface{}) - return nil -} - -// SessionID get this id of memory session store -func (st *MemSessionStore) SessionID() string { - return st.sid -} - -// SessionRelease Implement method, no used. -func (st *MemSessionStore) SessionRelease(w http.ResponseWriter) { -} - -// MemProvider Implement the provider interface -type MemProvider struct { - lock sync.RWMutex // locker - sessions map[string]*list.Element // map in memory - list *list.List // for gc - maxlifetime int64 - savePath string -} - -// SessionInit init memory session -func (pder *MemProvider) SessionInit(maxlifetime int64, savePath string) error { - pder.maxlifetime = maxlifetime - pder.savePath = savePath - return nil -} - -// SessionRead get memory session store by sid -func (pder *MemProvider) SessionRead(sid string) (Store, error) { - pder.lock.RLock() - if element, ok := pder.sessions[sid]; ok { - go pder.SessionUpdate(sid) - pder.lock.RUnlock() - return element.Value.(*MemSessionStore), nil - } - pder.lock.RUnlock() - pder.lock.Lock() - newsess := &MemSessionStore{sid: sid, timeAccessed: time.Now(), value: make(map[interface{}]interface{})} - element := pder.list.PushFront(newsess) - pder.sessions[sid] = element - pder.lock.Unlock() - return newsess, nil -} - -// SessionExist check session store exist in memory session by sid -func (pder *MemProvider) SessionExist(sid string) bool { - pder.lock.RLock() - defer pder.lock.RUnlock() - if _, ok := pder.sessions[sid]; ok { - return true - } - return false -} - -// SessionRegenerate generate new sid for session store in memory session -func (pder *MemProvider) SessionRegenerate(oldsid, sid string) (Store, error) { - pder.lock.RLock() - if element, ok := pder.sessions[oldsid]; ok { - go pder.SessionUpdate(oldsid) - pder.lock.RUnlock() - pder.lock.Lock() - element.Value.(*MemSessionStore).sid = sid - pder.sessions[sid] = element - delete(pder.sessions, oldsid) - pder.lock.Unlock() - return element.Value.(*MemSessionStore), nil - } - pder.lock.RUnlock() - pder.lock.Lock() - newsess := &MemSessionStore{sid: sid, timeAccessed: time.Now(), value: make(map[interface{}]interface{})} - element := pder.list.PushFront(newsess) - pder.sessions[sid] = element - pder.lock.Unlock() - return newsess, nil -} - -// SessionDestroy delete session store in memory session by id -func (pder *MemProvider) SessionDestroy(sid string) error { - pder.lock.Lock() - defer pder.lock.Unlock() - if element, ok := pder.sessions[sid]; ok { - delete(pder.sessions, sid) - pder.list.Remove(element) - return nil - } - return nil -} - -// SessionGC clean expired session stores in memory session -func (pder 
*MemProvider) SessionGC() { - pder.lock.RLock() - for { - element := pder.list.Back() - if element == nil { - break - } - if (element.Value.(*MemSessionStore).timeAccessed.Unix() + pder.maxlifetime) < time.Now().Unix() { - pder.lock.RUnlock() - pder.lock.Lock() - pder.list.Remove(element) - delete(pder.sessions, element.Value.(*MemSessionStore).sid) - pder.lock.Unlock() - pder.lock.RLock() - } else { - break - } - } - pder.lock.RUnlock() -} - -// SessionAll get count number of memory session -func (pder *MemProvider) SessionAll() int { - return pder.list.Len() -} - -// SessionUpdate expand time of session store by id in memory session -func (pder *MemProvider) SessionUpdate(sid string) error { - pder.lock.Lock() - defer pder.lock.Unlock() - if element, ok := pder.sessions[sid]; ok { - element.Value.(*MemSessionStore).timeAccessed = time.Now() - pder.list.MoveToFront(element) - return nil - } - return nil -} - -func init() { - Register("memory", mempder) -} diff --git a/vendor/github.com/astaxie/beego/session/sess_utils.go b/vendor/github.com/astaxie/beego/session/sess_utils.go deleted file mode 100644 index 2e3376c71..000000000 --- a/vendor/github.com/astaxie/beego/session/sess_utils.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package session - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/rand" - "crypto/sha1" - "crypto/subtle" - "encoding/base64" - "encoding/gob" - "errors" - "fmt" - "io" - "strconv" - "time" - - "github.com/astaxie/beego/utils" -) - -func init() { - gob.Register([]interface{}{}) - gob.Register(map[int]interface{}{}) - gob.Register(map[string]interface{}{}) - gob.Register(map[interface{}]interface{}{}) - gob.Register(map[string]string{}) - gob.Register(map[int]string{}) - gob.Register(map[int]int{}) - gob.Register(map[int]int64{}) -} - -// EncodeGob encode the obj to gob -func EncodeGob(obj map[interface{}]interface{}) ([]byte, error) { - for _, v := range obj { - gob.Register(v) - } - buf := bytes.NewBuffer(nil) - enc := gob.NewEncoder(buf) - err := enc.Encode(obj) - if err != nil { - return []byte(""), err - } - return buf.Bytes(), nil -} - -// DecodeGob decode data to map -func DecodeGob(encoded []byte) (map[interface{}]interface{}, error) { - buf := bytes.NewBuffer(encoded) - dec := gob.NewDecoder(buf) - var out map[interface{}]interface{} - err := dec.Decode(&out) - if err != nil { - return nil, err - } - return out, nil -} - -// generateRandomKey creates a random key with the given strength. -func generateRandomKey(strength int) []byte { - k := make([]byte, strength) - if n, err := io.ReadFull(rand.Reader, k); n != strength || err != nil { - return utils.RandomCreateBytes(strength) - } - return k -} - -// Encryption ----------------------------------------------------------------- - -// encrypt encrypts a value using the given block in counter mode. 
-// -// A random initialization vector (http://goo.gl/zF67k) with the length of the -// block size is prepended to the resulting ciphertext. -func encrypt(block cipher.Block, value []byte) ([]byte, error) { - iv := generateRandomKey(block.BlockSize()) - if iv == nil { - return nil, errors.New("encrypt: failed to generate random iv") - } - // Encrypt it. - stream := cipher.NewCTR(block, iv) - stream.XORKeyStream(value, value) - // Return iv + ciphertext. - return append(iv, value...), nil -} - -// decrypt decrypts a value using the given block in counter mode. -// -// The value to be decrypted must be prepended by a initialization vector -// (http://goo.gl/zF67k) with the length of the block size. -func decrypt(block cipher.Block, value []byte) ([]byte, error) { - size := block.BlockSize() - if len(value) > size { - // Extract iv. - iv := value[:size] - // Extract ciphertext. - value = value[size:] - // Decrypt it. - stream := cipher.NewCTR(block, iv) - stream.XORKeyStream(value, value) - return value, nil - } - return nil, errors.New("decrypt: the value could not be decrypted") -} - -func encodeCookie(block cipher.Block, hashKey, name string, value map[interface{}]interface{}) (string, error) { - var err error - var b []byte - // 1. EncodeGob. - if b, err = EncodeGob(value); err != nil { - return "", err - } - // 2. Encrypt (optional). - if b, err = encrypt(block, b); err != nil { - return "", err - } - b = encode(b) - // 3. Create MAC for "name|date|value". Extra pipe to be used later. - b = []byte(fmt.Sprintf("%s|%d|%s|", name, time.Now().UTC().Unix(), b)) - h := hmac.New(sha1.New, []byte(hashKey)) - h.Write(b) - sig := h.Sum(nil) - // Append mac, remove name. - b = append(b, sig...)[len(name)+1:] - // 4. Encode to base64. - b = encode(b) - // Done. - return string(b), nil -} - -func decodeCookie(block cipher.Block, hashKey, name, value string, gcmaxlifetime int64) (map[interface{}]interface{}, error) { - // 1. Decode from base64. - b, err := decode([]byte(value)) - if err != nil { - return nil, err - } - // 2. Verify MAC. Value is "date|value|mac". - parts := bytes.SplitN(b, []byte("|"), 3) - if len(parts) != 3 { - return nil, errors.New("Decode: invalid value format") - } - - b = append([]byte(name+"|"), b[:len(b)-len(parts[2])]...) - h := hmac.New(sha1.New, []byte(hashKey)) - h.Write(b) - sig := h.Sum(nil) - if len(sig) != len(parts[2]) || subtle.ConstantTimeCompare(sig, parts[2]) != 1 { - return nil, errors.New("Decode: the value is not valid") - } - // 3. Verify date ranges. - var t1 int64 - if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil { - return nil, errors.New("Decode: invalid timestamp") - } - t2 := time.Now().UTC().Unix() - if t1 > t2 { - return nil, errors.New("Decode: timestamp is too new") - } - if t1 < t2-gcmaxlifetime { - return nil, errors.New("Decode: expired timestamp") - } - // 4. Decrypt (optional). - b, err = decode(parts[1]) - if err != nil { - return nil, err - } - if b, err = decrypt(block, b); err != nil { - return nil, err - } - // 5. DecodeGob. - dst, err := DecodeGob(b) - if err != nil { - return nil, err - } - return dst, nil -} - -// Encoding ------------------------------------------------------------------- - -// encode encodes a value using base64. -func encode(value []byte) []byte { - encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value))) - base64.URLEncoding.Encode(encoded, value) - return encoded -} - -// decode decodes a cookie using base64. 
-func decode(value []byte) ([]byte, error) { - decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value))) - b, err := base64.URLEncoding.Decode(decoded, value) - if err != nil { - return nil, err - } - return decoded[:b], nil -} diff --git a/vendor/github.com/astaxie/beego/session/session.go b/vendor/github.com/astaxie/beego/session/session.go deleted file mode 100644 index c7e7dc69d..000000000 --- a/vendor/github.com/astaxie/beego/session/session.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package session provider -// -// Usage: -// import( -// "github.com/astaxie/beego/session" -// ) -// -// func init() { -// globalSessions, _ = session.NewManager("memory", `{"cookieName":"gosessionid", "enableSetCookie,omitempty": true, "gclifetime":3600, "maxLifetime": 3600, "secure": false, "cookieLifeTime": 3600, "providerConfig": ""}`) -// go globalSessions.GC() -// } -// -// more docs: http://beego.me/docs/module/session.md -package session - -import ( - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "log" - "net/http" - "net/textproto" - "net/url" - "os" - "time" -) - -// Store contains all data for one session process with specific id. -type Store interface { - Set(key, value interface{}) error //set session value - Get(key interface{}) interface{} //get session value - Delete(key interface{}) error //delete session value - SessionID() string //back current sessionID - SessionRelease(w http.ResponseWriter) // release the resource & save data to provider & return the data - Flush() error //delete all data -} - -// Provider contains global session methods and saved SessionStores. -// it can operate a SessionStore by its id. -type Provider interface { - SessionInit(gclifetime int64, config string) error - SessionRead(sid string) (Store, error) - SessionExist(sid string) bool - SessionRegenerate(oldsid, sid string) (Store, error) - SessionDestroy(sid string) error - SessionAll() int //get all active session - SessionGC() -} - -var provides = make(map[string]Provider) - -// SLogger a helpful variable to log information about session -var SLogger = NewSessionLog(os.Stderr) - -// Register makes a session provide available by the provided name. -// If Register is called twice with the same name or if driver is nil, -// it panics. 
-func Register(name string, provide Provider) { - if provide == nil { - panic("session: Register provide is nil") - } - if _, dup := provides[name]; dup { - panic("session: Register called twice for provider " + name) - } - provides[name] = provide -} - -// ManagerConfig define the session config -type ManagerConfig struct { - CookieName string `json:"cookieName"` - EnableSetCookie bool `json:"enableSetCookie,omitempty"` - Gclifetime int64 `json:"gclifetime"` - Maxlifetime int64 `json:"maxLifetime"` - DisableHTTPOnly bool `json:"disableHTTPOnly"` - Secure bool `json:"secure"` - CookieLifeTime int `json:"cookieLifeTime"` - ProviderConfig string `json:"providerConfig"` - Domain string `json:"domain"` - SessionIDLength int64 `json:"sessionIDLength"` - EnableSidInHTTPHeader bool `json:"EnableSidInHTTPHeader"` - SessionNameInHTTPHeader string `json:"SessionNameInHTTPHeader"` - EnableSidInURLQuery bool `json:"EnableSidInURLQuery"` - SessionIDPrefix string `json:"sessionIDPrefix"` -} - -// Manager contains Provider and its configuration. -type Manager struct { - provider Provider - config *ManagerConfig -} - -// NewManager Create new Manager with provider name and json config string. -// provider name: -// 1. cookie -// 2. file -// 3. memory -// 4. redis -// 5. mysql -// json config: -// 1. is https default false -// 2. hashfunc default sha1 -// 3. hashkey default beegosessionkey -// 4. maxage default is none -func NewManager(provideName string, cf *ManagerConfig) (*Manager, error) { - provider, ok := provides[provideName] - if !ok { - return nil, fmt.Errorf("session: unknown provide %q (forgotten import?)", provideName) - } - - if cf.Maxlifetime == 0 { - cf.Maxlifetime = cf.Gclifetime - } - - if cf.EnableSidInHTTPHeader { - if cf.SessionNameInHTTPHeader == "" { - panic(errors.New("SessionNameInHTTPHeader is empty")) - } - - strMimeHeader := textproto.CanonicalMIMEHeaderKey(cf.SessionNameInHTTPHeader) - if cf.SessionNameInHTTPHeader != strMimeHeader { - strErrMsg := "SessionNameInHTTPHeader (" + cf.SessionNameInHTTPHeader + ") has the wrong format, it should be like this : " + strMimeHeader - panic(errors.New(strErrMsg)) - } - } - - err := provider.SessionInit(cf.Maxlifetime, cf.ProviderConfig) - if err != nil { - return nil, err - } - - if cf.SessionIDLength == 0 { - cf.SessionIDLength = 16 - } - - return &Manager{ - provider, - cf, - }, nil -} - -// GetProvider return current manager's provider -func (manager *Manager) GetProvider() Provider { - return manager.provider -} - -// getSid retrieves session identifier from HTTP Request. -// First try to retrieve id by reading from cookie, session cookie name is configurable, -// if not exist, then retrieve id from querying parameters. -// -// error is not nil when there is anything wrong. -// sid is empty when need to generate a new session id -// otherwise return an valid session id. 
-func (manager *Manager) getSid(r *http.Request) (string, error) { - cookie, errs := r.Cookie(manager.config.CookieName) - if errs != nil || cookie.Value == "" { - var sid string - if manager.config.EnableSidInURLQuery { - errs := r.ParseForm() - if errs != nil { - return "", errs - } - - sid = r.FormValue(manager.config.CookieName) - } - - // if not found in Cookie / param, then read it from request headers - if manager.config.EnableSidInHTTPHeader && sid == "" { - sids, isFound := r.Header[manager.config.SessionNameInHTTPHeader] - if isFound && len(sids) != 0 { - return sids[0], nil - } - } - - return sid, nil - } - - // HTTP Request contains cookie for sessionid info. - return url.QueryUnescape(cookie.Value) -} - -// SessionStart generate or read the session id from http request. -// if session id exists, return SessionStore with this id. -func (manager *Manager) SessionStart(w http.ResponseWriter, r *http.Request) (session Store, err error) { - sid, errs := manager.getSid(r) - if errs != nil { - return nil, errs - } - - if sid != "" && manager.provider.SessionExist(sid) { - return manager.provider.SessionRead(sid) - } - - // Generate a new session - sid, errs = manager.sessionID() - if errs != nil { - return nil, errs - } - - session, err = manager.provider.SessionRead(sid) - if err != nil { - return nil, err - } - cookie := &http.Cookie{ - Name: manager.config.CookieName, - Value: url.QueryEscape(sid), - Path: "/", - HttpOnly: !manager.config.DisableHTTPOnly, - Secure: manager.isSecure(r), - Domain: manager.config.Domain, - } - if manager.config.CookieLifeTime > 0 { - cookie.MaxAge = manager.config.CookieLifeTime - cookie.Expires = time.Now().Add(time.Duration(manager.config.CookieLifeTime) * time.Second) - } - if manager.config.EnableSetCookie { - http.SetCookie(w, cookie) - } - r.AddCookie(cookie) - - if manager.config.EnableSidInHTTPHeader { - r.Header.Set(manager.config.SessionNameInHTTPHeader, sid) - w.Header().Set(manager.config.SessionNameInHTTPHeader, sid) - } - - return -} - -// SessionDestroy Destroy session by its id in http request cookie. -func (manager *Manager) SessionDestroy(w http.ResponseWriter, r *http.Request) { - if manager.config.EnableSidInHTTPHeader { - r.Header.Del(manager.config.SessionNameInHTTPHeader) - w.Header().Del(manager.config.SessionNameInHTTPHeader) - } - - cookie, err := r.Cookie(manager.config.CookieName) - if err != nil || cookie.Value == "" { - return - } - - sid, _ := url.QueryUnescape(cookie.Value) - manager.provider.SessionDestroy(sid) - if manager.config.EnableSetCookie { - expiration := time.Now() - cookie = &http.Cookie{Name: manager.config.CookieName, - Path: "/", - HttpOnly: !manager.config.DisableHTTPOnly, - Expires: expiration, - MaxAge: -1} - - http.SetCookie(w, cookie) - } -} - -// GetSessionStore Get SessionStore by its id. -func (manager *Manager) GetSessionStore(sid string) (sessions Store, err error) { - sessions, err = manager.provider.SessionRead(sid) - return -} - -// GC Start session gc process. -// it can do gc in times after gc lifetime. -func (manager *Manager) GC() { - manager.provider.SessionGC() - time.AfterFunc(time.Duration(manager.config.Gclifetime)*time.Second, func() { manager.GC() }) -} - -// SessionRegenerateID Regenerate a session id for this SessionStore who's id is saving in http request. 
-func (manager *Manager) SessionRegenerateID(w http.ResponseWriter, r *http.Request) (session Store) { - sid, err := manager.sessionID() - if err != nil { - return - } - cookie, err := r.Cookie(manager.config.CookieName) - if err != nil || cookie.Value == "" { - //delete old cookie - session, _ = manager.provider.SessionRead(sid) - cookie = &http.Cookie{Name: manager.config.CookieName, - Value: url.QueryEscape(sid), - Path: "/", - HttpOnly: !manager.config.DisableHTTPOnly, - Secure: manager.isSecure(r), - Domain: manager.config.Domain, - } - } else { - oldsid, _ := url.QueryUnescape(cookie.Value) - session, _ = manager.provider.SessionRegenerate(oldsid, sid) - cookie.Value = url.QueryEscape(sid) - cookie.HttpOnly = true - cookie.Path = "/" - } - if manager.config.CookieLifeTime > 0 { - cookie.MaxAge = manager.config.CookieLifeTime - cookie.Expires = time.Now().Add(time.Duration(manager.config.CookieLifeTime) * time.Second) - } - if manager.config.EnableSetCookie { - http.SetCookie(w, cookie) - } - r.AddCookie(cookie) - - if manager.config.EnableSidInHTTPHeader { - r.Header.Set(manager.config.SessionNameInHTTPHeader, sid) - w.Header().Set(manager.config.SessionNameInHTTPHeader, sid) - } - - return -} - -// GetActiveSession Get all active sessions count number. -func (manager *Manager) GetActiveSession() int { - return manager.provider.SessionAll() -} - -// SetSecure Set cookie with https. -func (manager *Manager) SetSecure(secure bool) { - manager.config.Secure = secure -} - -func (manager *Manager) sessionID() (string, error) { - b := make([]byte, manager.config.SessionIDLength) - n, err := rand.Read(b) - if n != len(b) || err != nil { - return "", fmt.Errorf("Could not successfully read from the system CSPRNG") - } - return manager.config.SessionIDPrefix + hex.EncodeToString(b), nil -} - -// Set cookie with https. -func (manager *Manager) isSecure(req *http.Request) bool { - if !manager.config.Secure { - return false - } - if req.URL.Scheme != "" { - return req.URL.Scheme == "https" - } - if req.TLS == nil { - return false - } - return true -} - -// Log implement the log.Logger -type Log struct { - *log.Logger -} - -// NewSessionLog set io.Writer to create a Logger for session. -func NewSessionLog(out io.Writer) *Log { - sl := new(Log) - sl.Logger = log.New(out, "[SESSION]", 1e9) - return sl -} diff --git a/vendor/github.com/astaxie/beego/staticfile.go b/vendor/github.com/astaxie/beego/staticfile.go deleted file mode 100644 index 68241a865..000000000 --- a/vendor/github.com/astaxie/beego/staticfile.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package beego - -import ( - "bytes" - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/astaxie/beego/context" - "github.com/astaxie/beego/logs" -) - -var errNotStaticRequest = errors.New("request not a static file request") - -func serverStaticRouter(ctx *context.Context) { - if ctx.Input.Method() != "GET" && ctx.Input.Method() != "HEAD" { - return - } - - forbidden, filePath, fileInfo, err := lookupFile(ctx) - if err == errNotStaticRequest { - return - } - - if forbidden { - exception("403", ctx) - return - } - - if filePath == "" || fileInfo == nil { - if BConfig.RunMode == DEV { - logs.Warn("Can't find/open the file:", filePath, err) - } - http.NotFound(ctx.ResponseWriter, ctx.Request) - return - } - if fileInfo.IsDir() { - requestURL := ctx.Input.URL() - if requestURL[len(requestURL)-1] != '/' { - redirectURL := requestURL + "/" - if ctx.Request.URL.RawQuery != "" { - redirectURL = redirectURL + "?" + ctx.Request.URL.RawQuery - } - ctx.Redirect(302, redirectURL) - } else { - //serveFile will list dir - http.ServeFile(ctx.ResponseWriter, ctx.Request, filePath) - } - return - } - - var enableCompress = BConfig.EnableGzip && isStaticCompress(filePath) - var acceptEncoding string - if enableCompress { - acceptEncoding = context.ParseEncoding(ctx.Request) - } - b, n, sch, reader, err := openFile(filePath, fileInfo, acceptEncoding) - if err != nil { - if BConfig.RunMode == DEV { - logs.Warn("Can't compress the file:", filePath, err) - } - http.NotFound(ctx.ResponseWriter, ctx.Request) - return - } - - if b { - ctx.Output.Header("Content-Encoding", n) - } else { - ctx.Output.Header("Content-Length", strconv.FormatInt(sch.size, 10)) - } - - http.ServeContent(ctx.ResponseWriter, ctx.Request, filePath, sch.modTime, reader) -} - -type serveContentHolder struct { - data []byte - modTime time.Time - size int64 - encoding string -} - -type serveContentReader struct { - *bytes.Reader -} - -var ( - staticFileMap = make(map[string]*serveContentHolder) - mapLock sync.RWMutex -) - -func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, string, *serveContentHolder, *serveContentReader, error) { - mapKey := acceptEncoding + ":" + filePath - mapLock.RLock() - mapFile := staticFileMap[mapKey] - mapLock.RUnlock() - if isOk(mapFile, fi) { - reader := &serveContentReader{Reader: bytes.NewReader(mapFile.data)} - return mapFile.encoding != "", mapFile.encoding, mapFile, reader, nil - } - mapLock.Lock() - defer mapLock.Unlock() - if mapFile = staticFileMap[mapKey]; !isOk(mapFile, fi) { - file, err := os.Open(filePath) - if err != nil { - return false, "", nil, nil, err - } - defer file.Close() - var bufferWriter bytes.Buffer - _, n, err := context.WriteFile(acceptEncoding, &bufferWriter, file) - if err != nil { - return false, "", nil, nil, err - } - mapFile = &serveContentHolder{data: bufferWriter.Bytes(), modTime: fi.ModTime(), size: int64(bufferWriter.Len()), encoding: n} - staticFileMap[mapKey] = mapFile - } - - reader := &serveContentReader{Reader: bytes.NewReader(mapFile.data)} - return mapFile.encoding != "", mapFile.encoding, mapFile, reader, nil -} - -func isOk(s *serveContentHolder, fi os.FileInfo) bool { - if s == nil { - return false - } - return s.modTime == fi.ModTime() && s.size == fi.Size() -} - -// isStaticCompress detect static files -func isStaticCompress(filePath string) bool { - for _, statExtension := range BConfig.WebConfig.StaticExtensionsToGzip { - if strings.HasSuffix(strings.ToLower(filePath), 
strings.ToLower(statExtension)) { - return true - } - } - return false -} - -// searchFile search the file by url path -// if none the static file prefix matches ,return notStaticRequestErr -func searchFile(ctx *context.Context) (string, os.FileInfo, error) { - requestPath := filepath.ToSlash(filepath.Clean(ctx.Request.URL.Path)) - // special processing : favicon.ico/robots.txt can be in any static dir - if requestPath == "/favicon.ico" || requestPath == "/robots.txt" { - file := path.Join(".", requestPath) - if fi, _ := os.Stat(file); fi != nil { - return file, fi, nil - } - for _, staticDir := range BConfig.WebConfig.StaticDir { - filePath := path.Join(staticDir, requestPath) - if fi, _ := os.Stat(filePath); fi != nil { - return filePath, fi, nil - } - } - return "", nil, errNotStaticRequest - } - - for prefix, staticDir := range BConfig.WebConfig.StaticDir { - if !strings.Contains(requestPath, prefix) { - continue - } - if prefix != "/" && len(requestPath) > len(prefix) && requestPath[len(prefix)] != '/' { - continue - } - filePath := path.Join(staticDir, requestPath[len(prefix):]) - if fi, err := os.Stat(filePath); fi != nil { - return filePath, fi, err - } - } - return "", nil, errNotStaticRequest -} - -// lookupFile find the file to serve -// if the file is dir ,search the index.html as default file( MUST NOT A DIR also) -// if the index.html not exist or is a dir, give a forbidden response depending on DirectoryIndex -func lookupFile(ctx *context.Context) (bool, string, os.FileInfo, error) { - fp, fi, err := searchFile(ctx) - if fp == "" || fi == nil { - return false, "", nil, err - } - if !fi.IsDir() { - return false, fp, fi, err - } - if requestURL := ctx.Input.URL(); requestURL[len(requestURL)-1] == '/' { - ifp := filepath.Join(fp, "index.html") - if ifi, _ := os.Stat(ifp); ifi != nil && ifi.Mode().IsRegular() { - return false, ifp, ifi, err - } - } - return !BConfig.WebConfig.DirectoryIndex, fp, fi, err -} diff --git a/vendor/github.com/astaxie/beego/template.go b/vendor/github.com/astaxie/beego/template.go deleted file mode 100644 index cf41cb9b3..000000000 --- a/vendor/github.com/astaxie/beego/template.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
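The openFile helper in the staticfile.go removal above caches compressed file bodies in a map guarded by a sync.RWMutex and re-checks the key after taking the write lock. A reduced sketch of that double-checked caching pattern, assuming a plain path-keyed cache rather than beego's encoding-aware one:

package main

import (
	"fmt"
	"os"
	"sync"
)

var (
	cache = make(map[string][]byte)
	mu    sync.RWMutex
)

// load returns the cached file body, reading it from disk at most once
// per key even when many goroutines race for the same path.
func load(path string) ([]byte, error) {
	mu.RLock()
	b, ok := cache[path]
	mu.RUnlock()
	if ok {
		return b, nil
	}

	mu.Lock()
	defer mu.Unlock()
	// Re-check: another goroutine may have filled the entry while we
	// were waiting for the write lock.
	if b, ok := cache[path]; ok {
		return b, nil
	}
	b, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	cache[path] = b
	return b, nil
}

func main() {
	if b, err := load("go.mod"); err == nil {
		fmt.Println(len(b), "bytes cached")
	}
}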
- -package beego - -import ( - "errors" - "fmt" - "html/template" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "regexp" - "strings" - "sync" - - "github.com/astaxie/beego/logs" - "github.com/astaxie/beego/utils" -) - -var ( - beegoTplFuncMap = make(template.FuncMap) - beeViewPathTemplateLocked = false - // beeViewPathTemplates caching map and supported template file extensions per view - beeViewPathTemplates = make(map[string]map[string]*template.Template) - templatesLock sync.RWMutex - // beeTemplateExt stores the template extension which will build - beeTemplateExt = []string{"tpl", "html"} - // beeTemplatePreprocessors stores associations of extension -> preprocessor handler - beeTemplateEngines = map[string]templatePreProcessor{} - beeTemplateFS = defaultFSFunc -) - -// ExecuteTemplate applies the template with name to the specified data object, -// writing the output to wr. -// A template will be executed safely in parallel. -func ExecuteTemplate(wr io.Writer, name string, data interface{}) error { - return ExecuteViewPathTemplate(wr, name, BConfig.WebConfig.ViewsPath, data) -} - -// ExecuteViewPathTemplate applies the template with name and from specific viewPath to the specified data object, -// writing the output to wr. -// A template will be executed safely in parallel. -func ExecuteViewPathTemplate(wr io.Writer, name string, viewPath string, data interface{}) error { - if BConfig.RunMode == DEV { - templatesLock.RLock() - defer templatesLock.RUnlock() - } - if beeTemplates, ok := beeViewPathTemplates[viewPath]; ok { - if t, ok := beeTemplates[name]; ok { - var err error - if t.Lookup(name) != nil { - err = t.ExecuteTemplate(wr, name, data) - } else { - err = t.Execute(wr, data) - } - if err != nil { - logs.Trace("template Execute err:", err) - } - return err - } - panic("can't find templatefile in the path:" + viewPath + "/" + name) - } - panic("Unknown view path:" + viewPath) -} - -func init() { - beegoTplFuncMap["dateformat"] = DateFormat - beegoTplFuncMap["date"] = Date - beegoTplFuncMap["compare"] = Compare - beegoTplFuncMap["compare_not"] = CompareNot - beegoTplFuncMap["not_nil"] = NotNil - beegoTplFuncMap["not_null"] = NotNil - beegoTplFuncMap["substr"] = Substr - beegoTplFuncMap["html2str"] = HTML2str - beegoTplFuncMap["str2html"] = Str2html - beegoTplFuncMap["htmlquote"] = Htmlquote - beegoTplFuncMap["htmlunquote"] = Htmlunquote - beegoTplFuncMap["renderform"] = RenderForm - beegoTplFuncMap["assets_js"] = AssetsJs - beegoTplFuncMap["assets_css"] = AssetsCSS - beegoTplFuncMap["config"] = GetConfig - beegoTplFuncMap["map_get"] = MapGet - - // Comparisons - beegoTplFuncMap["eq"] = eq // == - beegoTplFuncMap["ge"] = ge // >= - beegoTplFuncMap["gt"] = gt // > - beegoTplFuncMap["le"] = le // <= - beegoTplFuncMap["lt"] = lt // < - beegoTplFuncMap["ne"] = ne // != - - beegoTplFuncMap["urlfor"] = URLFor // build a URL to match a Controller and it's method -} - -// AddFuncMap let user to register a func in the template. -func AddFuncMap(key string, fn interface{}) error { - beegoTplFuncMap[key] = fn - return nil -} - -type templatePreProcessor func(root, path string, funcs template.FuncMap) (*template.Template, error) - -type templateFile struct { - root string - files map[string][]string -} - -// visit will make the paths into two part,the first is subDir (without tf.root),the second is full path(without tf.root). 
-// if tf.root="views" and -// paths is "views/errors/404.html",the subDir will be "errors",the file will be "errors/404.html" -// paths is "views/admin/errors/404.html",the subDir will be "admin/errors",the file will be "admin/errors/404.html" -func (tf *templateFile) visit(paths string, f os.FileInfo, err error) error { - if f == nil { - return err - } - if f.IsDir() || (f.Mode()&os.ModeSymlink) > 0 { - return nil - } - if !HasTemplateExt(paths) { - return nil - } - - replace := strings.NewReplacer("\\", "/") - file := strings.TrimLeft(replace.Replace(paths[len(tf.root):]), "/") - subDir := filepath.Dir(file) - - tf.files[subDir] = append(tf.files[subDir], file) - return nil -} - -// HasTemplateExt return this path contains supported template extension of beego or not. -func HasTemplateExt(paths string) bool { - for _, v := range beeTemplateExt { - if strings.HasSuffix(paths, "."+v) { - return true - } - } - return false -} - -// AddTemplateExt add new extension for template. -func AddTemplateExt(ext string) { - for _, v := range beeTemplateExt { - if v == ext { - return - } - } - beeTemplateExt = append(beeTemplateExt, ext) -} - -// AddViewPath adds a new path to the supported view paths. -//Can later be used by setting a controller ViewPath to this folder -//will panic if called after beego.Run() -func AddViewPath(viewPath string) error { - if beeViewPathTemplateLocked { - if _, exist := beeViewPathTemplates[viewPath]; exist { - return nil //Ignore if viewpath already exists - } - panic("Can not add new view paths after beego.Run()") - } - beeViewPathTemplates[viewPath] = make(map[string]*template.Template) - return BuildTemplate(viewPath) -} - -func lockViewPaths() { - beeViewPathTemplateLocked = true -} - -// BuildTemplate will build all template files in a directory. -// it makes beego can render any template file in view directory. -func BuildTemplate(dir string, files ...string) error { - var err error - fs := beeTemplateFS() - f, err := fs.Open(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return errors.New("dir open err") - } - defer f.Close() - - beeTemplates, ok := beeViewPathTemplates[dir] - if !ok { - panic("Unknown view path: " + dir) - } - self := &templateFile{ - root: dir, - files: make(map[string][]string), - } - err = Walk(fs, dir, func(path string, f os.FileInfo, err error) error { - return self.visit(path, f, err) - }) - if err != nil { - fmt.Printf("Walk() returned %v\n", err) - return err - } - buildAllFiles := len(files) == 0 - for _, v := range self.files { - for _, file := range v { - if buildAllFiles || utils.InSlice(file, files) { - templatesLock.Lock() - ext := filepath.Ext(file) - var t *template.Template - if len(ext) == 0 { - t, err = getTemplate(self.root, fs, file, v...) - } else if fn, ok := beeTemplateEngines[ext[1:]]; ok { - t, err = fn(self.root, file, beegoTplFuncMap) - } else { - t, err = getTemplate(self.root, fs, file, v...) 
- } - if err != nil { - logs.Error("parse template err:", file, err) - templatesLock.Unlock() - return err - } - beeTemplates[file] = t - templatesLock.Unlock() - } - } - } - return nil -} - -func getTplDeep(root string, fs http.FileSystem, file string, parent string, t *template.Template) (*template.Template, [][]string, error) { - var fileAbsPath string - var rParent string - var err error - if filepath.HasPrefix(file, "../") { - rParent = filepath.Join(filepath.Dir(parent), file) - fileAbsPath = filepath.Join(root, filepath.Dir(parent), file) - } else { - rParent = file - fileAbsPath = filepath.Join(root, file) - } - f, err := fs.Open(fileAbsPath) - defer f.Close() - if err != nil { - panic("can't find template file:" + file) - } - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, [][]string{}, err - } - t, err = t.New(file).Parse(string(data)) - if err != nil { - return nil, [][]string{}, err - } - reg := regexp.MustCompile(BConfig.WebConfig.TemplateLeft + "[ ]*template[ ]+\"([^\"]+)\"") - allSub := reg.FindAllStringSubmatch(string(data), -1) - for _, m := range allSub { - if len(m) == 2 { - tl := t.Lookup(m[1]) - if tl != nil { - continue - } - if !HasTemplateExt(m[1]) { - continue - } - _, _, err = getTplDeep(root, fs, m[1], rParent, t) - if err != nil { - return nil, [][]string{}, err - } - } - } - return t, allSub, nil -} - -func getTemplate(root string, fs http.FileSystem, file string, others ...string) (t *template.Template, err error) { - t = template.New(file).Delims(BConfig.WebConfig.TemplateLeft, BConfig.WebConfig.TemplateRight).Funcs(beegoTplFuncMap) - var subMods [][]string - t, subMods, err = getTplDeep(root, fs, file, "", t) - if err != nil { - return nil, err - } - t, err = _getTemplate(t, root, fs, subMods, others...) - - if err != nil { - return nil, err - } - return -} - -func _getTemplate(t0 *template.Template, root string, fs http.FileSystem, subMods [][]string, others ...string) (t *template.Template, err error) { - t = t0 - for _, m := range subMods { - if len(m) == 2 { - tpl := t.Lookup(m[1]) - if tpl != nil { - continue - } - //first check filename - for _, otherFile := range others { - if otherFile == m[1] { - var subMods1 [][]string - t, subMods1, err = getTplDeep(root, fs, otherFile, "", t) - if err != nil { - logs.Trace("template parse file err:", err) - } else if len(subMods1) > 0 { - t, err = _getTemplate(t, root, fs, subMods1, others...) - } - break - } - } - //second check define - for _, otherFile := range others { - var data []byte - fileAbsPath := filepath.Join(root, otherFile) - f, err := fs.Open(fileAbsPath) - if err != nil { - f.Close() - logs.Trace("template file parse error, not success open file:", err) - continue - } - data, err = ioutil.ReadAll(f) - f.Close() - if err != nil { - logs.Trace("template file parse error, not success read file:", err) - continue - } - reg := regexp.MustCompile(BConfig.WebConfig.TemplateLeft + "[ ]*define[ ]+\"([^\"]+)\"") - allSub := reg.FindAllStringSubmatch(string(data), -1) - for _, sub := range allSub { - if len(sub) == 2 && sub[1] == m[1] { - var subMods1 [][]string - t, subMods1, err = getTplDeep(root, fs, otherFile, "", t) - if err != nil { - logs.Trace("template parse file err:", err) - } else if len(subMods1) > 0 { - t, err = _getTemplate(t, root, fs, subMods1, others...) 
- if err != nil { - logs.Trace("template parse file err:", err) - } - } - break - } - } - } - } - - } - return -} - -type templateFSFunc func() http.FileSystem - -func defaultFSFunc() http.FileSystem { - return FileSystem{} -} - -// SetTemplateFSFunc set default filesystem function -func SetTemplateFSFunc(fnt templateFSFunc) { - beeTemplateFS = fnt -} - -// SetViewsPath sets view directory path in beego application. -func SetViewsPath(path string) *App { - BConfig.WebConfig.ViewsPath = path - return BeeApp -} - -// SetStaticPath sets static directory path and proper url pattern in beego application. -// if beego.SetStaticPath("static","public"), visit /static/* to load static file in folder "public". -func SetStaticPath(url string, path string) *App { - if !strings.HasPrefix(url, "/") { - url = "/" + url - } - if url != "/" { - url = strings.TrimRight(url, "/") - } - BConfig.WebConfig.StaticDir[url] = path - return BeeApp -} - -// DelStaticPath removes the static folder setting in this url pattern in beego application. -func DelStaticPath(url string) *App { - if !strings.HasPrefix(url, "/") { - url = "/" + url - } - if url != "/" { - url = strings.TrimRight(url, "/") - } - delete(BConfig.WebConfig.StaticDir, url) - return BeeApp -} - -// AddTemplateEngine add a new templatePreProcessor which support extension -func AddTemplateEngine(extension string, fn templatePreProcessor) *App { - AddTemplateExt(extension) - beeTemplateEngines[extension] = fn - return BeeApp -} diff --git a/vendor/github.com/astaxie/beego/templatefunc.go b/vendor/github.com/astaxie/beego/templatefunc.go deleted file mode 100644 index 8c1504aad..000000000 --- a/vendor/github.com/astaxie/beego/templatefunc.go +++ /dev/null @@ -1,766 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "errors" - "fmt" - "html" - "html/template" - "net/url" - "reflect" - "regexp" - "strconv" - "strings" - "time" -) - -const ( - formatTime = "15:04:05" - formatDate = "2006-01-02" - formatDateTime = "2006-01-02 15:04:05" - formatDateTimeT = "2006-01-02T15:04:05" -) - -// Substr returns the substr from start to length. -func Substr(s string, start, length int) string { - bt := []rune(s) - if start < 0 { - start = 0 - } - if start > len(bt) { - start = start % len(bt) - } - var end int - if (start + length) > (len(bt) - 1) { - end = len(bt) - } else { - end = start + length - } - return string(bt[start:end]) -} - -// HTML2str returns escaping text convert from html. 
-func HTML2str(html string) string {
-
-	re, _ := regexp.Compile(`\<[\S\s]+?\>`)
-	html = re.ReplaceAllStringFunc(html, strings.ToLower)
-
-	//remove STYLE
-	re, _ = regexp.Compile(`\<style[\S\s]+?\</style\>`)
-	html = re.ReplaceAllString(html, "")
-
-	//remove SCRIPT
-	re, _ = regexp.Compile(`\<script[\S\s]+?\</script\>`)
-	html = re.ReplaceAllString(html, "")
-
-	re, _ = regexp.Compile(`\<[\S\s]+?\>`)
-	html = re.ReplaceAllString(html, "\n")
-
-	re, _ = regexp.Compile(`\s{2,}`)
-	html = re.ReplaceAllString(html, "\n")
-
-	return strings.TrimSpace(html)
-}
-
-// DateFormat takes a time and a layout string and returns a string with the formatted date. Used by the template parser as "dateformat"
-func DateFormat(t time.Time, layout string) (datestring string) {
-	datestring = t.Format(layout)
-	return
-}
-
-// DateFormat pattern rules.
-var datePatterns = []string{
-	// year
-	"Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003
-	"y", "06", //A two digit representation of a year Examples: 99 or 03
-
-	// month
-	"m", "01", // Numeric representation of a month, with leading zeros 01 through 12
-	"n", "1", // Numeric representation of a month, without leading zeros 1 through 12
-	"M", "Jan", // A short textual representation of a month, three letters Jan through Dec
-	"F", "January", // A full textual representation of a month, such as January or March January through December
-
-	// day
-	"d", "02", // Day of the month, 2 digits with leading zeros 01 to 31
-	"j", "2", // Day of the month without leading zeros 1 to 31
-
-	// week
-	"D", "Mon", // A textual representation of a day, three letters Mon through Sun
-	"l", "Monday", // A full textual representation of the day of the week Sunday through Saturday
-
-	// time
-	"g", "3", // 12-hour format of an hour without leading zeros 1 through 12
-	"G", "15", // 24-hour format of an hour without leading zeros 0 through 23
-	"h", "03", // 12-hour format of an hour with leading zeros 01 through 12
-	"H", "15", // 24-hour format of an hour with leading zeros 00 through 23
-
-	"a", "pm", // Lowercase Ante meridiem and Post meridiem am or pm
-	"A", "PM", // Uppercase Ante meridiem and Post meridiem AM or PM
-
-	"i", "04", // Minutes with leading zeros 00 to 59
-	"s", "05", // Seconds, with leading zeros 00 through 59
-
-	// time zone
-	"T", "MST",
-	"P", "-07:00",
-	"O", "-0700",
-
-	// RFC 2822
-	"r", time.RFC1123Z,
-}
-
-// DateParse Parse Date use PHP time format.
-func DateParse(dateString, format string) (time.Time, error) {
-	replacer := strings.NewReplacer(datePatterns...)
-	format = replacer.Replace(format)
-	return time.ParseInLocation(format, dateString, time.Local)
-}
-
-// Date takes a PHP like date func to Go's time format.
-func Date(t time.Time, format string) string {
-	replacer := strings.NewReplacer(datePatterns...)
-	format = replacer.Replace(format)
-	return t.Format(format)
-}
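DateParse and Date above translate PHP-style format characters into Go reference-time layouts by running the format string through strings.NewReplacer(datePatterns...). A small self-contained sketch of the same idea, using a trimmed pattern table:

package main

import (
	"fmt"
	"strings"
	"time"
)

// A trimmed version of the pattern table: PHP token -> Go layout fragment.
var patterns = []string{
	"Y", "2006",
	"m", "01",
	"d", "02",
	"H", "15",
	"i", "04",
	"s", "05",
}

func main() {
	replacer := strings.NewReplacer(patterns...)
	layout := replacer.Replace("Y-m-d H:i:s") // -> "2006-01-02 15:04:05"

	t, err := time.ParseInLocation(layout, "2024-05-01 13:30:00", time.Local)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Format(layout))
}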
-// Compare is a quick and dirty comparison function. It will convert whatever you give it to strings and see if the two values are equal.
-// Whitespace is trimmed. Used by the template parser as "eq".
-func Compare(a, b interface{}) (equal bool) {
-	equal = false
-	if strings.TrimSpace(fmt.Sprintf("%v", a)) == strings.TrimSpace(fmt.Sprintf("%v", b)) {
-		equal = true
-	}
-	return
-}
-
-// CompareNot !Compare
-func CompareNot(a, b interface{}) (equal bool) {
-	return !Compare(a, b)
-}
-
-// NotNil the same as CompareNot
-func NotNil(a interface{}) (isNil bool) {
-	return CompareNot(a, nil)
-}
-
-// GetConfig get the Appconfig
-func GetConfig(returnType, key string, defaultVal interface{}) (value interface{}, err error) {
-	switch returnType {
-	case "String":
-		value = AppConfig.String(key)
-	case "Bool":
-		value, err = AppConfig.Bool(key)
-	case "Int":
-		value, err = AppConfig.Int(key)
-	case "Int64":
-		value, err = AppConfig.Int64(key)
-	case "Float":
-		value, err = AppConfig.Float(key)
-	case "DIY":
-		value, err = AppConfig.DIY(key)
-	default:
-		err = errors.New("Config keys must be of type String, Bool, Int, Int64, Float, or DIY")
-	}
-
-	if err != nil {
-		if reflect.TypeOf(returnType) != reflect.TypeOf(defaultVal) {
-			err = errors.New("defaultVal type does not match returnType")
-		} else {
-			value, err = defaultVal, nil
-		}
-	} else if reflect.TypeOf(value).Kind() == reflect.String {
-		if value == "" {
-			if reflect.TypeOf(defaultVal).Kind() != reflect.String {
-				err = errors.New("defaultVal type must be a String if the returnType is a String")
-			} else {
-				value = defaultVal.(string)
-			}
-		}
-	}
-
-	return
-}
-
-// Str2html Convert string to template.HTML type.
-func Str2html(raw string) template.HTML {
-	return template.HTML(raw)
-}
-
-// Htmlquote returns quoted html string.
-func Htmlquote(text string) string {
-	// encode HTML special characters as entities
-	/*
-	   Encodes `text` for raw use in HTML.
-	       >>> htmlquote("<'&\">")
-	       '&lt;&#39;&amp;&quot;&gt;'
-	*/
-
-	text = html.EscapeString(text)
-	text = strings.NewReplacer(
-		`“`, "&ldquo;",
-		`”`, "&rdquo;",
-		` `, "&nbsp;",
-	).Replace(text)
-
-	return strings.TrimSpace(text)
-}
-
-// Htmlunquote returns unquoted html string.
-func Htmlunquote(text string) string {
-	// decode entities back into HTML
-	/*
-	   Decodes `text` that's HTML quoted.
-	       >>> htmlunquote('&lt;&#39;&amp;&quot;&gt;')
-	       '<\\'&">'
-	*/
-
-	text = html.UnescapeString(text)
-
-	return strings.TrimSpace(text)
-}
-
-// URLFor returns url string with another registered controller handler with params.
-// usage:
-//
-// URLFor(".index")
-// print URLFor("index")
-// router /login
-// print URLFor("login")
-// print URLFor("login", "next","/"")
-// router /profile/:username
-// print UrlFor("profile", ":username","John Doe")
-// result:
-// /
-// /login
-// /login?next=/
-// /user/John%20Doe
-//
-// more detail http://beego.me/docs/mvc/controller/urlbuilding.md
-func URLFor(endpoint string, values ...interface{}) string {
-	return BeeApp.Handlers.URLFor(endpoint, values...)
-}
-
-// AssetsJs returns script tag with src string.
-func AssetsJs(text string) template.HTML {
-
-	text = "<script src=\"" + text + "\"></script>"
-
-	return template.HTML(text)
-}
-
-// AssetsCSS returns stylesheet link tag with src string.
-func AssetsCSS(text string) template.HTML {
-
-	text = "<link href=\"" + text + "\" rel=\"stylesheet\" />"
-
-	return template.HTML(text)
-}
-
-// ParseForm will parse form values to struct via tag.
-// Support for anonymous struct.
-func parseFormToStruct(form url.Values, objT reflect.Type, objV reflect.Value) error { - for i := 0; i < objT.NumField(); i++ { - fieldV := objV.Field(i) - if !fieldV.CanSet() { - continue - } - - fieldT := objT.Field(i) - if fieldT.Anonymous && fieldT.Type.Kind() == reflect.Struct { - err := parseFormToStruct(form, fieldT.Type, fieldV) - if err != nil { - return err - } - continue - } - - tags := strings.Split(fieldT.Tag.Get("form"), ",") - var tag string - if len(tags) == 0 || len(tags[0]) == 0 { - tag = fieldT.Name - } else if tags[0] == "-" { - continue - } else { - tag = tags[0] - } - - value := form.Get(tag) - if len(value) == 0 { - continue - } - - switch fieldT.Type.Kind() { - case reflect.Bool: - if strings.ToLower(value) == "on" || strings.ToLower(value) == "1" || strings.ToLower(value) == "yes" { - fieldV.SetBool(true) - continue - } - if strings.ToLower(value) == "off" || strings.ToLower(value) == "0" || strings.ToLower(value) == "no" { - fieldV.SetBool(false) - continue - } - b, err := strconv.ParseBool(value) - if err != nil { - return err - } - fieldV.SetBool(b) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return err - } - fieldV.SetInt(x) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - x, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return err - } - fieldV.SetUint(x) - case reflect.Float32, reflect.Float64: - x, err := strconv.ParseFloat(value, 64) - if err != nil { - return err - } - fieldV.SetFloat(x) - case reflect.Interface: - fieldV.Set(reflect.ValueOf(value)) - case reflect.String: - fieldV.SetString(value) - case reflect.Struct: - switch fieldT.Type.String() { - case "time.Time": - var ( - t time.Time - err error - ) - if len(value) >= 25 { - value = value[:25] - t, err = time.ParseInLocation(time.RFC3339, value, time.Local) - } else if len(value) >= 19 { - if strings.Contains(value, "T") { - value = value[:19] - t, err = time.ParseInLocation(formatDateTimeT, value, time.Local) - } else { - value = value[:19] - t, err = time.ParseInLocation(formatDateTime, value, time.Local) - } - } else if len(value) >= 10 { - if len(value) > 10 { - value = value[:10] - } - t, err = time.ParseInLocation(formatDate, value, time.Local) - } else if len(value) >= 8 { - if len(value) > 8 { - value = value[:8] - } - t, err = time.ParseInLocation(formatTime, value, time.Local) - } - if err != nil { - return err - } - fieldV.Set(reflect.ValueOf(t)) - } - case reflect.Slice: - if fieldT.Type == sliceOfInts { - formVals := form[tag] - fieldV.Set(reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf(int(1))), len(formVals), len(formVals))) - for i := 0; i < len(formVals); i++ { - val, err := strconv.Atoi(formVals[i]) - if err != nil { - return err - } - fieldV.Index(i).SetInt(int64(val)) - } - } else if fieldT.Type == sliceOfStrings { - formVals := form[tag] - fieldV.Set(reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf("")), len(formVals), len(formVals))) - for i := 0; i < len(formVals); i++ { - fieldV.Index(i).SetString(formVals[i]) - } - } - } - } - return nil -} - -// ParseForm will parse form values to struct via tag. 
-func ParseForm(form url.Values, obj interface{}) error { - objT := reflect.TypeOf(obj) - objV := reflect.ValueOf(obj) - if !isStructPtr(objT) { - return fmt.Errorf("%v must be a struct pointer", obj) - } - objT = objT.Elem() - objV = objV.Elem() - - return parseFormToStruct(form, objT, objV) -} - -var sliceOfInts = reflect.TypeOf([]int(nil)) -var sliceOfStrings = reflect.TypeOf([]string(nil)) - -var unKind = map[reflect.Kind]bool{ - reflect.Uintptr: true, - reflect.Complex64: true, - reflect.Complex128: true, - reflect.Array: true, - reflect.Chan: true, - reflect.Func: true, - reflect.Map: true, - reflect.Ptr: true, - reflect.Slice: true, - reflect.Struct: true, - reflect.UnsafePointer: true, -} - -// RenderForm will render object to form html. -// obj must be a struct pointer. -func RenderForm(obj interface{}) template.HTML { - objT := reflect.TypeOf(obj) - objV := reflect.ValueOf(obj) - if !isStructPtr(objT) { - return template.HTML("") - } - objT = objT.Elem() - objV = objV.Elem() - - var raw []string - for i := 0; i < objT.NumField(); i++ { - fieldV := objV.Field(i) - if !fieldV.CanSet() || unKind[fieldV.Kind()] { - continue - } - - fieldT := objT.Field(i) - - label, name, fType, id, class, ignored, required := parseFormTag(fieldT) - if ignored { - continue - } - - raw = append(raw, renderFormField(label, name, fType, fieldV.Interface(), id, class, required)) - } - return template.HTML(strings.Join(raw, "
")) -} - -// renderFormField returns a string containing HTML of a single form field. -func renderFormField(label, name, fType string, value interface{}, id string, class string, required bool) string { - if id != "" { - id = " id=\"" + id + "\"" - } - - if class != "" { - class = " class=\"" + class + "\"" - } - - requiredString := "" - if required { - requiredString = " required" - } - - if isValidForInput(fType) { - return fmt.Sprintf(`%v`, label, id, class, name, fType, value, requiredString) - } - - return fmt.Sprintf(`%v<%v%v%v name="%v"%v>%v`, label, fType, id, class, name, requiredString, value, fType) -} - -// isValidForInput checks if fType is a valid value for the `type` property of an HTML input element. -func isValidForInput(fType string) bool { - validInputTypes := strings.Fields("text password checkbox radio submit reset hidden image file button search email url tel number range date month week time datetime datetime-local color") - for _, validType := range validInputTypes { - if fType == validType { - return true - } - } - return false -} - -// parseFormTag takes the stuct-tag of a StructField and parses the `form` value. -// returned are the form label, name-property, type and wether the field should be ignored. -func parseFormTag(fieldT reflect.StructField) (label, name, fType string, id string, class string, ignored bool, required bool) { - tags := strings.Split(fieldT.Tag.Get("form"), ",") - label = fieldT.Name + ": " - name = fieldT.Name - fType = "text" - ignored = false - id = fieldT.Tag.Get("id") - class = fieldT.Tag.Get("class") - - required = false - requiredField := fieldT.Tag.Get("required") - if requiredField != "-" && requiredField != "" { - required, _ = strconv.ParseBool(requiredField) - } - - switch len(tags) { - case 1: - if tags[0] == "-" { - ignored = true - } - if len(tags[0]) > 0 { - name = tags[0] - } - case 2: - if len(tags[0]) > 0 { - name = tags[0] - } - if len(tags[1]) > 0 { - fType = tags[1] - } - case 3: - if len(tags[0]) > 0 { - name = tags[0] - } - if len(tags[1]) > 0 { - fType = tags[1] - } - if len(tags[2]) > 0 { - label = tags[2] - } - } - - return -} - -func isStructPtr(t reflect.Type) bool { - return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct -} - -// go1.2 added template funcs. begin -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// eq evaluates the comparison a == b || a == c || ... 
-func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - if k1 != k2 { - return false, errBadComparison - } - truth := false - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - if truth { - return true, nil - } - } - return false, nil -} - -// ne evaluates the comparison a != b. -func ne(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := eq(arg1, arg2) - return !equal, err -} - -// lt evaluates the comparison a < b. -func lt(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - if k1 != k2 { - return false, errBadComparison - } - truth := false - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") - } - return truth, nil -} - -// le evaluates the comparison <= b. -func le(arg1, arg2 interface{}) (bool, error) { - // <= is < or ==. - lessThan, err := lt(arg1, arg2) - if lessThan || err != nil { - return lessThan, err - } - return eq(arg1, arg2) -} - -// gt evaluates the comparison a > b. -func gt(arg1, arg2 interface{}) (bool, error) { - // > is the inverse of <=. - lessOrEqual, err := le(arg1, arg2) - if err != nil { - return false, err - } - return !lessOrEqual, nil -} - -// ge evaluates the comparison a >= b. -func ge(arg1, arg2 interface{}) (bool, error) { - // >= is the inverse of <. 
- lessThan, err := lt(arg1, arg2) - if err != nil { - return false, err - } - return !lessThan, nil -} - -// MapGet getting value from map by keys -// usage: -// Data["m"] = M{ -// "a": 1, -// "1": map[string]float64{ -// "c": 4, -// }, -// } -// -// {{ map_get m "a" }} // return 1 -// {{ map_get m 1 "c" }} // return 4 -func MapGet(arg1 interface{}, arg2 ...interface{}) (interface{}, error) { - arg1Type := reflect.TypeOf(arg1) - arg1Val := reflect.ValueOf(arg1) - - if arg1Type.Kind() == reflect.Map && len(arg2) > 0 { - // check whether arg2[0] type equals to arg1 key type - // if they are different, make conversion - arg2Val := reflect.ValueOf(arg2[0]) - arg2Type := reflect.TypeOf(arg2[0]) - if arg2Type.Kind() != arg1Type.Key().Kind() { - // convert arg2Value to string - var arg2ConvertedVal interface{} - arg2String := fmt.Sprintf("%v", arg2[0]) - - // convert string representation to any other type - switch arg1Type.Key().Kind() { - case reflect.Bool: - arg2ConvertedVal, _ = strconv.ParseBool(arg2String) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - arg2ConvertedVal, _ = strconv.ParseInt(arg2String, 0, 64) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - arg2ConvertedVal, _ = strconv.ParseUint(arg2String, 0, 64) - case reflect.Float32, reflect.Float64: - arg2ConvertedVal, _ = strconv.ParseFloat(arg2String, 64) - case reflect.String: - arg2ConvertedVal = arg2String - default: - arg2ConvertedVal = arg2Val.Interface() - } - arg2Val = reflect.ValueOf(arg2ConvertedVal) - } - - storedVal := arg1Val.MapIndex(arg2Val) - - if storedVal.IsValid() { - var result interface{} - - switch arg1Type.Elem().Kind() { - case reflect.Bool: - result = storedVal.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - result = storedVal.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - result = storedVal.Uint() - case reflect.Float32, reflect.Float64: - result = storedVal.Float() - case reflect.String: - result = storedVal.String() - default: - result = storedVal.Interface() - } - - // if there is more keys, handle this recursively - if len(arg2) > 1 { - return MapGet(result, arg2[1:]...) - } - return result, nil - } - return nil, nil - - } - return nil, nil -} diff --git a/vendor/github.com/astaxie/beego/toolbox/healthcheck.go b/vendor/github.com/astaxie/beego/toolbox/healthcheck.go deleted file mode 100644 index e3544b3ad..000000000 --- a/vendor/github.com/astaxie/beego/toolbox/healthcheck.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
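Helpers such as MapGet and the eq/lt comparison functions above reach templates through a template.FuncMap, which is what AddFuncMap in the removed template.go maintains. A self-contained sketch of registering a similar helper with html/template directly (the helper shown is a stand-in, not the beego implementation):

package main

import (
	"html/template"
	"os"
)

func main() {
	funcs := template.FuncMap{
		// "map_get" style helper: look up a key in a string map.
		"map_get": func(m map[string]string, key string) string { return m[key] },
	}

	// Funcs must be installed before Parse so the template can resolve the name.
	tpl := template.Must(template.New("page").Funcs(funcs).Parse(
		`value: {{ map_get . "a" }}` + "\n"))

	data := map[string]string{"a": "hello"}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}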
- -// Package toolbox healthcheck -// -// type DatabaseCheck struct { -// } -// -// func (dc *DatabaseCheck) Check() error { -// if dc.isConnected() { -// return nil -// } else { -// return errors.New("can't connect database") -// } -// } -// -// AddHealthCheck("database",&DatabaseCheck{}) -// -// more docs: http://beego.me/docs/module/toolbox.md -package toolbox - -// AdminCheckList holds health checker map -var AdminCheckList map[string]HealthChecker - -// HealthChecker health checker interface -type HealthChecker interface { - Check() error -} - -// AddHealthCheck add health checker with name string -func AddHealthCheck(name string, hc HealthChecker) { - AdminCheckList[name] = hc -} - -func init() { - AdminCheckList = make(map[string]HealthChecker) -} diff --git a/vendor/github.com/astaxie/beego/toolbox/profile.go b/vendor/github.com/astaxie/beego/toolbox/profile.go deleted file mode 100644 index 06e40ede7..000000000 --- a/vendor/github.com/astaxie/beego/toolbox/profile.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package toolbox - -import ( - "fmt" - "io" - "log" - "os" - "path" - "runtime" - "runtime/debug" - "runtime/pprof" - "strconv" - "time" -) - -var startTime = time.Now() -var pid int - -func init() { - pid = os.Getpid() -} - -// ProcessInput parse input command string -func ProcessInput(input string, w io.Writer) { - switch input { - case "lookup goroutine": - p := pprof.Lookup("goroutine") - p.WriteTo(w, 2) - case "lookup heap": - p := pprof.Lookup("heap") - p.WriteTo(w, 2) - case "lookup threadcreate": - p := pprof.Lookup("threadcreate") - p.WriteTo(w, 2) - case "lookup block": - p := pprof.Lookup("block") - p.WriteTo(w, 2) - case "get cpuprof": - GetCPUProfile(w) - case "get memprof": - MemProf(w) - case "gc summary": - PrintGCSummary(w) - } -} - -// MemProf record memory profile in pprof -func MemProf(w io.Writer) { - filename := "mem-" + strconv.Itoa(pid) + ".memprof" - if f, err := os.Create(filename); err != nil { - fmt.Fprintf(w, "create file %s error %s\n", filename, err.Error()) - log.Fatal("record heap profile failed: ", err) - } else { - runtime.GC() - pprof.WriteHeapProfile(f) - f.Close() - fmt.Fprintf(w, "create heap profile %s \n", filename) - _, fl := path.Split(os.Args[0]) - fmt.Fprintf(w, "Now you can use this to check it: go tool pprof %s %s\n", fl, filename) - } -} - -// GetCPUProfile start cpu profile monitor -func GetCPUProfile(w io.Writer) { - sec := 30 - filename := "cpu-" + strconv.Itoa(pid) + ".pprof" - f, err := os.Create(filename) - if err != nil { - fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err) - log.Fatal("record cpu profile failed: ", err) - } - pprof.StartCPUProfile(f) - time.Sleep(time.Duration(sec) * time.Second) - pprof.StopCPUProfile() - - fmt.Fprintf(w, "create cpu profile %s \n", filename) - _, fl := path.Split(os.Args[0]) - fmt.Fprintf(w, "Now you can use this to check it: go tool pprof %s %s\n", fl, filename) -} - -// 
PrintGCSummary print gc information to io.Writer -func PrintGCSummary(w io.Writer) { - memStats := &runtime.MemStats{} - runtime.ReadMemStats(memStats) - gcstats := &debug.GCStats{PauseQuantiles: make([]time.Duration, 100)} - debug.ReadGCStats(gcstats) - - printGC(memStats, gcstats, w) -} - -func printGC(memStats *runtime.MemStats, gcstats *debug.GCStats, w io.Writer) { - - if gcstats.NumGC > 0 { - lastPause := gcstats.Pause[0] - elapsed := time.Now().Sub(startTime) - overhead := float64(gcstats.PauseTotal) / float64(elapsed) * 100 - allocatedRate := float64(memStats.TotalAlloc) / elapsed.Seconds() - - fmt.Fprintf(w, "NumGC:%d Pause:%s Pause(Avg):%s Overhead:%3.2f%% Alloc:%s Sys:%s Alloc(Rate):%s/s Histogram:%s %s %s \n", - gcstats.NumGC, - toS(lastPause), - toS(avg(gcstats.Pause)), - overhead, - toH(memStats.Alloc), - toH(memStats.Sys), - toH(uint64(allocatedRate)), - toS(gcstats.PauseQuantiles[94]), - toS(gcstats.PauseQuantiles[98]), - toS(gcstats.PauseQuantiles[99])) - } else { - // while GC has disabled - elapsed := time.Now().Sub(startTime) - allocatedRate := float64(memStats.TotalAlloc) / elapsed.Seconds() - - fmt.Fprintf(w, "Alloc:%s Sys:%s Alloc(Rate):%s/s\n", - toH(memStats.Alloc), - toH(memStats.Sys), - toH(uint64(allocatedRate))) - } -} - -func avg(items []time.Duration) time.Duration { - var sum time.Duration - for _, item := range items { - sum += item - } - return time.Duration(int64(sum) / int64(len(items))) -} - -// format bytes number friendly -func toH(bytes uint64) string { - switch { - case bytes < 1024: - return fmt.Sprintf("%dB", bytes) - case bytes < 1024*1024: - return fmt.Sprintf("%.2fK", float64(bytes)/1024) - case bytes < 1024*1024*1024: - return fmt.Sprintf("%.2fM", float64(bytes)/1024/1024) - default: - return fmt.Sprintf("%.2fG", float64(bytes)/1024/1024/1024) - } -} - -// short string format -func toS(d time.Duration) string { - - u := uint64(d) - if u < uint64(time.Second) { - switch { - case u == 0: - return "0" - case u < uint64(time.Microsecond): - return fmt.Sprintf("%.2fns", float64(u)) - case u < uint64(time.Millisecond): - return fmt.Sprintf("%.2fus", float64(u)/1000) - default: - return fmt.Sprintf("%.2fms", float64(u)/1000/1000) - } - } else { - switch { - case u < uint64(time.Minute): - return fmt.Sprintf("%.2fs", float64(u)/1000/1000/1000) - case u < uint64(time.Hour): - return fmt.Sprintf("%.2fm", float64(u)/1000/1000/1000/60) - default: - return fmt.Sprintf("%.2fh", float64(u)/1000/1000/1000/60/60) - } - } - -} diff --git a/vendor/github.com/astaxie/beego/toolbox/statistics.go b/vendor/github.com/astaxie/beego/toolbox/statistics.go deleted file mode 100644 index d014544c3..000000000 --- a/vendor/github.com/astaxie/beego/toolbox/statistics.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
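MemProf and GetCPUProfile in the removed profile.go write pprof dumps to files named after the current pid. Roughly the same heap snapshot can be taken straight from runtime/pprof, as in this sketch (the output file name is arbitrary):

package main

import (
	"log"
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("mem.pprof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	runtime.GC() // flush recent allocations into the heap profile
	if err := pprof.WriteHeapProfile(f); err != nil {
		log.Fatal(err)
	}
	log.Println("inspect with: go tool pprof mem.pprof")
}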
- -package toolbox - -import ( - "fmt" - "sync" - "time" -) - -// Statistics struct -type Statistics struct { - RequestURL string - RequestController string - RequestNum int64 - MinTime time.Duration - MaxTime time.Duration - TotalTime time.Duration -} - -// URLMap contains several statistics struct to log different data -type URLMap struct { - lock sync.RWMutex - LengthLimit int //limit the urlmap's length if it's equal to 0 there's no limit - urlmap map[string]map[string]*Statistics -} - -// AddStatistics add statistics task. -// it needs request method, request url, request controller and statistics time duration -func (m *URLMap) AddStatistics(requestMethod, requestURL, requestController string, requesttime time.Duration) { - m.lock.Lock() - defer m.lock.Unlock() - if method, ok := m.urlmap[requestURL]; ok { - if s, ok := method[requestMethod]; ok { - s.RequestNum++ - if s.MaxTime < requesttime { - s.MaxTime = requesttime - } - if s.MinTime > requesttime { - s.MinTime = requesttime - } - s.TotalTime += requesttime - } else { - nb := &Statistics{ - RequestURL: requestURL, - RequestController: requestController, - RequestNum: 1, - MinTime: requesttime, - MaxTime: requesttime, - TotalTime: requesttime, - } - m.urlmap[requestURL][requestMethod] = nb - } - - } else { - if m.LengthLimit > 0 && m.LengthLimit <= len(m.urlmap) { - return - } - methodmap := make(map[string]*Statistics) - nb := &Statistics{ - RequestURL: requestURL, - RequestController: requestController, - RequestNum: 1, - MinTime: requesttime, - MaxTime: requesttime, - TotalTime: requesttime, - } - methodmap[requestMethod] = nb - m.urlmap[requestURL] = methodmap - } -} - -// GetMap put url statistics result in io.Writer -func (m *URLMap) GetMap() map[string]interface{} { - m.lock.RLock() - defer m.lock.RUnlock() - - var fields = []string{"requestUrl", "method", "times", "used", "max used", "min used", "avg used"} - - var resultLists [][]string - content := make(map[string]interface{}) - content["Fields"] = fields - - for k, v := range m.urlmap { - for kk, vv := range v { - result := []string{ - fmt.Sprintf("% -50s", k), - fmt.Sprintf("% -10s", kk), - fmt.Sprintf("% -16d", vv.RequestNum), - fmt.Sprintf("%d", vv.TotalTime), - fmt.Sprintf("% -16s", toS(vv.TotalTime)), - fmt.Sprintf("%d", vv.MaxTime), - fmt.Sprintf("% -16s", toS(vv.MaxTime)), - fmt.Sprintf("%d", vv.MinTime), - fmt.Sprintf("% -16s", toS(vv.MinTime)), - fmt.Sprintf("%d", time.Duration(int64(vv.TotalTime)/vv.RequestNum)), - fmt.Sprintf("% -16s", toS(time.Duration(int64(vv.TotalTime)/vv.RequestNum))), - } - resultLists = append(resultLists, result) - } - } - content["Data"] = resultLists - return content -} - -// GetMapData return all mapdata -func (m *URLMap) GetMapData() []map[string]interface{} { - m.lock.Lock() - defer m.lock.Unlock() - - var resultLists []map[string]interface{} - - for k, v := range m.urlmap { - for kk, vv := range v { - result := map[string]interface{}{ - "request_url": k, - "method": kk, - "times": vv.RequestNum, - "total_time": toS(vv.TotalTime), - "max_time": toS(vv.MaxTime), - "min_time": toS(vv.MinTime), - "avg_time": toS(time.Duration(int64(vv.TotalTime) / vv.RequestNum)), - } - resultLists = append(resultLists, result) - } - } - return resultLists -} - -// StatisticsMap hosld global statistics data map -var StatisticsMap *URLMap - -func init() { - StatisticsMap = &URLMap{ - urlmap: make(map[string]map[string]*Statistics), - } -} diff --git a/vendor/github.com/astaxie/beego/toolbox/task.go b/vendor/github.com/astaxie/beego/toolbox/task.go 
deleted file mode 100644 index 7e841e893..000000000 --- a/vendor/github.com/astaxie/beego/toolbox/task.go +++ /dev/null @@ -1,619 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package toolbox - -import ( - "log" - "math" - "sort" - "strconv" - "strings" - "time" -) - -// bounds provides a range of acceptable values (plus a map of name to value). -type bounds struct { - min, max uint - names map[string]uint -} - -// The bounds for each field. -var ( - AdminTaskList map[string]Tasker - stop chan bool - changed chan bool - isstart bool - seconds = bounds{0, 59, nil} - minutes = bounds{0, 59, nil} - hours = bounds{0, 23, nil} - days = bounds{1, 31, nil} - months = bounds{1, 12, map[string]uint{ - "jan": 1, - "feb": 2, - "mar": 3, - "apr": 4, - "may": 5, - "jun": 6, - "jul": 7, - "aug": 8, - "sep": 9, - "oct": 10, - "nov": 11, - "dec": 12, - }} - weeks = bounds{0, 6, map[string]uint{ - "sun": 0, - "mon": 1, - "tue": 2, - "wed": 3, - "thu": 4, - "fri": 5, - "sat": 6, - }} -) - -const ( - // Set the top bit if a star was included in the expression. - starBit = 1 << 63 -) - -// Schedule time taks schedule -type Schedule struct { - Second uint64 - Minute uint64 - Hour uint64 - Day uint64 - Month uint64 - Week uint64 -} - -// TaskFunc task func type -type TaskFunc func() error - -// Tasker task interface -type Tasker interface { - GetSpec() string - GetStatus() string - Run() error - SetNext(time.Time) - GetNext() time.Time - SetPrev(time.Time) - GetPrev() time.Time -} - -// task error -type taskerr struct { - t time.Time - errinfo string -} - -// Task task struct -type Task struct { - Taskname string - Spec *Schedule - SpecStr string - DoFunc TaskFunc - Prev time.Time - Next time.Time - Errlist []*taskerr // like errtime:errinfo - ErrLimit int // max length for the errlist, 0 stand for no limit -} - -// NewTask add new task with name, time and func -func NewTask(tname string, spec string, f TaskFunc) *Task { - - task := &Task{ - Taskname: tname, - DoFunc: f, - ErrLimit: 100, - SpecStr: spec, - } - task.SetCron(spec) - return task -} - -// GetSpec get spec string -func (t *Task) GetSpec() string { - return t.SpecStr -} - -// GetStatus get current task status -func (t *Task) GetStatus() string { - var str string - for _, v := range t.Errlist { - str += v.t.String() + ":" + v.errinfo + "
" - } - return str -} - -// Run run all tasks -func (t *Task) Run() error { - err := t.DoFunc() - if err != nil { - if t.ErrLimit > 0 && t.ErrLimit > len(t.Errlist) { - t.Errlist = append(t.Errlist, &taskerr{t: t.Next, errinfo: err.Error()}) - } - } - return err -} - -// SetNext set next time for this task -func (t *Task) SetNext(now time.Time) { - t.Next = t.Spec.Next(now) -} - -// GetNext get the next call time of this task -func (t *Task) GetNext() time.Time { - return t.Next -} - -// SetPrev set prev time of this task -func (t *Task) SetPrev(now time.Time) { - t.Prev = now -} - -// GetPrev get prev time of this task -func (t *Task) GetPrev() time.Time { - return t.Prev -} - -// six columns mean: -// second:0-59 -// minute:0-59 -// hour:1-23 -// day:1-31 -// month:1-12 -// week:0-6(0 means Sunday) - -// SetCron some signals: -// *: any time -// ,:  separate signal -//   -:duration -// /n : do as n times of time duration -///////////////////////////////////////////////////////// -// 0/30 * * * * * every 30s -// 0 43 21 * * * 21:43 -// 0 15 05 * * *    05:15 -// 0 0 17 * * * 17:00 -// 0 0 17 * * 1 17:00 in every Monday -// 0 0,10 17 * * 0,2,3 17:00 and 17:10 in every Sunday, Tuesday and Wednesday -// 0 0-10 17 1 * * 17:00 to 17:10 in 1 min duration each time on the first day of month -// 0 0 0 1,15 * 1 0:00 on the 1st day and 15th day of month -// 0 42 4 1 * *     4:42 on the 1st day of month -// 0 0 21 * * 1-6   21:00 from Monday to Saturday -// 0 0,10,20,30,40,50 * * * *  every 10 min duration -// 0 */10 * * * *        every 10 min duration -// 0 * 1 * * *         1:00 to 1:59 in 1 min duration each time -// 0 0 1 * * *         1:00 -// 0 0 */1 * * *        0 min of hour in 1 hour duration -// 0 0 * * * *         0 min of hour in 1 hour duration -// 0 2 8-20/3 * * *       8:02, 11:02, 14:02, 17:02, 20:02 -// 0 30 5 1,15 * *       5:30 on the 1st day and 15th day of month -func (t *Task) SetCron(spec string) { - t.Spec = t.parse(spec) -} - -func (t *Task) parse(spec string) *Schedule { - if len(spec) > 0 && spec[0] == '@' { - return t.parseSpec(spec) - } - // Split on whitespace. We require 5 or 6 fields. - // (second) (minute) (hour) (day of month) (month) (day of week, optional) - fields := strings.Fields(spec) - if len(fields) != 5 && len(fields) != 6 { - log.Panicf("Expected 5 or 6 fields, found %d: %s", len(fields), spec) - } - - // If a sixth field is not provided (DayOfWeek), then it is equivalent to star. 
-	if len(fields) == 5 {
-		fields = append(fields, "*")
-	}
-
-	schedule := &Schedule{
-		Second: getField(fields[0], seconds),
-		Minute: getField(fields[1], minutes),
-		Hour:   getField(fields[2], hours),
-		Day:    getField(fields[3], days),
-		Month:  getField(fields[4], months),
-		Week:   getField(fields[5], weeks),
-	}
-
-	return schedule
-}
-
-func (t *Task) parseSpec(spec string) *Schedule {
-	switch spec {
-	case "@yearly", "@annually":
-		return &Schedule{
-			Second: 1 << seconds.min,
-			Minute: 1 << minutes.min,
-			Hour:   1 << hours.min,
-			Day:    1 << days.min,
-			Month:  1 << months.min,
-			Week:   all(weeks),
-		}
-
-	case "@monthly":
-		return &Schedule{
-			Second: 1 << seconds.min,
-			Minute: 1 << minutes.min,
-			Hour:   1 << hours.min,
-			Day:    1 << days.min,
-			Month:  all(months),
-			Week:   all(weeks),
-		}
-
-	case "@weekly":
-		return &Schedule{
-			Second: 1 << seconds.min,
-			Minute: 1 << minutes.min,
-			Hour:   1 << hours.min,
-			Day:    all(days),
-			Month:  all(months),
-			Week:   1 << weeks.min,
-		}
-
-	case "@daily", "@midnight":
-		return &Schedule{
-			Second: 1 << seconds.min,
-			Minute: 1 << minutes.min,
-			Hour:   1 << hours.min,
-			Day:    all(days),
-			Month:  all(months),
-			Week:   all(weeks),
-		}
-
-	case "@hourly":
-		return &Schedule{
-			Second: 1 << seconds.min,
-			Minute: 1 << minutes.min,
-			Hour:   all(hours),
-			Day:    all(days),
-			Month:  all(months),
-			Week:   all(weeks),
-		}
-	}
-	log.Panicf("Unrecognized descriptor: %s", spec)
-	return nil
-}
-
-// Next set schedule to next time
-func (s *Schedule) Next(t time.Time) time.Time {
-
-	// Start at the earliest possible time (the upcoming second).
-	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
-
-	// This flag indicates whether a field has been incremented.
-	added := false
-
-	// If no time is found within five years, return zero.
-	yearLimit := t.Year() + 5
-
-WRAP:
-	if t.Year() > yearLimit {
-		return time.Time{}
-	}
-
-	// Find the first applicable month.
-	// If it's this month, then do nothing.
-	for 1<<uint(t.Month())&s.Month == 0 {
-		// If we have to add a month, reset the other parts to 0.
-		if !added {
-			added = true
-			// Otherwise, set the date at the beginning (since the current time is irrelevant).
-			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
-		}
-		t = t.AddDate(0, 1, 0)
-
-		// Wrapped around.
-		if t.Month() == time.January {
-			goto WRAP
-		}
-	}
-
-	// Now get a day in that month.
-	for !dayMatches(s, t) {
-		if !added {
-			added = true
-			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
-		}
-		t = t.AddDate(0, 0, 1)
-
-		if t.Day() == 1 {
-			goto WRAP
-		}
-	}
-
-	for 1<<uint(t.Hour())&s.Hour == 0 {
-		if !added {
-			added = true
-			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
-		}
-		t = t.Add(1 * time.Hour)
-
-		if t.Hour() == 0 {
-			goto WRAP
-		}
-	}
-
-	for 1<<uint(t.Minute())&s.Minute == 0 {
-		if !added {
-			added = true
-			t = t.Truncate(time.Minute)
-		}
-		t = t.Add(1 * time.Minute)
-
-		if t.Minute() == 0 {
-			goto WRAP
-		}
-	}
-
-	for 1<<uint(t.Second())&s.Second == 0 {
-		if !added {
-			added = true
-			t = t.Truncate(time.Second)
-		}
-		t = t.Add(1 * time.Second)
-
-		if t.Second() == 0 {
-			goto WRAP
-		}
-	}
-
-	return t
-}
-
-// dayMatches returns true if the schedule's day-of-week and day-of-month
-// restrictions are satisfied by the given time.
-func dayMatches(s *Schedule, t time.Time) bool {
-	var (
-		domMatch = 1<<uint(t.Day())&s.Day > 0
-		dowMatch = 1<<uint(t.Weekday())&s.Week > 0
-	)
-
-	if s.Day&starBit > 0 || s.Week&starBit > 0 {
-		return domMatch && dowMatch
-	}
-	return domMatch || dowMatch
-}
-
-// StartTask start all tasks
-func StartTask() {
-	if isstart {
-		//If already started, no need to start another goroutine.
-		return
-	}
-	isstart = true
-	go run()
-}
-
-func run() {
-	now := time.Now().Local()
-	for _, t := range AdminTaskList {
-		t.SetNext(now)
-	}
-
-	for {
-		sortList := NewMapSorter(AdminTaskList)
-		sortList.Sort()
-		var effective time.Time
-		if len(AdminTaskList) == 0 || sortList.Vals[0].GetNext().IsZero() {
-			// If there are no entries yet, just sleep - it still handles new entries
-			// and stop requests.
-			effective = now.AddDate(10, 0, 0)
-		} else {
-			effective = sortList.Vals[0].GetNext()
-		}
-		select {
-		case now = <-time.After(effective.Sub(now)):
-			// Run every entry whose next time was this effective time.
- for _, e := range sortList.Vals { - if e.GetNext() != effective { - break - } - go e.Run() - e.SetPrev(e.GetNext()) - e.SetNext(effective) - } - continue - case <-changed: - now = time.Now().Local() - for _, t := range AdminTaskList { - t.SetNext(now) - } - continue - case <-stop: - return - } - } -} - -// StopTask stop all tasks -func StopTask() { - if isstart { - isstart = false - stop <- true - } - -} - -// AddTask add task with name -func AddTask(taskname string, t Tasker) { - t.SetNext(time.Now().Local()) - AdminTaskList[taskname] = t - if isstart { - changed <- true - } -} - -// DeleteTask delete task with name -func DeleteTask(taskname string) { - delete(AdminTaskList, taskname) - if isstart { - changed <- true - } -} - -// MapSorter sort map for tasker -type MapSorter struct { - Keys []string - Vals []Tasker -} - -// NewMapSorter create new tasker map -func NewMapSorter(m map[string]Tasker) *MapSorter { - ms := &MapSorter{ - Keys: make([]string, 0, len(m)), - Vals: make([]Tasker, 0, len(m)), - } - for k, v := range m { - ms.Keys = append(ms.Keys, k) - ms.Vals = append(ms.Vals, v) - } - return ms -} - -// Sort sort tasker map -func (ms *MapSorter) Sort() { - sort.Sort(ms) -} - -func (ms *MapSorter) Len() int { return len(ms.Keys) } -func (ms *MapSorter) Less(i, j int) bool { - if ms.Vals[i].GetNext().IsZero() { - return false - } - if ms.Vals[j].GetNext().IsZero() { - return true - } - return ms.Vals[i].GetNext().Before(ms.Vals[j].GetNext()) -} -func (ms *MapSorter) Swap(i, j int) { - ms.Vals[i], ms.Vals[j] = ms.Vals[j], ms.Vals[i] - ms.Keys[i], ms.Keys[j] = ms.Keys[j], ms.Keys[i] -} - -func getField(field string, r bounds) uint64 { - // list = range {"," range} - var bits uint64 - ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) - for _, expr := range ranges { - bits |= getRange(expr, r) - } - return bits -} - -// getRange returns the bits indicated by the given expression: -// number | number "-" number [ "/" number ] -func getRange(expr string, r bounds) uint64 { - - var ( - start, end, step uint - rangeAndStep = strings.Split(expr, "/") - lowAndHigh = strings.Split(rangeAndStep[0], "-") - singleDigit = len(lowAndHigh) == 1 - ) - - var extrastar uint64 - if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { - start = r.min - end = r.max - extrastar = starBit - } else { - start = parseIntOrName(lowAndHigh[0], r.names) - switch len(lowAndHigh) { - case 1: - end = start - case 2: - end = parseIntOrName(lowAndHigh[1], r.names) - default: - log.Panicf("Too many hyphens: %s", expr) - } - } - - switch len(rangeAndStep) { - case 1: - step = 1 - case 2: - step = mustParseInt(rangeAndStep[1]) - - // Special handling: "N/step" means "N-max/step". - if singleDigit { - end = r.max - } - default: - log.Panicf("Too many slashes: %s", expr) - } - - if start < r.min { - log.Panicf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr) - } - if end > r.max { - log.Panicf("End of range (%d) above maximum (%d): %s", end, r.max, expr) - } - if start > end { - log.Panicf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr) - } - - return getBits(start, end, step) | extrastar -} - -// parseIntOrName returns the (possibly-named) integer contained in expr. -func parseIntOrName(expr string, names map[string]uint) uint { - if names != nil { - if namedInt, ok := names[strings.ToLower(expr)]; ok { - return namedInt - } - } - return mustParseInt(expr) -} - -// mustParseInt parses the given expression as an int or panics. 
-func mustParseInt(expr string) uint { - num, err := strconv.Atoi(expr) - if err != nil { - log.Panicf("Failed to parse int from %s: %s", expr, err) - } - if num < 0 { - log.Panicf("Negative number (%d) not allowed: %s", num, expr) - } - - return uint(num) -} - -// getBits sets all bits in the range [min, max], modulo the given step size. -func getBits(min, max, step uint) uint64 { - var bits uint64 - - // If step is 1, use shifts. - if step == 1 { - return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) - } - - // Else, use a simple loop. - for i := min; i <= max; i += step { - bits |= 1 << i - } - return bits -} - -// all returns all bits within the given bounds. (plus the star bit) -func all(r bounds) uint64 { - return getBits(r.min, r.max, 1) | starBit -} - -func init() { - AdminTaskList = make(map[string]Tasker) - stop = make(chan bool) - changed = make(chan bool) -} diff --git a/vendor/github.com/astaxie/beego/tree.go b/vendor/github.com/astaxie/beego/tree.go deleted file mode 100644 index 9e53003bc..000000000 --- a/vendor/github.com/astaxie/beego/tree.go +++ /dev/null @@ -1,585 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package beego - -import ( - "path" - "regexp" - "strings" - - "github.com/astaxie/beego/context" - "github.com/astaxie/beego/utils" -) - -var ( - allowSuffixExt = []string{".json", ".xml", ".html"} -) - -// Tree has three elements: FixRouter/wildcard/leaves -// fixRouter stores Fixed Router -// wildcard stores params -// leaves store the endpoint information -type Tree struct { - //prefix set for static router - prefix string - //search fix route first - fixrouters []*Tree - //if set, failure to match fixrouters search then search wildcard - wildcard *Tree - //if set, failure to match wildcard search - leaves []*leafInfo -} - -// NewTree return a new Tree -func NewTree() *Tree { - return &Tree{} -} - -// AddTree will add tree to the exist Tree -// prefix should has no params -func (t *Tree) AddTree(prefix string, tree *Tree) { - t.addtree(splitPath(prefix), tree, nil, "") -} - -func (t *Tree) addtree(segments []string, tree *Tree, wildcards []string, reg string) { - if len(segments) == 0 { - panic("prefix should has path") - } - seg := segments[0] - iswild, params, regexpStr := splitSegment(seg) - // if it's ? 
meaning can igone this, so add one more rule for it - if len(params) > 0 && params[0] == ":" { - params = params[1:] - if len(segments[1:]) > 0 { - t.addtree(segments[1:], tree, append(wildcards, params...), reg) - } else { - filterTreeWithPrefix(tree, wildcards, reg) - } - } - //Rule: /login/*/access match /login/2009/11/access - //if already has *, and when loop the access, should as a regexpStr - if !iswild && utils.InSlice(":splat", wildcards) { - iswild = true - regexpStr = seg - } - //Rule: /user/:id/* - if seg == "*" && len(wildcards) > 0 && reg == "" { - regexpStr = "(.+)" - } - if len(segments) == 1 { - if iswild { - if regexpStr != "" { - if reg == "" { - rr := "" - for _, w := range wildcards { - if w == ":splat" { - rr = rr + "(.+)/" - } else { - rr = rr + "([^/]+)/" - } - } - regexpStr = rr + regexpStr - } else { - regexpStr = "/" + regexpStr - } - } else if reg != "" { - if seg == "*.*" { - regexpStr = "([^.]+).(.+)" - } else { - for _, w := range params { - if w == "." || w == ":" { - continue - } - regexpStr = "([^/]+)/" + regexpStr - } - } - } - reg = strings.Trim(reg+"/"+regexpStr, "/") - filterTreeWithPrefix(tree, append(wildcards, params...), reg) - t.wildcard = tree - } else { - reg = strings.Trim(reg+"/"+regexpStr, "/") - filterTreeWithPrefix(tree, append(wildcards, params...), reg) - tree.prefix = seg - t.fixrouters = append(t.fixrouters, tree) - } - return - } - - if iswild { - if t.wildcard == nil { - t.wildcard = NewTree() - } - if regexpStr != "" { - if reg == "" { - rr := "" - for _, w := range wildcards { - if w == ":splat" { - rr = rr + "(.+)/" - } else { - rr = rr + "([^/]+)/" - } - } - regexpStr = rr + regexpStr - } else { - regexpStr = "/" + regexpStr - } - } else if reg != "" { - if seg == "*.*" { - regexpStr = "([^.]+).(.+)" - params = params[1:] - } else { - for range params { - regexpStr = "([^/]+)/" + regexpStr - } - } - } else { - if seg == "*.*" { - params = params[1:] - } - } - reg = strings.TrimRight(strings.TrimRight(reg, "/")+"/"+regexpStr, "/") - t.wildcard.addtree(segments[1:], tree, append(wildcards, params...), reg) - } else { - subTree := NewTree() - subTree.prefix = seg - t.fixrouters = append(t.fixrouters, subTree) - subTree.addtree(segments[1:], tree, append(wildcards, params...), reg) - } -} - -func filterTreeWithPrefix(t *Tree, wildcards []string, reg string) { - for _, v := range t.fixrouters { - filterTreeWithPrefix(v, wildcards, reg) - } - if t.wildcard != nil { - filterTreeWithPrefix(t.wildcard, wildcards, reg) - } - for _, l := range t.leaves { - if reg != "" { - if l.regexps != nil { - l.wildcards = append(wildcards, l.wildcards...) - l.regexps = regexp.MustCompile("^" + reg + "/" + strings.Trim(l.regexps.String(), "^$") + "$") - } else { - for _, v := range l.wildcards { - if v == ":splat" { - reg = reg + "/(.+)" - } else { - reg = reg + "/([^/]+)" - } - } - l.regexps = regexp.MustCompile("^" + reg + "$") - l.wildcards = append(wildcards, l.wildcards...) - } - } else { - l.wildcards = append(wildcards, l.wildcards...) 
- if l.regexps != nil { - for _, w := range wildcards { - if w == ":splat" { - reg = "(.+)/" + reg - } else { - reg = "([^/]+)/" + reg - } - } - l.regexps = regexp.MustCompile("^" + reg + strings.Trim(l.regexps.String(), "^$") + "$") - } - } - } -} - -// AddRouter call addseg function -func (t *Tree) AddRouter(pattern string, runObject interface{}) { - t.addseg(splitPath(pattern), runObject, nil, "") -} - -// "/" -// "admin" -> -func (t *Tree) addseg(segments []string, route interface{}, wildcards []string, reg string) { - if len(segments) == 0 { - if reg != "" { - t.leaves = append(t.leaves, &leafInfo{runObject: route, wildcards: wildcards, regexps: regexp.MustCompile("^" + reg + "$")}) - } else { - t.leaves = append(t.leaves, &leafInfo{runObject: route, wildcards: wildcards}) - } - } else { - seg := segments[0] - iswild, params, regexpStr := splitSegment(seg) - // if it's ? meaning can igone this, so add one more rule for it - if len(params) > 0 && params[0] == ":" { - t.addseg(segments[1:], route, wildcards, reg) - params = params[1:] - } - //Rule: /login/*/access match /login/2009/11/access - //if already has *, and when loop the access, should as a regexpStr - if !iswild && utils.InSlice(":splat", wildcards) { - iswild = true - regexpStr = seg - } - //Rule: /user/:id/* - if seg == "*" && len(wildcards) > 0 && reg == "" { - regexpStr = "(.+)" - } - if iswild { - if t.wildcard == nil { - t.wildcard = NewTree() - } - if regexpStr != "" { - if reg == "" { - rr := "" - for _, w := range wildcards { - if w == ":splat" { - rr = rr + "(.+)/" - } else { - rr = rr + "([^/]+)/" - } - } - regexpStr = rr + regexpStr - } else { - regexpStr = "/" + regexpStr - } - } else if reg != "" { - if seg == "*.*" { - regexpStr = "/([^.]+).(.+)" - params = params[1:] - } else { - for range params { - regexpStr = "/([^/]+)" + regexpStr - } - } - } else { - if seg == "*.*" { - params = params[1:] - } - } - t.wildcard.addseg(segments[1:], route, append(wildcards, params...), reg+regexpStr) - } else { - var subTree *Tree - for _, sub := range t.fixrouters { - if sub.prefix == seg { - subTree = sub - break - } - } - if subTree == nil { - subTree = NewTree() - subTree.prefix = seg - t.fixrouters = append(t.fixrouters, subTree) - } - subTree.addseg(segments[1:], route, wildcards, reg) - } - } -} - -// Match router to runObject & params -func (t *Tree) Match(pattern string, ctx *context.Context) (runObject interface{}) { - if len(pattern) == 0 || pattern[0] != '/' { - return nil - } - w := make([]string, 0, 20) - return t.match(pattern[1:], pattern, w, ctx) -} - -func (t *Tree) match(treePattern string, pattern string, wildcardValues []string, ctx *context.Context) (runObject interface{}) { - if len(pattern) > 0 { - i := 0 - for ; i < len(pattern) && pattern[i] == '/'; i++ { - } - pattern = pattern[i:] - } - // Handle leaf nodes: - if len(pattern) == 0 { - for _, l := range t.leaves { - if ok := l.match(treePattern, wildcardValues, ctx); ok { - return l.runObject - } - } - if t.wildcard != nil { - for _, l := range t.wildcard.leaves { - if ok := l.match(treePattern, wildcardValues, ctx); ok { - return l.runObject - } - } - } - return nil - } - var seg string - i, l := 0, len(pattern) - for ; i < l && pattern[i] != '/'; i++ { - } - if i == 0 { - seg = pattern - pattern = "" - } else { - seg = pattern[:i] - pattern = pattern[i:] - } - for _, subTree := range t.fixrouters { - if subTree.prefix == seg { - if len(pattern) != 0 && pattern[0] == '/' { - treePattern = pattern[1:] - } else { - treePattern = pattern - } - 
runObject = subTree.match(treePattern, pattern, wildcardValues, ctx) - if runObject != nil { - break - } - } - } - if runObject == nil && len(t.fixrouters) > 0 { - // Filter the .json .xml .html extension - for _, str := range allowSuffixExt { - if strings.HasSuffix(seg, str) { - for _, subTree := range t.fixrouters { - if subTree.prefix == seg[:len(seg)-len(str)] { - runObject = subTree.match(treePattern, pattern, wildcardValues, ctx) - if runObject != nil { - ctx.Input.SetParam(":ext", str[1:]) - } - } - } - } - } - } - if runObject == nil && t.wildcard != nil { - runObject = t.wildcard.match(treePattern, pattern, append(wildcardValues, seg), ctx) - } - - if runObject == nil && len(t.leaves) > 0 { - wildcardValues = append(wildcardValues, seg) - start, i := 0, 0 - for ; i < len(pattern); i++ { - if pattern[i] == '/' { - if i != 0 && start < len(pattern) { - wildcardValues = append(wildcardValues, pattern[start:i]) - } - start = i + 1 - continue - } - } - if start > 0 { - wildcardValues = append(wildcardValues, pattern[start:i]) - } - for _, l := range t.leaves { - if ok := l.match(treePattern, wildcardValues, ctx); ok { - return l.runObject - } - } - } - return runObject -} - -type leafInfo struct { - // names of wildcards that lead to this leaf. eg, ["id" "name"] for the wildcard ":id" and ":name" - wildcards []string - - // if the leaf is regexp - regexps *regexp.Regexp - - runObject interface{} -} - -func (leaf *leafInfo) match(treePattern string, wildcardValues []string, ctx *context.Context) (ok bool) { - //fmt.Println("Leaf:", wildcardValues, leaf.wildcards, leaf.regexps) - if leaf.regexps == nil { - if len(wildcardValues) == 0 && len(leaf.wildcards) == 0 { // static path - return true - } - // match * - if len(leaf.wildcards) == 1 && leaf.wildcards[0] == ":splat" { - ctx.Input.SetParam(":splat", treePattern) - return true - } - // match *.* or :id - if len(leaf.wildcards) >= 2 && leaf.wildcards[len(leaf.wildcards)-2] == ":path" && leaf.wildcards[len(leaf.wildcards)-1] == ":ext" { - if len(leaf.wildcards) == 2 { - lastone := wildcardValues[len(wildcardValues)-1] - strs := strings.SplitN(lastone, ".", 2) - if len(strs) == 2 { - ctx.Input.SetParam(":ext", strs[1]) - } - ctx.Input.SetParam(":path", path.Join(path.Join(wildcardValues[:len(wildcardValues)-1]...), strs[0])) - return true - } else if len(wildcardValues) < 2 { - return false - } - var index int - for index = 0; index < len(leaf.wildcards)-2; index++ { - ctx.Input.SetParam(leaf.wildcards[index], wildcardValues[index]) - } - lastone := wildcardValues[len(wildcardValues)-1] - strs := strings.SplitN(lastone, ".", 2) - if len(strs) == 2 { - ctx.Input.SetParam(":ext", strs[1]) - } - if index > (len(wildcardValues) - 1) { - ctx.Input.SetParam(":path", "") - } else { - ctx.Input.SetParam(":path", path.Join(path.Join(wildcardValues[index:len(wildcardValues)-1]...), strs[0])) - } - return true - } - // match :id - if len(leaf.wildcards) != len(wildcardValues) { - return false - } - for j, v := range leaf.wildcards { - ctx.Input.SetParam(v, wildcardValues[j]) - } - return true - } - - if !leaf.regexps.MatchString(path.Join(wildcardValues...)) { - return false - } - matches := leaf.regexps.FindStringSubmatch(path.Join(wildcardValues...)) - for i, match := range matches[1:] { - if i < len(leaf.wildcards) { - ctx.Input.SetParam(leaf.wildcards[i], match) - } - } - return true -} - -// "/" -> [] -// "/admin" -> ["admin"] -// "/admin/" -> ["admin"] -// "/admin/users" -> ["admin", "users"] -func splitPath(key string) []string { - key = 
strings.Trim(key, "/ ") - if key == "" { - return []string{} - } - return strings.Split(key, "/") -} - -// "admin" -> false, nil, "" -// ":id" -> true, [:id], "" -// "?:id" -> true, [: :id], "" : meaning can empty -// ":id:int" -> true, [:id], ([0-9]+) -// ":name:string" -> true, [:name], ([\w]+) -// ":id([0-9]+)" -> true, [:id], ([0-9]+) -// ":id([0-9]+)_:name" -> true, [:id :name], ([0-9]+)_(.+) -// "cms_:id_:page.html" -> true, [:id_ :page], cms_(.+)(.+).html -// "cms_:id(.+)_:page.html" -> true, [:id :page], cms_(.+)_(.+).html -// "*" -> true, [:splat], "" -// "*.*" -> true,[. :path :ext], "" . meaning separator -func splitSegment(key string) (bool, []string, string) { - if strings.HasPrefix(key, "*") { - if key == "*.*" { - return true, []string{".", ":path", ":ext"}, "" - } - return true, []string{":splat"}, "" - } - if strings.ContainsAny(key, ":") { - var paramsNum int - var out []rune - var start bool - var startexp bool - var param []rune - var expt []rune - var skipnum int - params := []string{} - reg := regexp.MustCompile(`[a-zA-Z0-9_]+`) - for i, v := range key { - if skipnum > 0 { - skipnum-- - continue - } - if start { - //:id:int and :name:string - if v == ':' { - if len(key) >= i+4 { - if key[i+1:i+4] == "int" { - out = append(out, []rune("([0-9]+)")...) - params = append(params, ":"+string(param)) - start = false - startexp = false - skipnum = 3 - param = make([]rune, 0) - paramsNum++ - continue - } - } - if len(key) >= i+7 { - if key[i+1:i+7] == "string" { - out = append(out, []rune(`([\w]+)`)...) - params = append(params, ":"+string(param)) - paramsNum++ - start = false - startexp = false - skipnum = 6 - param = make([]rune, 0) - continue - } - } - } - // params only support a-zA-Z0-9 - if reg.MatchString(string(v)) { - param = append(param, v) - continue - } - if v != '(' { - out = append(out, []rune(`(.+)`)...) - params = append(params, ":"+string(param)) - param = make([]rune, 0) - paramsNum++ - start = false - startexp = false - } - } - if startexp { - if v != ')' { - expt = append(expt, v) - continue - } - } - // Escape Sequence '\' - if i > 0 && key[i-1] == '\\' { - out = append(out, v) - } else if v == ':' { - param = make([]rune, 0) - start = true - } else if v == '(' { - startexp = true - start = false - if len(param) > 0 { - params = append(params, ":"+string(param)) - param = make([]rune, 0) - } - paramsNum++ - expt = make([]rune, 0) - expt = append(expt, '(') - } else if v == ')' { - startexp = false - expt = append(expt, ')') - out = append(out, expt...) - param = make([]rune, 0) - } else if v == '?' { - params = append(params, ":") - } else { - out = append(out, v) - } - } - if len(param) > 0 { - if paramsNum > 0 { - out = append(out, []rune(`(.+)`)...) - } - params = append(params, ":"+string(param)) - } - return true, params, string(out) - } - return false, nil, "" -} diff --git a/vendor/github.com/astaxie/beego/utils/caller.go b/vendor/github.com/astaxie/beego/utils/caller.go deleted file mode 100644 index 73c52a620..000000000 --- a/vendor/github.com/astaxie/beego/utils/caller.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "reflect" - "runtime" -) - -// GetFuncName get function name -func GetFuncName(i interface{}) string { - return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() -} diff --git a/vendor/github.com/astaxie/beego/utils/debug.go b/vendor/github.com/astaxie/beego/utils/debug.go deleted file mode 100644 index 93c27b70d..000000000 --- a/vendor/github.com/astaxie/beego/utils/debug.go +++ /dev/null @@ -1,478 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "bytes" - "fmt" - "log" - "reflect" - "runtime" -) - -var ( - dunno = []byte("???") - centerDot = []byte("·") - dot = []byte(".") -) - -type pointerInfo struct { - prev *pointerInfo - n int - addr uintptr - pos int - used []int -} - -// Display print the data in console -func Display(data ...interface{}) { - display(true, data...) -} - -// GetDisplayString return data print string -func GetDisplayString(data ...interface{}) string { - return display(false, data...) 
-} - -func display(displayed bool, data ...interface{}) string { - var pc, file, line, ok = runtime.Caller(2) - - if !ok { - return "" - } - - var buf = new(bytes.Buffer) - - fmt.Fprintf(buf, "[Debug] at %s() [%s:%d]\n", function(pc), file, line) - - fmt.Fprintf(buf, "\n[Variables]\n") - - for i := 0; i < len(data); i += 2 { - var output = fomateinfo(len(data[i].(string))+3, data[i+1]) - fmt.Fprintf(buf, "%s = %s", data[i], output) - } - - if displayed { - log.Print(buf) - } - return buf.String() -} - -// return data dump and format bytes -func fomateinfo(headlen int, data ...interface{}) []byte { - var buf = new(bytes.Buffer) - - if len(data) > 1 { - fmt.Fprint(buf, " ") - - fmt.Fprint(buf, "[") - - fmt.Fprintln(buf) - } - - for k, v := range data { - var buf2 = new(bytes.Buffer) - var pointers *pointerInfo - var interfaces = make([]reflect.Value, 0, 10) - - printKeyValue(buf2, reflect.ValueOf(v), &pointers, &interfaces, nil, true, " ", 1) - - if k < len(data)-1 { - fmt.Fprint(buf2, ", ") - } - - fmt.Fprintln(buf2) - - buf.Write(buf2.Bytes()) - } - - if len(data) > 1 { - fmt.Fprintln(buf) - - fmt.Fprint(buf, " ") - - fmt.Fprint(buf, "]") - } - - return buf.Bytes() -} - -// check data is golang basic type -func isSimpleType(val reflect.Value, kind reflect.Kind, pointers **pointerInfo, interfaces *[]reflect.Value) bool { - switch kind { - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.Complex64, reflect.Complex128: - return true - case reflect.String: - return true - case reflect.Chan: - return true - case reflect.Invalid: - return true - case reflect.Interface: - for _, in := range *interfaces { - if reflect.DeepEqual(in, val) { - return true - } - } - return false - case reflect.UnsafePointer: - if val.IsNil() { - return true - } - - var elem = val.Elem() - - if isSimpleType(elem, elem.Kind(), pointers, interfaces) { - return true - } - - var addr = val.Elem().UnsafeAddr() - - for p := *pointers; p != nil; p = p.prev { - if addr == p.addr { - return true - } - } - - return false - } - - return false -} - -// dump value -func printKeyValue(buf *bytes.Buffer, val reflect.Value, pointers **pointerInfo, interfaces *[]reflect.Value, structFilter func(string, string) bool, formatOutput bool, indent string, level int) { - var t = val.Kind() - - switch t { - case reflect.Bool: - fmt.Fprint(buf, val.Bool()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - fmt.Fprint(buf, val.Int()) - case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: - fmt.Fprint(buf, val.Uint()) - case reflect.Float32, reflect.Float64: - fmt.Fprint(buf, val.Float()) - case reflect.Complex64, reflect.Complex128: - fmt.Fprint(buf, val.Complex()) - case reflect.UnsafePointer: - fmt.Fprintf(buf, "unsafe.Pointer(0x%X)", val.Pointer()) - case reflect.Ptr: - if val.IsNil() { - fmt.Fprint(buf, "nil") - return - } - - var addr = val.Elem().UnsafeAddr() - - for p := *pointers; p != nil; p = p.prev { - if addr == p.addr { - p.used = append(p.used, buf.Len()) - fmt.Fprintf(buf, "0x%X", addr) - return - } - } - - *pointers = &pointerInfo{ - prev: *pointers, - addr: addr, - pos: buf.Len(), - used: make([]int, 0), - } - - fmt.Fprint(buf, "&") - - printKeyValue(buf, val.Elem(), pointers, interfaces, structFilter, formatOutput, indent, level) 
- case reflect.String: - fmt.Fprint(buf, "\"", val.String(), "\"") - case reflect.Interface: - var value = val.Elem() - - if !value.IsValid() { - fmt.Fprint(buf, "nil") - } else { - for _, in := range *interfaces { - if reflect.DeepEqual(in, val) { - fmt.Fprint(buf, "repeat") - return - } - } - - *interfaces = append(*interfaces, val) - - printKeyValue(buf, value, pointers, interfaces, structFilter, formatOutput, indent, level+1) - } - case reflect.Struct: - var t = val.Type() - - fmt.Fprint(buf, t) - fmt.Fprint(buf, "{") - - for i := 0; i < val.NumField(); i++ { - if formatOutput { - fmt.Fprintln(buf) - } else { - fmt.Fprint(buf, " ") - } - - var name = t.Field(i).Name - - if formatOutput { - for ind := 0; ind < level; ind++ { - fmt.Fprint(buf, indent) - } - } - - fmt.Fprint(buf, name) - fmt.Fprint(buf, ": ") - - if structFilter != nil && structFilter(t.String(), name) { - fmt.Fprint(buf, "ignore") - } else { - printKeyValue(buf, val.Field(i), pointers, interfaces, structFilter, formatOutput, indent, level+1) - } - - fmt.Fprint(buf, ",") - } - - if formatOutput { - fmt.Fprintln(buf) - - for ind := 0; ind < level-1; ind++ { - fmt.Fprint(buf, indent) - } - } else { - fmt.Fprint(buf, " ") - } - - fmt.Fprint(buf, "}") - case reflect.Array, reflect.Slice: - fmt.Fprint(buf, val.Type()) - fmt.Fprint(buf, "{") - - var allSimple = true - - for i := 0; i < val.Len(); i++ { - var elem = val.Index(i) - - var isSimple = isSimpleType(elem, elem.Kind(), pointers, interfaces) - - if !isSimple { - allSimple = false - } - - if formatOutput && !isSimple { - fmt.Fprintln(buf) - } else { - fmt.Fprint(buf, " ") - } - - if formatOutput && !isSimple { - for ind := 0; ind < level; ind++ { - fmt.Fprint(buf, indent) - } - } - - printKeyValue(buf, elem, pointers, interfaces, structFilter, formatOutput, indent, level+1) - - if i != val.Len()-1 || !allSimple { - fmt.Fprint(buf, ",") - } - } - - if formatOutput && !allSimple { - fmt.Fprintln(buf) - - for ind := 0; ind < level-1; ind++ { - fmt.Fprint(buf, indent) - } - } else { - fmt.Fprint(buf, " ") - } - - fmt.Fprint(buf, "}") - case reflect.Map: - var t = val.Type() - var keys = val.MapKeys() - - fmt.Fprint(buf, t) - fmt.Fprint(buf, "{") - - var allSimple = true - - for i := 0; i < len(keys); i++ { - var elem = val.MapIndex(keys[i]) - - var isSimple = isSimpleType(elem, elem.Kind(), pointers, interfaces) - - if !isSimple { - allSimple = false - } - - if formatOutput && !isSimple { - fmt.Fprintln(buf) - } else { - fmt.Fprint(buf, " ") - } - - if formatOutput && !isSimple { - for ind := 0; ind <= level; ind++ { - fmt.Fprint(buf, indent) - } - } - - printKeyValue(buf, keys[i], pointers, interfaces, structFilter, formatOutput, indent, level+1) - fmt.Fprint(buf, ": ") - printKeyValue(buf, elem, pointers, interfaces, structFilter, formatOutput, indent, level+1) - - if i != val.Len()-1 || !allSimple { - fmt.Fprint(buf, ",") - } - } - - if formatOutput && !allSimple { - fmt.Fprintln(buf) - - for ind := 0; ind < level-1; ind++ { - fmt.Fprint(buf, indent) - } - } else { - fmt.Fprint(buf, " ") - } - - fmt.Fprint(buf, "}") - case reflect.Chan: - fmt.Fprint(buf, val.Type()) - case reflect.Invalid: - fmt.Fprint(buf, "invalid") - default: - fmt.Fprint(buf, "unknow") - } -} - -// PrintPointerInfo dump pointer value -func PrintPointerInfo(buf *bytes.Buffer, headlen int, pointers *pointerInfo) { - var anyused = false - var pointerNum = 0 - - for p := pointers; p != nil; p = p.prev { - if len(p.used) > 0 { - anyused = true - } - pointerNum++ - p.n = pointerNum - } - - if anyused { - 
var pointerBufs = make([][]rune, pointerNum+1) - - for i := 0; i < len(pointerBufs); i++ { - var pointerBuf = make([]rune, buf.Len()+headlen) - - for j := 0; j < len(pointerBuf); j++ { - pointerBuf[j] = ' ' - } - - pointerBufs[i] = pointerBuf - } - - for pn := 0; pn <= pointerNum; pn++ { - for p := pointers; p != nil; p = p.prev { - if len(p.used) > 0 && p.n >= pn { - if pn == p.n { - pointerBufs[pn][p.pos+headlen] = '└' - - var maxpos = 0 - - for i, pos := range p.used { - if i < len(p.used)-1 { - pointerBufs[pn][pos+headlen] = '┴' - } else { - pointerBufs[pn][pos+headlen] = '┘' - } - - maxpos = pos - } - - for i := 0; i < maxpos-p.pos-1; i++ { - if pointerBufs[pn][i+p.pos+headlen+1] == ' ' { - pointerBufs[pn][i+p.pos+headlen+1] = '─' - } - } - } else { - pointerBufs[pn][p.pos+headlen] = '│' - - for _, pos := range p.used { - if pointerBufs[pn][pos+headlen] == ' ' { - pointerBufs[pn][pos+headlen] = '│' - } else { - pointerBufs[pn][pos+headlen] = '┼' - } - } - } - } - } - - buf.WriteString(string(pointerBufs[pn]) + "\n") - } - } -} - -// Stack get stack bytes -func Stack(skip int, indent string) []byte { - var buf = new(bytes.Buffer) - - for i := skip; ; i++ { - var pc, file, line, ok = runtime.Caller(i) - - if !ok { - break - } - - buf.WriteString(indent) - - fmt.Fprintf(buf, "at %s() [%s:%d]\n", function(pc), file, line) - } - - return buf.Bytes() -} - -// return the name of the function containing the PC if possible, -func function(pc uintptr) []byte { - fn := runtime.FuncForPC(pc) - if fn == nil { - return dunno - } - name := []byte(fn.Name()) - // The name includes the path name to the package, which is unnecessary - // since the file name is already included. Plus, it has center dots. - // That is, we see - // runtime/debug.*T·ptrmethod - // and want - // *T.ptrmethod - if period := bytes.Index(name, dot); period >= 0 { - name = name[period+1:] - } - name = bytes.Replace(name, centerDot, dot, -1) - return name -} diff --git a/vendor/github.com/astaxie/beego/utils/file.go b/vendor/github.com/astaxie/beego/utils/file.go deleted file mode 100644 index 6090eb171..000000000 --- a/vendor/github.com/astaxie/beego/utils/file.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "bufio" - "errors" - "io" - "os" - "path/filepath" - "regexp" -) - -// SelfPath gets compiled executable file absolute path -func SelfPath() string { - path, _ := filepath.Abs(os.Args[0]) - return path -} - -// SelfDir gets compiled executable file directory -func SelfDir() string { - return filepath.Dir(SelfPath()) -} - -// FileExists reports whether the named file or directory exists. -func FileExists(name string) bool { - if _, err := os.Stat(name); err != nil { - if os.IsNotExist(err) { - return false - } - } - return true -} - -// SearchFile Search a file in paths. 
-// this is often used in search config file in /etc ~/ -func SearchFile(filename string, paths ...string) (fullpath string, err error) { - for _, path := range paths { - if fullpath = filepath.Join(path, filename); FileExists(fullpath) { - return - } - } - err = errors.New(fullpath + " not found in paths") - return -} - -// GrepFile like command grep -E -// for example: GrepFile(`^hello`, "hello.txt") -// \n is striped while read -func GrepFile(patten string, filename string) (lines []string, err error) { - re, err := regexp.Compile(patten) - if err != nil { - return - } - - fd, err := os.Open(filename) - if err != nil { - return - } - lines = make([]string, 0) - reader := bufio.NewReader(fd) - prefix := "" - var isLongLine bool - for { - byteLine, isPrefix, er := reader.ReadLine() - if er != nil && er != io.EOF { - return nil, er - } - if er == io.EOF { - break - } - line := string(byteLine) - if isPrefix { - prefix += line - continue - } else { - isLongLine = true - } - - line = prefix + line - if isLongLine { - prefix = "" - } - if re.MatchString(line) { - lines = append(lines, line) - } - } - return lines, nil -} diff --git a/vendor/github.com/astaxie/beego/utils/mail.go b/vendor/github.com/astaxie/beego/utils/mail.go deleted file mode 100644 index e3fa1c909..000000000 --- a/vendor/github.com/astaxie/beego/utils/mail.go +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "mime" - "mime/multipart" - "net/mail" - "net/smtp" - "net/textproto" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" -) - -const ( - maxLineLength = 76 - - upperhex = "0123456789ABCDEF" -) - -// Email is the type used for email messages -type Email struct { - Auth smtp.Auth - Identity string `json:"identity"` - Username string `json:"username"` - Password string `json:"password"` - Host string `json:"host"` - Port int `json:"port"` - From string `json:"from"` - To []string - Bcc []string - Cc []string - Subject string - Text string // Plaintext message (optional) - HTML string // Html message (optional) - Headers textproto.MIMEHeader - Attachments []*Attachment - ReadReceipt []string -} - -// Attachment is a struct representing an email attachment. -// Based on the mime/multipart.FileHeader struct, Attachment contains the name, MIMEHeader, and content of the attachment in question -type Attachment struct { - Filename string - Header textproto.MIMEHeader - Content []byte -} - -// NewEMail create new Email struct with config json. -// config json is followed from Email struct fields. 
-func NewEMail(config string) *Email { - e := new(Email) - e.Headers = textproto.MIMEHeader{} - err := json.Unmarshal([]byte(config), e) - if err != nil { - return nil - } - return e -} - -// Bytes Make all send information to byte -func (e *Email) Bytes() ([]byte, error) { - buff := &bytes.Buffer{} - w := multipart.NewWriter(buff) - // Set the appropriate headers (overwriting any conflicts) - // Leave out Bcc (only included in envelope headers) - e.Headers.Set("To", strings.Join(e.To, ",")) - if e.Cc != nil { - e.Headers.Set("Cc", strings.Join(e.Cc, ",")) - } - e.Headers.Set("From", e.From) - e.Headers.Set("Subject", e.Subject) - if len(e.ReadReceipt) != 0 { - e.Headers.Set("Disposition-Notification-To", strings.Join(e.ReadReceipt, ",")) - } - e.Headers.Set("MIME-Version", "1.0") - - // Write the envelope headers (including any custom headers) - if err := headerToBytes(buff, e.Headers); err != nil { - return nil, fmt.Errorf("Failed to render message headers: %s", err) - } - - e.Headers.Set("Content-Type", fmt.Sprintf("multipart/mixed;\r\n boundary=%s\r\n", w.Boundary())) - fmt.Fprintf(buff, "%s:", "Content-Type") - fmt.Fprintf(buff, " %s\r\n", fmt.Sprintf("multipart/mixed;\r\n boundary=%s\r\n", w.Boundary())) - - // Start the multipart/mixed part - fmt.Fprintf(buff, "--%s\r\n", w.Boundary()) - header := textproto.MIMEHeader{} - // Check to see if there is a Text or HTML field - if e.Text != "" || e.HTML != "" { - subWriter := multipart.NewWriter(buff) - // Create the multipart alternative part - header.Set("Content-Type", fmt.Sprintf("multipart/alternative;\r\n boundary=%s\r\n", subWriter.Boundary())) - // Write the header - if err := headerToBytes(buff, header); err != nil { - return nil, fmt.Errorf("Failed to render multipart message headers: %s", err) - } - // Create the body sections - if e.Text != "" { - header.Set("Content-Type", fmt.Sprintf("text/plain; charset=UTF-8")) - header.Set("Content-Transfer-Encoding", "quoted-printable") - if _, err := subWriter.CreatePart(header); err != nil { - return nil, err - } - // Write the text - if err := quotePrintEncode(buff, e.Text); err != nil { - return nil, err - } - } - if e.HTML != "" { - header.Set("Content-Type", fmt.Sprintf("text/html; charset=UTF-8")) - header.Set("Content-Transfer-Encoding", "quoted-printable") - if _, err := subWriter.CreatePart(header); err != nil { - return nil, err - } - // Write the text - if err := quotePrintEncode(buff, e.HTML); err != nil { - return nil, err - } - } - if err := subWriter.Close(); err != nil { - return nil, err - } - } - // Create attachment part, if necessary - for _, a := range e.Attachments { - ap, err := w.CreatePart(a.Header) - if err != nil { - return nil, err - } - // Write the base64Wrapped content to the part - base64Wrap(ap, a.Content) - } - if err := w.Close(); err != nil { - return nil, err - } - return buff.Bytes(), nil -} - -// AttachFile Add attach file to the send mail -func (e *Email) AttachFile(args ...string) (a *Attachment, err error) { - if len(args) < 1 && len(args) > 2 { - err = errors.New("Must specify a file name and number of parameters can not exceed at least two") - return - } - filename := args[0] - id := "" - if len(args) > 1 { - id = args[1] - } - f, err := os.Open(filename) - if err != nil { - return - } - ct := mime.TypeByExtension(filepath.Ext(filename)) - basename := path.Base(filename) - return e.Attach(f, basename, ct, id) -} - -// Attach is used to attach content from an io.Reader to the email. 
-// Parameters include an io.Reader, the desired filename for the attachment, and the Content-Type. -func (e *Email) Attach(r io.Reader, filename string, args ...string) (a *Attachment, err error) { - if len(args) < 1 && len(args) > 2 { - err = errors.New("Must specify the file type and number of parameters can not exceed at least two") - return - } - c := args[0] //Content-Type - id := "" - if len(args) > 1 { - id = args[1] //Content-ID - } - var buffer bytes.Buffer - if _, err = io.Copy(&buffer, r); err != nil { - return - } - at := &Attachment{ - Filename: filename, - Header: textproto.MIMEHeader{}, - Content: buffer.Bytes(), - } - // Get the Content-Type to be used in the MIMEHeader - if c != "" { - at.Header.Set("Content-Type", c) - } else { - // If the Content-Type is blank, set the Content-Type to "application/octet-stream" - at.Header.Set("Content-Type", "application/octet-stream") - } - if id != "" { - at.Header.Set("Content-Disposition", fmt.Sprintf("inline;\r\n filename=\"%s\"", filename)) - at.Header.Set("Content-ID", fmt.Sprintf("<%s>", id)) - } else { - at.Header.Set("Content-Disposition", fmt.Sprintf("attachment;\r\n filename=\"%s\"", filename)) - } - at.Header.Set("Content-Transfer-Encoding", "base64") - e.Attachments = append(e.Attachments, at) - return at, nil -} - -// Send will send out the mail -func (e *Email) Send() error { - if e.Auth == nil { - e.Auth = smtp.PlainAuth(e.Identity, e.Username, e.Password, e.Host) - } - // Merge the To, Cc, and Bcc fields - to := make([]string, 0, len(e.To)+len(e.Cc)+len(e.Bcc)) - to = append(append(append(to, e.To...), e.Cc...), e.Bcc...) - // Check to make sure there is at least one recipient and one "From" address - if len(to) == 0 { - return errors.New("Must specify at least one To address") - } - - // Use the username if no From is provided - if len(e.From) == 0 { - e.From = e.Username - } - - from, err := mail.ParseAddress(e.From) - if err != nil { - return err - } - - // use mail's RFC 2047 to encode any string - e.Subject = qEncode("utf-8", e.Subject) - - raw, err := e.Bytes() - if err != nil { - return err - } - return smtp.SendMail(e.Host+":"+strconv.Itoa(e.Port), e.Auth, from.Address, to, raw) -} - -// quotePrintEncode writes the quoted-printable text to the IO Writer (according to RFC 2045) -func quotePrintEncode(w io.Writer, s string) error { - var buf [3]byte - mc := 0 - for i := 0; i < len(s); i++ { - c := s[i] - // We're assuming Unix style text formats as input (LF line break), and - // quoted-printble uses CRLF line breaks. (Literal CRs will become - // "=0D", but probably shouldn't be there to begin with!) - if c == '\n' { - io.WriteString(w, "\r\n") - mc = 0 - continue - } - - var nextOut []byte - if isPrintable(c) { - nextOut = append(buf[:0], c) - } else { - nextOut = buf[:] - qpEscape(nextOut, c) - } - - // Add a soft line break if the next (encoded) byte would push this line - // to or past the limit. - if mc+len(nextOut) >= maxLineLength { - if _, err := io.WriteString(w, "=\r\n"); err != nil { - return err - } - mc = 0 - } - - if _, err := w.Write(nextOut); err != nil { - return err - } - mc += len(nextOut) - } - // No trailing end-of-line?? Soft line break, then. TODO: is this sane? - if mc > 0 { - io.WriteString(w, "=\r\n") - } - return nil -} - -// isPrintable returns true if the rune given is "printable" according to RFC 2045, false otherwise -func isPrintable(c byte) bool { - return (c >= '!' 
&& c <= '<') || (c >= '>' && c <= '~') || (c == ' ' || c == '\n' || c == '\t') -} - -// qpEscape is a helper function for quotePrintEncode which escapes a -// non-printable byte. Expects len(dest) == 3. -func qpEscape(dest []byte, c byte) { - const nums = "0123456789ABCDEF" - dest[0] = '=' - dest[1] = nums[(c&0xf0)>>4] - dest[2] = nums[(c & 0xf)] -} - -// headerToBytes enumerates the key and values in the header, and writes the results to the IO Writer -func headerToBytes(w io.Writer, t textproto.MIMEHeader) error { - for k, v := range t { - // Write the header key - _, err := fmt.Fprintf(w, "%s:", k) - if err != nil { - return err - } - // Write each value in the header - for _, c := range v { - _, err := fmt.Fprintf(w, " %s\r\n", c) - if err != nil { - return err - } - } - } - return nil -} - -// base64Wrap encodes the attachment content, and wraps it according to RFC 2045 standards (every 76 chars) -// The output is then written to the specified io.Writer -func base64Wrap(w io.Writer, b []byte) { - // 57 raw bytes per 76-byte base64 line. - const maxRaw = 57 - // Buffer for each line, including trailing CRLF. - var buffer [maxLineLength + len("\r\n")]byte - copy(buffer[maxLineLength:], "\r\n") - // Process raw chunks until there's no longer enough to fill a line. - for len(b) >= maxRaw { - base64.StdEncoding.Encode(buffer[:], b[:maxRaw]) - w.Write(buffer[:]) - b = b[maxRaw:] - } - // Handle the last chunk of bytes. - if len(b) > 0 { - out := buffer[:base64.StdEncoding.EncodedLen(len(b))] - base64.StdEncoding.Encode(out, b) - out = append(out, "\r\n"...) - w.Write(out) - } -} - -// Encode returns the encoded-word form of s. If s is ASCII without special -// characters, it is returned unchanged. The provided charset is the IANA -// charset name of s. It is case insensitive. -// RFC 2047 encoded-word -func qEncode(charset, s string) string { - if !needsEncoding(s) { - return s - } - return encodeWord(charset, s) -} - -func needsEncoding(s string) bool { - for _, b := range s { - if (b < ' ' || b > '~') && b != '\t' { - return true - } - } - return false -} - -// encodeWord encodes a string into an encoded-word. -func encodeWord(charset, s string) string { - buf := getBuffer() - - buf.WriteString("=?") - buf.WriteString(charset) - buf.WriteByte('?') - buf.WriteByte('q') - buf.WriteByte('?') - - enc := make([]byte, 3) - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case b == ' ': - buf.WriteByte('_') - case b <= '~' && b >= '!' && b != '=' && b != '?' && b != '_': - buf.WriteByte(b) - default: - enc[0] = '=' - enc[1] = upperhex[b>>4] - enc[2] = upperhex[b&0x0f] - buf.Write(enc) - } - } - buf.WriteString("?=") - - es := buf.String() - putBuffer(buf) - return es -} - -var bufPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -func getBuffer() *bytes.Buffer { - return bufPool.Get().(*bytes.Buffer) -} - -func putBuffer(buf *bytes.Buffer) { - if buf.Len() > 1024 { - return - } - buf.Reset() - bufPool.Put(buf) -} diff --git a/vendor/github.com/astaxie/beego/utils/rand.go b/vendor/github.com/astaxie/beego/utils/rand.go deleted file mode 100644 index 344d1cd53..000000000 --- a/vendor/github.com/astaxie/beego/utils/rand.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "crypto/rand" - r "math/rand" - "time" -) - -var alphaNum = []byte(`0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`) - -// RandomCreateBytes generate random []byte by specify chars. -func RandomCreateBytes(n int, alphabets ...byte) []byte { - if len(alphabets) == 0 { - alphabets = alphaNum - } - var bytes = make([]byte, n) - var randBy bool - if num, err := rand.Read(bytes); num != n || err != nil { - r.Seed(time.Now().UnixNano()) - randBy = true - } - for i, b := range bytes { - if randBy { - bytes[i] = alphabets[r.Intn(len(alphabets))] - } else { - bytes[i] = alphabets[b%byte(len(alphabets))] - } - } - return bytes -} diff --git a/vendor/github.com/astaxie/beego/utils/safemap.go b/vendor/github.com/astaxie/beego/utils/safemap.go deleted file mode 100644 index 1793030a5..000000000 --- a/vendor/github.com/astaxie/beego/utils/safemap.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "sync" -) - -// BeeMap is a map with lock -type BeeMap struct { - lock *sync.RWMutex - bm map[interface{}]interface{} -} - -// NewBeeMap return new safemap -func NewBeeMap() *BeeMap { - return &BeeMap{ - lock: new(sync.RWMutex), - bm: make(map[interface{}]interface{}), - } -} - -// Get from maps return the k's value -func (m *BeeMap) Get(k interface{}) interface{} { - m.lock.RLock() - defer m.lock.RUnlock() - if val, ok := m.bm[k]; ok { - return val - } - return nil -} - -// Set Maps the given key and value. Returns false -// if the key is already in the map and changes nothing. -func (m *BeeMap) Set(k interface{}, v interface{}) bool { - m.lock.Lock() - defer m.lock.Unlock() - if val, ok := m.bm[k]; !ok { - m.bm[k] = v - } else if val != v { - m.bm[k] = v - } else { - return false - } - return true -} - -// Check Returns true if k is exist in the map. -func (m *BeeMap) Check(k interface{}) bool { - m.lock.RLock() - defer m.lock.RUnlock() - _, ok := m.bm[k] - return ok -} - -// Delete the given key and value. -func (m *BeeMap) Delete(k interface{}) { - m.lock.Lock() - defer m.lock.Unlock() - delete(m.bm, k) -} - -// Items returns all items in safemap. -func (m *BeeMap) Items() map[interface{}]interface{} { - m.lock.RLock() - defer m.lock.RUnlock() - r := make(map[interface{}]interface{}) - for k, v := range m.bm { - r[k] = v - } - return r -} - -// Count returns the number of items within the map. 
-func (m *BeeMap) Count() int { - m.lock.RLock() - defer m.lock.RUnlock() - return len(m.bm) -} diff --git a/vendor/github.com/astaxie/beego/utils/slice.go b/vendor/github.com/astaxie/beego/utils/slice.go deleted file mode 100644 index 8f2cef980..000000000 --- a/vendor/github.com/astaxie/beego/utils/slice.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "math/rand" - "time" -) - -type reducetype func(interface{}) interface{} -type filtertype func(interface{}) bool - -// InSlice checks given string in string slice or not. -func InSlice(v string, sl []string) bool { - for _, vv := range sl { - if vv == v { - return true - } - } - return false -} - -// InSliceIface checks given interface in interface slice. -func InSliceIface(v interface{}, sl []interface{}) bool { - for _, vv := range sl { - if vv == v { - return true - } - } - return false -} - -// SliceRandList generate an int slice from min to max. -func SliceRandList(min, max int) []int { - if max < min { - min, max = max, min - } - length := max - min + 1 - t0 := time.Now() - rand.Seed(int64(t0.Nanosecond())) - list := rand.Perm(length) - for index := range list { - list[index] += min - } - return list -} - -// SliceMerge merges interface slices to one slice. -func SliceMerge(slice1, slice2 []interface{}) (c []interface{}) { - c = append(slice1, slice2...) - return -} - -// SliceReduce generates a new slice after parsing every value by reduce function -func SliceReduce(slice []interface{}, a reducetype) (dslice []interface{}) { - for _, v := range slice { - dslice = append(dslice, a(v)) - } - return -} - -// SliceRand returns random one from slice. -func SliceRand(a []interface{}) (b interface{}) { - randnum := rand.Intn(len(a)) - b = a[randnum] - return -} - -// SliceSum sums all values in int64 slice. -func SliceSum(intslice []int64) (sum int64) { - for _, v := range intslice { - sum += v - } - return -} - -// SliceFilter generates a new slice after filter function. -func SliceFilter(slice []interface{}, a filtertype) (ftslice []interface{}) { - for _, v := range slice { - if a(v) { - ftslice = append(ftslice, v) - } - } - return -} - -// SliceDiff returns diff slice of slice1 - slice2. -func SliceDiff(slice1, slice2 []interface{}) (diffslice []interface{}) { - for _, v := range slice1 { - if !InSliceIface(v, slice2) { - diffslice = append(diffslice, v) - } - } - return -} - -// SliceIntersect returns slice that are present in all the slice1 and slice2. -func SliceIntersect(slice1, slice2 []interface{}) (diffslice []interface{}) { - for _, v := range slice1 { - if InSliceIface(v, slice2) { - diffslice = append(diffslice, v) - } - } - return -} - -// SliceChunk separates one slice to some sized slice. 
-func SliceChunk(slice []interface{}, size int) (chunkslice [][]interface{}) { - if size >= len(slice) { - chunkslice = append(chunkslice, slice) - return - } - end := size - for i := 0; i <= (len(slice) - size); i += size { - chunkslice = append(chunkslice, slice[i:end]) - end += size - } - return -} - -// SliceRange generates a new slice from begin to end with step duration of int64 number. -func SliceRange(start, end, step int64) (intslice []int64) { - for i := start; i <= end; i += step { - intslice = append(intslice, i) - } - return -} - -// SlicePad prepends size number of val into slice. -func SlicePad(slice []interface{}, size int, val interface{}) []interface{} { - if size <= len(slice) { - return slice - } - for i := 0; i < (size - len(slice)); i++ { - slice = append(slice, val) - } - return slice -} - -// SliceUnique cleans repeated values in slice. -func SliceUnique(slice []interface{}) (uniqueslice []interface{}) { - for _, v := range slice { - if !InSliceIface(v, uniqueslice) { - uniqueslice = append(uniqueslice, v) - } - } - return -} - -// SliceShuffle shuffles a slice. -func SliceShuffle(slice []interface{}) []interface{} { - for i := 0; i < len(slice); i++ { - a := rand.Intn(len(slice)) - b := rand.Intn(len(slice)) - slice[a], slice[b] = slice[b], slice[a] - } - return slice -} diff --git a/vendor/github.com/astaxie/beego/utils/utils.go b/vendor/github.com/astaxie/beego/utils/utils.go deleted file mode 100644 index ed8857873..000000000 --- a/vendor/github.com/astaxie/beego/utils/utils.go +++ /dev/null @@ -1,30 +0,0 @@ -package utils - -import ( - "os" - "path/filepath" - "runtime" - "strings" -) - -// GetGOPATHs returns all paths in GOPATH variable. -func GetGOPATHs() []string { - gopath := os.Getenv("GOPATH") - if gopath == "" && strings.Compare(runtime.Version(), "go1.8") >= 0 { - gopath = defaultGOPATH() - } - return filepath.SplitList(gopath) -} - -func defaultGOPATH() string { - env := "HOME" - if runtime.GOOS == "windows" { - env = "USERPROFILE" - } else if runtime.GOOS == "plan9" { - env = "home" - } - if home := os.Getenv(env); home != "" { - return filepath.Join(home, "go") - } - return "" -} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be6..000000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
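The beego utils helpers deleted above (InSliceIface, SliceUnique, SliceChunk, and friends) all operate on []interface{} values. A minimal usage sketch, purely illustrative and assuming the old vendored import path github.com/astaxie/beego/utils that this change prunes from the vendor tree:

package main

import (
	"fmt"

	"github.com/astaxie/beego/utils"
)

func main() {
	nums := []interface{}{1, 2, 3, 4, 5, 2}

	// Membership test on an interface slice.
	fmt.Println(utils.InSliceIface(3, nums)) // true

	// Drop repeated values, keeping first occurrences.
	fmt.Println(utils.SliceUnique(nums)) // [1 2 3 4 5]

	// Split into chunks of two elements each.
	fmt.Println(utils.SliceChunk(nums, 2)) // [[1 2] [3 4] [5 2]]
}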
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7..000000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -[... roughly 2,300 lines of sample integer test data trimmed; the file is deleted in full ...]
-12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8eb..000000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. - targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. 
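For context, the public surface of the quantile package being dropped here is small; a typical, hypothetical use of the Stream API above looks like this, with made-up sample values:

package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the 0.50, 0.90 and 0.99 quantiles with per-quantile absolute errors.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	})
	for i := 1; i <= 1000; i++ {
		q.Insert(float64(i)) // arbitrary observations
	}
	fmt.Println("p50 ~", q.Query(0.50))
	fmt.Println("p99 ~", q.Query(0.99))
	fmt.Println("samples:", q.Count())
}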
-func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. 
- copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index 47a6a46ec..000000000 --- a/vendor/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.7 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/LICENSE b/vendor/github.com/cenkalti/backoff/LICENSE deleted file mode 100644 index 89b817996..000000000 --- a/vendor/github.com/cenkalti/backoff/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/README.md deleted file mode 100644 index 55ebc98fc..000000000 --- a/vendor/github.com/cenkalti/backoff/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## Usage - -See https://godoc.org/github.com/cenkalti/backoff#pkg-examples - -## Contributing - -* I would like to keep this library as small as possible. -* Please don't send a PR without opening an issue and discussing it first. 
-* If proposed change is not a common use case, I will probably not accept it. - -[godoc]: https://godoc.org/github.com/cenkalti/backoff -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master - -[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/backoff.go b/vendor/github.com/cenkalti/backoff/backoff.go deleted file mode 100644 index 3676ee405..000000000 --- a/vendor/github.com/cenkalti/backoff/backoff.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Use Retry function for retrying operations that may fail. -// If Retry does not meet your needs, -// copy/paste the function into your project and modify as you wish. -// -// There is also Ticker type similar to time.Ticker. -// You can use it if you need to work with channels. -// -// See Examples section below for usage examples. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. - // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } - // - NextBackOff() time.Duration - - // Reset to initial state. - Reset() -} - -// Stop indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again. 
-type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/vendor/github.com/cenkalti/backoff/context.go b/vendor/github.com/cenkalti/backoff/context.go deleted file mode 100644 index 7706faa2b..000000000 --- a/vendor/github.com/cenkalti/backoff/context.go +++ /dev/null @@ -1,63 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. -type BackOffContext interface { - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func ensureContext(b BackOff) BackOffContext { - if cb, ok := b.(BackOffContext); ok { - return cb - } - return WithContext(b, context.Background()) -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - } - next := b.BackOff.NextBackOff() - if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { - return Stop - } - return next -} diff --git a/vendor/github.com/cenkalti/backoff/exponential.go b/vendor/github.com/cenkalti/backoff/exponential.go deleted file mode 100644 index a031a6597..000000000 --- a/vendor/github.com/cenkalti/backoff/exponential.go +++ /dev/null @@ -1,153 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. - -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. 
-*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop - } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. 
- return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/retry.go b/vendor/github.com/cenkalti/backoff/retry.go deleted file mode 100644 index e936a506f..000000000 --- a/vendor/github.com/cenkalti/backoff/retry.go +++ /dev/null @@ -1,82 +0,0 @@ -package backoff - -import "time" - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - var err error - var next time.Duration - var t *time.Timer - - cb := ensureContext(b) - - b.Reset() - for { - if err = operation(); err == nil { - return nil - } - - if permanent, ok := err.(*PermanentError); ok { - return permanent.Err - } - - if next = cb.NextBackOff(); next == Stop { - return err - } - - if notify != nil { - notify(err, next) - } - - if t == nil { - t = time.NewTimer(next) - defer t.Stop() - } else { - t.Reset(next) - } - - select { - case <-cb.Context().Done(): - return err - case <-t.C: - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) *PermanentError { - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/ticker.go b/vendor/github.com/cenkalti/backoff/ticker.go deleted file mode 100644 index e41084b0e..000000000 --- a/vendor/github.com/cenkalti/backoff/ticker.go +++ /dev/null @@ -1,82 +0,0 @@ -package backoff - -import ( - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. -// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOffContext - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send -// the time at times specified by the BackOff argument. Ticker is -// guaranteed to tick at least once. The channel is closed when Stop -// method is called or BackOff stops. It is not safe to manipulate the -// provided backoff policy (notably calling NextBackOff or Reset) -// while the ticker is running. -func NewTicker(b BackOff) *Ticker { - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: ensureContext(b), - stop: make(chan struct{}), - } - t.b.Reset() - go t.run() - return t -} - -// Stop turns off a ticker. 
After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel. - return - case <-t.b.Context().Done(): - return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - return time.After(next) -} diff --git a/vendor/github.com/cenkalti/backoff/tries.go b/vendor/github.com/cenkalti/backoff/tries.go deleted file mode 100644 index cfeefd9b7..000000000 --- a/vendor/github.com/cenkalti/backoff/tries.go +++ /dev/null @@ -1,35 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. -*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/ceph/go-ceph/LICENSE b/vendor/github.com/ceph/go-ceph/LICENSE deleted file mode 100644 index 08d70bfc0..000000000 --- a/vendor/github.com/ceph/go-ceph/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Noah Watkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/ceph/go-ceph/rados/conn.go b/vendor/github.com/ceph/go-ceph/rados/conn.go deleted file mode 100644 index 381fa4c1c..000000000 --- a/vendor/github.com/ceph/go-ceph/rados/conn.go +++ /dev/null @@ -1,308 +0,0 @@ -package rados - -// #cgo LDFLAGS: -lrados -// #include -// #include -import "C" - -import "unsafe" -import "bytes" - -// ClusterStat represents Ceph cluster statistics. 
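For the backoff package removed above, callers normally go through Retry or RetryNotify from retry.go rather than driving NextBackOff by hand. A minimal, hypothetical sketch (the transient failure and attempt counter are made up):

package main

import (
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	attempts := 0
	// A made-up operation that fails twice before succeeding.
	// Returning backoff.Permanent(err) instead would stop retrying at once.
	operation := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}

	// notify sees each failure and the randomized delay before the next try,
	// which follows the ~500ms * 1.5^n schedule described in exponential.go.
	notify := func(err error, next time.Duration) {
		log.Printf("attempt %d failed: %v (next try in %s)", attempts, err, next)
	}

	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		log.Fatalf("giving up: %v", err)
	}
	log.Println("operation succeeded")
}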
-type ClusterStat struct { - Kb uint64 - Kb_used uint64 - Kb_avail uint64 - Num_objects uint64 -} - -// Conn is a connection handle to a Ceph cluster. -type Conn struct { - cluster C.rados_t -} - -// PingMonitor sends a ping to a monitor and returns the reply. -func (c *Conn) PingMonitor(id string) (string, error) { - c_id := C.CString(id) - defer C.free(unsafe.Pointer(c_id)) - - var strlen C.size_t - var strout *C.char - - ret := C.rados_ping_monitor(c.cluster, c_id, &strout, &strlen) - defer C.rados_buffer_free(strout) - - if ret == 0 { - reply := C.GoStringN(strout, (C.int)(strlen)) - return reply, nil - } else { - return "", RadosError(int(ret)) - } -} - -// Connect establishes a connection to a RADOS cluster. It returns an error, -// if any. -func (c *Conn) Connect() error { - ret := C.rados_connect(c.cluster) - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// Shutdown disconnects from the cluster. -func (c *Conn) Shutdown() { - C.rados_shutdown(c.cluster) -} - -// ReadConfigFile configures the connection using a Ceph configuration file. -func (c *Conn) ReadConfigFile(path string) error { - c_path := C.CString(path) - defer C.free(unsafe.Pointer(c_path)) - ret := C.rados_conf_read_file(c.cluster, c_path) - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// ReadDefaultConfigFile configures the connection using a Ceph configuration -// file located at default locations. -func (c *Conn) ReadDefaultConfigFile() error { - ret := C.rados_conf_read_file(c.cluster, nil) - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -func (c *Conn) OpenIOContext(pool string) (*IOContext, error) { - c_pool := C.CString(pool) - defer C.free(unsafe.Pointer(c_pool)) - ioctx := &IOContext{} - ret := C.rados_ioctx_create(c.cluster, c_pool, &ioctx.ioctx) - if ret == 0 { - return ioctx, nil - } else { - return nil, RadosError(int(ret)) - } -} - -// ListPools returns the names of all existing pools. -func (c *Conn) ListPools() (names []string, err error) { - buf := make([]byte, 4096) - for { - ret := int(C.rados_pool_list(c.cluster, - (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))) - if ret < 0 { - return nil, RadosError(int(ret)) - } - - if ret > len(buf) { - buf = make([]byte, ret) - continue - } - - tmp := bytes.SplitAfter(buf[:ret-1], []byte{0}) - for _, s := range tmp { - if len(s) > 0 { - name := C.GoString((*C.char)(unsafe.Pointer(&s[0]))) - names = append(names, name) - } - } - - return names, nil - } -} - -// SetConfigOption sets the value of the configuration option identified by -// the given name. -func (c *Conn) SetConfigOption(option, value string) error { - c_opt, c_val := C.CString(option), C.CString(value) - defer C.free(unsafe.Pointer(c_opt)) - defer C.free(unsafe.Pointer(c_val)) - ret := C.rados_conf_set(c.cluster, c_opt, c_val) - if ret < 0 { - return RadosError(int(ret)) - } else { - return nil - } -} - -// GetConfigOption returns the value of the Ceph configuration option -// identified by the given name. -func (c *Conn) GetConfigOption(name string) (value string, err error) { - buf := make([]byte, 4096) - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_name)) - ret := int(C.rados_conf_get(c.cluster, c_name, - (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))) - // FIXME: ret may be -ENAMETOOLONG if the buffer is not large enough. We - // can handle this case, but we need a reliable way to test for - // -ENAMETOOLONG constant. Will the syscall/Errno stuff in Go help? 
- if ret == 0 { - value = C.GoString((*C.char)(unsafe.Pointer(&buf[0]))) - return value, nil - } else { - return "", RadosError(ret) - } -} - -// WaitForLatestOSDMap blocks the caller until the latest OSD map has been -// retrieved. -func (c *Conn) WaitForLatestOSDMap() error { - ret := C.rados_wait_for_latest_osdmap(c.cluster) - if ret < 0 { - return RadosError(int(ret)) - } else { - return nil - } -} - -// GetClusterStat returns statistics about the cluster associated with the -// connection. -func (c *Conn) GetClusterStats() (stat ClusterStat, err error) { - c_stat := C.struct_rados_cluster_stat_t{} - ret := C.rados_cluster_stat(c.cluster, &c_stat) - if ret < 0 { - return ClusterStat{}, RadosError(int(ret)) - } else { - return ClusterStat{ - Kb: uint64(c_stat.kb), - Kb_used: uint64(c_stat.kb_used), - Kb_avail: uint64(c_stat.kb_avail), - Num_objects: uint64(c_stat.num_objects), - }, nil - } -} - -// ParseCmdLineArgs configures the connection from command line arguments. -func (c *Conn) ParseCmdLineArgs(args []string) error { - // add an empty element 0 -- Ceph treats the array as the actual contents - // of argv and skips the first element (the executable name) - argc := C.int(len(args) + 1) - argv := make([]*C.char, argc) - - // make the first element a string just in case it is ever examined - argv[0] = C.CString("placeholder") - defer C.free(unsafe.Pointer(argv[0])) - - for i, arg := range args { - argv[i+1] = C.CString(arg) - defer C.free(unsafe.Pointer(argv[i+1])) - } - - ret := C.rados_conf_parse_argv(c.cluster, argc, &argv[0]) - if ret < 0 { - return RadosError(int(ret)) - } else { - return nil - } -} - -// ParseDefaultConfigEnv configures the connection from the default Ceph -// environment variable(s). -func (c *Conn) ParseDefaultConfigEnv() error { - ret := C.rados_conf_parse_env(c.cluster, nil) - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// GetFSID returns the fsid of the cluster as a hexadecimal string. The fsid -// is a unique identifier of an entire Ceph cluster. -func (c *Conn) GetFSID() (fsid string, err error) { - buf := make([]byte, 37) - ret := int(C.rados_cluster_fsid(c.cluster, - (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))) - // FIXME: the success case isn't documented correctly in librados.h - if ret == 36 { - fsid = C.GoString((*C.char)(unsafe.Pointer(&buf[0]))) - return fsid, nil - } else { - return "", RadosError(int(ret)) - } -} - -// GetInstanceID returns a globally unique identifier for the cluster -// connection instance. -func (c *Conn) GetInstanceID() uint64 { - // FIXME: are there any error cases for this? - return uint64(C.rados_get_instance_id(c.cluster)) -} - -// MakePool creates a new pool with default settings. -func (c *Conn) MakePool(name string) error { - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_name)) - ret := int(C.rados_pool_create(c.cluster, c_name)) - if ret == 0 { - return nil - } else { - return RadosError(ret) - } -} - -// DeletePool deletes a pool and all the data inside the pool. 
-func (c *Conn) DeletePool(name string) error { - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_name)) - ret := int(C.rados_pool_delete(c.cluster, c_name)) - if ret == 0 { - return nil - } else { - return RadosError(ret) - } -} - -// MonCommand sends a command to one of the monitors -func (c *Conn) MonCommand(args []byte) (buffer []byte, info string, err error) { - return c.monCommand(args, nil) -} - -// MonCommand sends a command to one of the monitors, with an input buffer -func (c *Conn) MonCommandWithInputBuffer(args, inputBuffer []byte) (buffer []byte, info string, err error) { - return c.monCommand(args, inputBuffer) -} - -func (c *Conn) monCommand(args, inputBuffer []byte) (buffer []byte, info string, err error) { - argv := C.CString(string(args)) - defer C.free(unsafe.Pointer(argv)) - - var ( - outs, outbuf *C.char - outslen, outbuflen C.size_t - ) - inbuf := C.CString(string(inputBuffer)) - inbufLen := len(inputBuffer) - defer C.free(unsafe.Pointer(inbuf)) - - ret := C.rados_mon_command(c.cluster, - &argv, 1, - inbuf, // bulk input (e.g. crush map) - C.size_t(inbufLen), // length inbuf - &outbuf, // buffer - &outbuflen, // buffer length - &outs, // status string - &outslen) - - if outslen > 0 { - info = C.GoStringN(outs, C.int(outslen)) - C.free(unsafe.Pointer(outs)) - } - if outbuflen > 0 { - buffer = C.GoBytes(unsafe.Pointer(outbuf), C.int(outbuflen)) - C.free(unsafe.Pointer(outbuf)) - } - if ret != 0 { - err = RadosError(int(ret)) - return nil, info, err - } - - return -} diff --git a/vendor/github.com/ceph/go-ceph/rados/doc.go b/vendor/github.com/ceph/go-ceph/rados/doc.go deleted file mode 100644 index 14babe93a..000000000 --- a/vendor/github.com/ceph/go-ceph/rados/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Set of wrappers around librados API. -*/ -package rados diff --git a/vendor/github.com/ceph/go-ceph/rados/ioctx.go b/vendor/github.com/ceph/go-ceph/rados/ioctx.go deleted file mode 100644 index 336aeb35e..000000000 --- a/vendor/github.com/ceph/go-ceph/rados/ioctx.go +++ /dev/null @@ -1,861 +0,0 @@ -package rados - -// #cgo LDFLAGS: -lrados -// #include -// #include -// #include -// -// char* nextChunk(char **idx) { -// char *copy; -// copy = strdup(*idx); -// *idx += strlen(*idx) + 1; -// return copy; -// } -import "C" - -import ( - "syscall" - "time" - "unsafe" -) - -// PoolStat represents Ceph pool statistics. -type PoolStat struct { - // space used in bytes - Num_bytes uint64 - // space used in KB - Num_kb uint64 - // number of objects in the pool - Num_objects uint64 - // number of clones of objects - Num_object_clones uint64 - // num_objects * num_replicas - Num_object_copies uint64 - Num_objects_missing_on_primary uint64 - // number of objects found on no OSDs - Num_objects_unfound uint64 - // number of objects replicated fewer times than they should be - // (but found on at least one OSD) - Num_objects_degraded uint64 - Num_rd uint64 - Num_rd_kb uint64 - Num_wr uint64 - Num_wr_kb uint64 -} - -// ObjectStat represents an object stat information -type ObjectStat struct { - // current length in bytes - Size uint64 - // last modification time - ModTime time.Time -} - -// LockInfo represents information on a current Ceph lock -type LockInfo struct { - NumLockers int - Exclusive bool - Tag string - Clients []string - Cookies []string - Addrs []string -} - -// IOContext represents a context for performing I/O within a pool. -type IOContext struct { - ioctx C.rados_ioctx_t -} - -// Pointer returns a uintptr representation of the IOContext. 
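The Conn methods above are typically strung together as below. A sketch, not part of this change; it assumes rados.NewConn (defined elsewhere in the deleted package, outside this hunk) and a reachable cluster with a default ceph.conf:

package main

import (
	"fmt"
	"log"

	"github.com/ceph/go-ceph/rados"
)

func main() {
	conn, err := rados.NewConn() // constructor lives outside this hunk
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Shutdown()

	// Load /etc/ceph/ceph.conf (or another default location) and connect.
	if err := conn.ReadDefaultConfigFile(); err != nil {
		log.Fatal(err)
	}
	if err := conn.Connect(); err != nil {
		log.Fatal(err)
	}

	pools, err := conn.ListPools()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pools:", pools)
}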
-func (ioctx *IOContext) Pointer() uintptr { - return uintptr(ioctx.ioctx) -} - -// SetNamespace sets the namespace for objects within this IO context (pool). -// Setting namespace to a empty or zero length string sets the pool to the default namespace. -func (ioctx *IOContext) SetNamespace(namespace string) { - var c_ns *C.char - if len(namespace) > 0 { - c_ns = C.CString(namespace) - defer C.free(unsafe.Pointer(c_ns)) - } - C.rados_ioctx_set_namespace(ioctx.ioctx, c_ns) -} - -// Write writes len(data) bytes to the object with key oid starting at byte -// offset offset. It returns an error, if any. -func (ioctx *IOContext) Write(oid string, data []byte, offset uint64) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - ret := C.rados_write(ioctx.ioctx, c_oid, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data)), - (C.uint64_t)(offset)) - - return GetRadosError(int(ret)) -} - -// WriteFull writes len(data) bytes to the object with key oid. -// The object is filled with the provided data. If the object exists, -// it is atomically truncated and then written. It returns an error, if any. -func (ioctx *IOContext) WriteFull(oid string, data []byte) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - ret := C.rados_write_full(ioctx.ioctx, c_oid, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data))) - return GetRadosError(int(ret)) -} - -// Append appends len(data) bytes to the object with key oid. -// The object is appended with the provided data. If the object exists, -// it is atomically appended to. It returns an error, if any. -func (ioctx *IOContext) Append(oid string, data []byte) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - ret := C.rados_append(ioctx.ioctx, c_oid, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data))) - return GetRadosError(int(ret)) -} - -// Read reads up to len(data) bytes from the object with key oid starting at byte -// offset offset. It returns the number of bytes read and an error, if any. -func (ioctx *IOContext) Read(oid string, data []byte, offset uint64) (int, error) { - if len(data) == 0 { - return 0, nil - } - - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - ret := C.rados_read( - ioctx.ioctx, - c_oid, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data)), - (C.uint64_t)(offset)) - - if ret >= 0 { - return int(ret), nil - } else { - return 0, GetRadosError(int(ret)) - } -} - -// Delete deletes the object with key oid. It returns an error, if any. -func (ioctx *IOContext) Delete(oid string) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - return GetRadosError(int(C.rados_remove(ioctx.ioctx, c_oid))) -} - -// Truncate resizes the object with key oid to size size. If the operation -// enlarges the object, the new area is logically filled with zeroes. If the -// operation shrinks the object, the excess data is removed. It returns an -// error, if any. -func (ioctx *IOContext) Truncate(oid string, size uint64) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - return GetRadosError(int(C.rados_trunc(ioctx.ioctx, c_oid, (C.uint64_t)(size)))) -} - -// Destroy informs librados that the I/O context is no longer in use. -// Resources associated with the context may not be freed immediately, and the -// context should not be used again after calling this method. 
-func (ioctx *IOContext) Destroy() { - C.rados_ioctx_destroy(ioctx.ioctx) -} - -// Stat returns a set of statistics about the pool associated with this I/O -// context. -func (ioctx *IOContext) GetPoolStats() (stat PoolStat, err error) { - c_stat := C.struct_rados_pool_stat_t{} - ret := C.rados_ioctx_pool_stat(ioctx.ioctx, &c_stat) - if ret < 0 { - return PoolStat{}, GetRadosError(int(ret)) - } else { - return PoolStat{ - Num_bytes: uint64(c_stat.num_bytes), - Num_kb: uint64(c_stat.num_kb), - Num_objects: uint64(c_stat.num_objects), - Num_object_clones: uint64(c_stat.num_object_clones), - Num_object_copies: uint64(c_stat.num_object_copies), - Num_objects_missing_on_primary: uint64(c_stat.num_objects_missing_on_primary), - Num_objects_unfound: uint64(c_stat.num_objects_unfound), - Num_objects_degraded: uint64(c_stat.num_objects_degraded), - Num_rd: uint64(c_stat.num_rd), - Num_rd_kb: uint64(c_stat.num_rd_kb), - Num_wr: uint64(c_stat.num_wr), - Num_wr_kb: uint64(c_stat.num_wr_kb), - }, nil - } -} - -// GetPoolName returns the name of the pool associated with the I/O context. -func (ioctx *IOContext) GetPoolName() (name string, err error) { - buf := make([]byte, 128) - for { - ret := C.rados_ioctx_get_pool_name(ioctx.ioctx, - (*C.char)(unsafe.Pointer(&buf[0])), C.unsigned(len(buf))) - if ret == -34 { // FIXME - buf = make([]byte, len(buf)*2) - continue - } else if ret < 0 { - return "", GetRadosError(int(ret)) - } - name = C.GoStringN((*C.char)(unsafe.Pointer(&buf[0])), ret) - return name, nil - } -} - -// ObjectListFunc is the type of the function called for each object visited -// by ListObjects. -type ObjectListFunc func(oid string) - -// ListObjects lists all of the objects in the pool associated with the I/O -// context, and called the provided listFn function for each object, passing -// to the function the name of the object. 
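A short sketch of basic object I/O with the IOContext methods above (Write, Read, Delete); the pool and object names are placeholders, and conn is an already-connected *rados.Conn such as the one in the previous sketch:

package radosexample

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

// writeReadDelete writes a small object, reads it back, and removes it.
func writeReadDelete(conn *rados.Conn) error {
	ioctx, err := conn.OpenIOContext("mypool")
	if err != nil {
		return err
	}
	defer ioctx.Destroy()

	if err := ioctx.Write("greeting", []byte("hello rados"), 0); err != nil {
		return err
	}

	buf := make([]byte, 32)
	n, err := ioctx.Read("greeting", buf, 0)
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n])

	return ioctx.Delete("greeting")
}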
-func (ioctx *IOContext) ListObjects(listFn ObjectListFunc) error { - var ctx C.rados_list_ctx_t - ret := C.rados_objects_list_open(ioctx.ioctx, &ctx) - if ret < 0 { - return GetRadosError(int(ret)) - } - defer func() { C.rados_objects_list_close(ctx) }() - - for { - var c_entry *C.char - ret := C.rados_objects_list_next(ctx, &c_entry, nil) - if ret == -2 { // FIXME - return nil - } else if ret < 0 { - return GetRadosError(int(ret)) - } - listFn(C.GoString(c_entry)) - } - - panic("invalid state") -} - -// Stat returns the size of the object and its last modification time -func (ioctx *IOContext) Stat(object string) (stat ObjectStat, err error) { - var c_psize C.uint64_t - var c_pmtime C.time_t - c_object := C.CString(object) - defer C.free(unsafe.Pointer(c_object)) - - ret := C.rados_stat( - ioctx.ioctx, - c_object, - &c_psize, - &c_pmtime) - - if ret < 0 { - return ObjectStat{}, GetRadosError(int(ret)) - } else { - return ObjectStat{ - Size: uint64(c_psize), - ModTime: time.Unix(int64(c_pmtime), 0), - }, nil - } -} - -// GetXattr gets an xattr with key `name`, it returns the length of -// the key read or an error if not successful -func (ioctx *IOContext) GetXattr(object string, name string, data []byte) (int, error) { - c_object := C.CString(object) - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_object)) - defer C.free(unsafe.Pointer(c_name)) - - ret := C.rados_getxattr( - ioctx.ioctx, - c_object, - c_name, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data))) - - if ret >= 0 { - return int(ret), nil - } else { - return 0, GetRadosError(int(ret)) - } -} - -// Sets an xattr for an object with key `name` with value as `data` -func (ioctx *IOContext) SetXattr(object string, name string, data []byte) error { - c_object := C.CString(object) - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_object)) - defer C.free(unsafe.Pointer(c_name)) - - ret := C.rados_setxattr( - ioctx.ioctx, - c_object, - c_name, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data))) - - return GetRadosError(int(ret)) -} - -// function that lists all the xattrs for an object, since xattrs are -// a k-v pair, this function returns a map of k-v pairs on -// success, error code on failure -func (ioctx *IOContext) ListXattrs(oid string) (map[string][]byte, error) { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - var it C.rados_xattrs_iter_t - - ret := C.rados_getxattrs(ioctx.ioctx, c_oid, &it) - if ret < 0 { - return nil, GetRadosError(int(ret)) - } - defer func() { C.rados_getxattrs_end(it) }() - m := make(map[string][]byte) - for { - var c_name, c_val *C.char - var c_len C.size_t - defer C.free(unsafe.Pointer(c_name)) - defer C.free(unsafe.Pointer(c_val)) - - ret := C.rados_getxattrs_next(it, &c_name, &c_val, &c_len) - if ret < 0 { - return nil, GetRadosError(int(ret)) - } - // rados api returns a null name,val & 0-length upon - // end of iteration - if c_name == nil { - return m, nil // stop iteration - } - m[C.GoString(c_name)] = C.GoBytes(unsafe.Pointer(c_val), (C.int)(c_len)) - } -} - -// Remove an xattr with key `name` from object `oid` -func (ioctx *IOContext) RmXattr(oid string, name string) error { - c_oid := C.CString(oid) - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_oid)) - defer C.free(unsafe.Pointer(c_name)) - - ret := C.rados_rmxattr( - ioctx.ioctx, - c_oid, - c_name) - - return GetRadosError(int(ret)) -} - -// Append the map `pairs` to the omap `oid` -func (ioctx *IOContext) SetOmap(oid string, pairs map[string][]byte) error { - 
c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - var s C.size_t - var c *C.char - ptrSize := unsafe.Sizeof(c) - - c_keys := C.malloc(C.size_t(len(pairs)) * C.size_t(ptrSize)) - c_values := C.malloc(C.size_t(len(pairs)) * C.size_t(ptrSize)) - c_lengths := C.malloc(C.size_t(len(pairs)) * C.size_t(unsafe.Sizeof(s))) - - defer C.free(unsafe.Pointer(c_keys)) - defer C.free(unsafe.Pointer(c_values)) - defer C.free(unsafe.Pointer(c_lengths)) - - i := 0 - for key, value := range pairs { - // key - c_key_ptr := (**C.char)(unsafe.Pointer(uintptr(c_keys) + uintptr(i)*ptrSize)) - *c_key_ptr = C.CString(key) - defer C.free(unsafe.Pointer(*c_key_ptr)) - - // value and its length - c_value_ptr := (**C.char)(unsafe.Pointer(uintptr(c_values) + uintptr(i)*ptrSize)) - - var c_length C.size_t - if len(value) > 0 { - *c_value_ptr = (*C.char)(unsafe.Pointer(&value[0])) - c_length = C.size_t(len(value)) - } else { - *c_value_ptr = nil - c_length = C.size_t(0) - } - - c_length_ptr := (*C.size_t)(unsafe.Pointer(uintptr(c_lengths) + uintptr(i)*ptrSize)) - *c_length_ptr = c_length - - i++ - } - - op := C.rados_create_write_op() - C.rados_write_op_omap_set( - op, - (**C.char)(c_keys), - (**C.char)(c_values), - (*C.size_t)(c_lengths), - C.size_t(len(pairs))) - - ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0) - C.rados_release_write_op(op) - - return GetRadosError(int(ret)) -} - -// OmapListFunc is the type of the function called for each omap key -// visited by ListOmapValues -type OmapListFunc func(key string, value []byte) - -// Iterate on a set of keys and their values from an omap -// `startAfter`: iterate only on the keys after this specified one -// `filterPrefix`: iterate only on the keys beginning with this prefix -// `maxReturn`: iterate no more than `maxReturn` key/value pairs -// `listFn`: the function called at each iteration -func (ioctx *IOContext) ListOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64, listFn OmapListFunc) error { - c_oid := C.CString(oid) - c_start_after := C.CString(startAfter) - c_filter_prefix := C.CString(filterPrefix) - c_max_return := C.uint64_t(maxReturn) - - defer C.free(unsafe.Pointer(c_oid)) - defer C.free(unsafe.Pointer(c_start_after)) - defer C.free(unsafe.Pointer(c_filter_prefix)) - - op := C.rados_create_read_op() - - var c_iter C.rados_omap_iter_t - var c_prval C.int - C.rados_read_op_omap_get_vals( - op, - c_start_after, - c_filter_prefix, - c_max_return, - &c_iter, - &c_prval, - ) - - ret := C.rados_read_op_operate(op, ioctx.ioctx, c_oid, 0) - - if int(ret) != 0 { - return GetRadosError(int(ret)) - } else if int(c_prval) != 0 { - return RadosError(int(c_prval)) - } - - for { - var c_key *C.char - var c_val *C.char - var c_len C.size_t - - ret = C.rados_omap_get_next(c_iter, &c_key, &c_val, &c_len) - - if int(ret) != 0 { - return GetRadosError(int(ret)) - } - - if c_key == nil { - break - } - - listFn(C.GoString(c_key), C.GoBytes(unsafe.Pointer(c_val), C.int(c_len))) - } - - C.rados_omap_get_end(c_iter) - C.rados_release_read_op(op) - - return nil -} - -// Fetch a set of keys and their values from an omap and returns then as a map -// `startAfter`: retrieve only the keys after this specified one -// `filterPrefix`: retrieve only the keys beginning with this prefix -// `maxReturn`: retrieve no more than `maxReturn` key/value pairs -func (ioctx *IOContext) GetOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64) (map[string][]byte, error) { - omap := map[string][]byte{} - - err := 
ioctx.ListOmapValues( - oid, startAfter, filterPrefix, maxReturn, - func(key string, value []byte) { - omap[key] = value - }, - ) - - return omap, err -} - -// Fetch all the keys and their values from an omap and returns then as a map -// `startAfter`: retrieve only the keys after this specified one -// `filterPrefix`: retrieve only the keys beginning with this prefix -// `iteratorSize`: internal number of keys to fetch during a read operation -func (ioctx *IOContext) GetAllOmapValues(oid string, startAfter string, filterPrefix string, iteratorSize int64) (map[string][]byte, error) { - omap := map[string][]byte{} - omapSize := 0 - - for { - err := ioctx.ListOmapValues( - oid, startAfter, filterPrefix, iteratorSize, - func(key string, value []byte) { - omap[key] = value - startAfter = key - }, - ) - - if err != nil { - return omap, err - } - - // End of omap - if len(omap) == omapSize { - break - } - - omapSize = len(omap) - } - - return omap, nil -} - -// Remove the specified `keys` from the omap `oid` -func (ioctx *IOContext) RmOmapKeys(oid string, keys []string) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - var c *C.char - ptrSize := unsafe.Sizeof(c) - - c_keys := C.malloc(C.size_t(len(keys)) * C.size_t(ptrSize)) - defer C.free(unsafe.Pointer(c_keys)) - - i := 0 - for _, key := range keys { - c_key_ptr := (**C.char)(unsafe.Pointer(uintptr(c_keys) + uintptr(i)*ptrSize)) - *c_key_ptr = C.CString(key) - defer C.free(unsafe.Pointer(*c_key_ptr)) - i++ - } - - op := C.rados_create_write_op() - C.rados_write_op_omap_rm_keys( - op, - (**C.char)(c_keys), - C.size_t(len(keys))) - - ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0) - C.rados_release_write_op(op) - - return GetRadosError(int(ret)) -} - -// Clear the omap `oid` -func (ioctx *IOContext) CleanOmap(oid string) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - op := C.rados_create_write_op() - C.rados_write_op_omap_clear(op) - - ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0) - C.rados_release_write_op(op) - - return GetRadosError(int(ret)) -} - -type Iter struct { - ctx C.rados_list_ctx_t - err error - entry string -} - -type IterToken uint32 - -// Return a Iterator object that can be used to list the object names in the current pool -func (ioctx *IOContext) Iter() (*Iter, error) { - iter := Iter{} - if cerr := C.rados_objects_list_open(ioctx.ioctx, &iter.ctx); cerr < 0 { - return nil, GetRadosError(int(cerr)) - } - return &iter, nil -} - -// Returns a token marking the current position of the iterator. To be used in combination with Iter.Seek() -func (iter *Iter) Token() IterToken { - return IterToken(C.rados_objects_list_get_pg_hash_position(iter.ctx)) -} - -func (iter *Iter) Seek(token IterToken) { - C.rados_objects_list_seek(iter.ctx, C.uint32_t(token)) -} - -// Next retrieves the next object name in the pool/namespace iterator. -// Upon a successful invocation (return value of true), the Value method should -// be used to obtain the name of the retrieved object name. When the iterator is -// exhausted, Next returns false. The Err method should used to verify whether the -// end of the iterator was reached, or the iterator received an error. 
-// -// Example: -// iter := pool.Iter() -// defer iter.Close() -// for iter.Next() { -// fmt.Printf("%v\n", iter.Value()) -// } -// return iter.Err() -// -func (iter *Iter) Next() bool { - var c_entry *C.char - if cerr := C.rados_objects_list_next(iter.ctx, &c_entry, nil); cerr < 0 { - iter.err = GetRadosError(int(cerr)) - return false - } - iter.entry = C.GoString(c_entry) - return true -} - -// Returns the current value of the iterator (object name), after a successful call to Next. -func (iter *Iter) Value() string { - if iter.err != nil { - return "" - } - return iter.entry -} - -// Checks whether the iterator has encountered an error. -func (iter *Iter) Err() error { - if iter.err == RadosErrorNotFound { - return nil - } - return iter.err -} - -// Closes the iterator cursor on the server. Be aware that iterators are not closed automatically -// at the end of iteration. -func (iter *Iter) Close() { - C.rados_objects_list_close(iter.ctx) -} - -// Take an exclusive lock on an object. -func (ioctx *IOContext) LockExclusive(oid, name, cookie, desc string, duration time.Duration, flags *byte) (int, error) { - c_oid := C.CString(oid) - c_name := C.CString(name) - c_cookie := C.CString(cookie) - c_desc := C.CString(desc) - - var c_duration C.struct_timeval - if duration != 0 { - tv := syscall.NsecToTimeval(duration.Nanoseconds()) - c_duration = C.struct_timeval{tv_sec: C.__time_t(tv.Sec), tv_usec: C.__suseconds_t(tv.Usec)} - } - - var c_flags C.uint8_t - if flags != nil { - c_flags = C.uint8_t(*flags) - } - - defer C.free(unsafe.Pointer(c_oid)) - defer C.free(unsafe.Pointer(c_name)) - defer C.free(unsafe.Pointer(c_cookie)) - defer C.free(unsafe.Pointer(c_desc)) - - ret := C.rados_lock_exclusive( - ioctx.ioctx, - c_oid, - c_name, - c_cookie, - c_desc, - &c_duration, - c_flags) - - // 0 on success, negative error code on failure - // -EBUSY if the lock is already held by another (client, cookie) pair - // -EEXIST if the lock is already held by the same (client, cookie) pair - - switch ret { - case 0: - return int(ret), nil - case -16: // EBUSY - return int(ret), nil - case -17: // EEXIST - return int(ret), nil - default: - return int(ret), RadosError(int(ret)) - } -} - -// Take a shared lock on an object. 
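For reference, a minimal usage sketch of the omap and object-listing wrappers above (GetAllOmapValues and the Iter type), as a caller of this vendored go-ceph package would have driven them. It assumes an already opened *rados.IOContext; the object name "volume-index" and the batch size of 64 are placeholders, not anything defined in the removed code.

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

// dumpOmapAndObjects prints every omap key/value stored on an index object,
// then walks all object names in the pool, mirroring the Iter example above.
func dumpOmapAndObjects(ioctx *rados.IOContext) error {
	// Fetch the whole omap, reading at most 64 key/value pairs per read op.
	pairs, err := ioctx.GetAllOmapValues("volume-index", "", "", 64)
	if err != nil {
		return err
	}
	for k, v := range pairs {
		fmt.Printf("%s = %q\n", k, v)
	}

	// List every object name in the pool.
	iter, err := ioctx.Iter()
	if err != nil {
		return err
	}
	defer iter.Close()
	for iter.Next() {
		fmt.Println(iter.Value())
	}
	return iter.Err()
}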
-func (ioctx *IOContext) LockShared(oid, name, cookie, tag, desc string, duration time.Duration, flags *byte) (int, error) { - c_oid := C.CString(oid) - c_name := C.CString(name) - c_cookie := C.CString(cookie) - c_tag := C.CString(tag) - c_desc := C.CString(desc) - - var c_duration C.struct_timeval - if duration != 0 { - tv := syscall.NsecToTimeval(duration.Nanoseconds()) - c_duration = C.struct_timeval{tv_sec: C.__time_t(tv.Sec), tv_usec: C.__suseconds_t(tv.Usec)} - } - - var c_flags C.uint8_t - if flags != nil { - c_flags = C.uint8_t(*flags) - } - - defer C.free(unsafe.Pointer(c_oid)) - defer C.free(unsafe.Pointer(c_name)) - defer C.free(unsafe.Pointer(c_cookie)) - defer C.free(unsafe.Pointer(c_tag)) - defer C.free(unsafe.Pointer(c_desc)) - - ret := C.rados_lock_shared( - ioctx.ioctx, - c_oid, - c_name, - c_cookie, - c_tag, - c_desc, - &c_duration, - c_flags) - - // 0 on success, negative error code on failure - // -EBUSY if the lock is already held by another (client, cookie) pair - // -EEXIST if the lock is already held by the same (client, cookie) pair - - switch ret { - case 0: - return int(ret), nil - case -16: // EBUSY - return int(ret), nil - case -17: // EEXIST - return int(ret), nil - default: - return int(ret), RadosError(int(ret)) - } -} - -// Release a shared or exclusive lock on an object. -func (ioctx *IOContext) Unlock(oid, name, cookie string) (int, error) { - c_oid := C.CString(oid) - c_name := C.CString(name) - c_cookie := C.CString(cookie) - - defer C.free(unsafe.Pointer(c_oid)) - defer C.free(unsafe.Pointer(c_name)) - defer C.free(unsafe.Pointer(c_cookie)) - - // 0 on success, negative error code on failure - // -ENOENT if the lock is not held by the specified (client, cookie) pair - - ret := C.rados_unlock( - ioctx.ioctx, - c_oid, - c_name, - c_cookie) - - switch ret { - case 0: - return int(ret), nil - case -2: // -ENOENT - return int(ret), nil - default: - return int(ret), RadosError(int(ret)) - } -} - -// List clients that have locked the named object lock and information about the lock. -// The number of bytes required in each buffer is put in the corresponding size out parameter. -// If any of the provided buffers are too short, -ERANGE is returned after these sizes are filled in. 
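The advisory-lock wrappers above return the raw librados status code alongside a Go error, so callers have to inspect both. A hedged sketch of the usual pattern follows; the lock name, cookie, description and 30-second duration are illustrative placeholders, not values taken from the removed code.

package example

import (
	"fmt"
	"time"

	"github.com/ceph/go-ceph/rados"
)

// withExclusiveLock takes a 30s advisory lock on oid, runs fn, then releases
// the lock. ret == -16 (EBUSY) means another (client, cookie) pair holds it;
// -17 (EEXIST) means this client already holds it, so the work can proceed.
func withExclusiveLock(ioctx *rados.IOContext, oid string, fn func() error) error {
	ret, err := ioctx.LockExclusive(oid, "example-lock", "cookie-1", "illustrative lock", 30*time.Second, nil)
	if err != nil {
		return err
	}
	if ret == -16 { // EBUSY
		return fmt.Errorf("object %s is locked by another client", oid)
	}
	defer ioctx.Unlock(oid, "example-lock", "cookie-1")
	return fn()
}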
-func (ioctx *IOContext) ListLockers(oid, name string) (*LockInfo, error) { - c_oid := C.CString(oid) - c_name := C.CString(name) - - c_tag := (*C.char)(C.malloc(C.size_t(1024))) - c_clients := (*C.char)(C.malloc(C.size_t(1024))) - c_cookies := (*C.char)(C.malloc(C.size_t(1024))) - c_addrs := (*C.char)(C.malloc(C.size_t(1024))) - - var c_exclusive C.int - c_tag_len := C.size_t(1024) - c_clients_len := C.size_t(1024) - c_cookies_len := C.size_t(1024) - c_addrs_len := C.size_t(1024) - - defer C.free(unsafe.Pointer(c_oid)) - defer C.free(unsafe.Pointer(c_name)) - defer C.free(unsafe.Pointer(c_tag)) - defer C.free(unsafe.Pointer(c_clients)) - defer C.free(unsafe.Pointer(c_cookies)) - defer C.free(unsafe.Pointer(c_addrs)) - - ret := C.rados_list_lockers( - ioctx.ioctx, - c_oid, - c_name, - &c_exclusive, - c_tag, - &c_tag_len, - c_clients, - &c_clients_len, - c_cookies, - &c_cookies_len, - c_addrs, - &c_addrs_len) - - splitCString := func(items *C.char, itemsLen C.size_t) []string { - currLen := 0 - clients := []string{} - for currLen < int(itemsLen) { - client := C.GoString(C.nextChunk(&items)) - clients = append(clients, client) - currLen += len(client) + 1 - } - return clients - } - - if ret < 0 { - return nil, RadosError(int(ret)) - } else { - return &LockInfo{int(ret), c_exclusive == 1, C.GoString(c_tag), splitCString(c_clients, c_clients_len), splitCString(c_cookies, c_cookies_len), splitCString(c_addrs, c_addrs_len)}, nil - } -} - -// Releases a shared or exclusive lock on an object, which was taken by the specified client. -func (ioctx *IOContext) BreakLock(oid, name, client, cookie string) (int, error) { - c_oid := C.CString(oid) - c_name := C.CString(name) - c_client := C.CString(client) - c_cookie := C.CString(cookie) - - defer C.free(unsafe.Pointer(c_oid)) - defer C.free(unsafe.Pointer(c_name)) - defer C.free(unsafe.Pointer(c_client)) - defer C.free(unsafe.Pointer(c_cookie)) - - // 0 on success, negative error code on failure - // -ENOENT if the lock is not held by the specified (client, cookie) pair - // -EINVAL if the client cannot be parsed - - ret := C.rados_break_lock( - ioctx.ioctx, - c_oid, - c_name, - c_client, - c_cookie) - - switch ret { - case 0: - return int(ret), nil - case -2: // -ENOENT - return int(ret), nil - case -22: // -EINVAL - return int(ret), nil - default: - return int(ret), RadosError(int(ret)) - } -} diff --git a/vendor/github.com/ceph/go-ceph/rados/rados.go b/vendor/github.com/ceph/go-ceph/rados/rados.go deleted file mode 100644 index 944e64271..000000000 --- a/vendor/github.com/ceph/go-ceph/rados/rados.go +++ /dev/null @@ -1,83 +0,0 @@ -package rados - -// #cgo LDFLAGS: -lrados -// #include -// #include -// #include -import "C" - -import ( - "fmt" - "unsafe" -) - -type RadosError int - -func (e RadosError) Error() string { - return fmt.Sprintf("rados: %s", C.GoString(C.strerror(C.int(-e)))) -} - -var RadosErrorNotFound = RadosError(-C.ENOENT) -var RadosErrorPermissionDenied = RadosError(-C.EPERM) - -func GetRadosError(err int) error { - if err == 0 { - return nil - } - return RadosError(err) -} - -// Version returns the major, minor, and patch components of the version of -// the RADOS library linked against. -func Version() (int, int, int) { - var c_major, c_minor, c_patch C.int - C.rados_version(&c_major, &c_minor, &c_patch) - return int(c_major), int(c_minor), int(c_patch) -} - -// NewConn creates a new connection object. It returns the connection and an -// error, if any. 
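The constructors below only allocate a cluster handle; configuration parsing and connecting live in other files of this vendored package that this diff also removes. A small sketch of how a handle was typically created, assuming "ceph" as the default cluster name and a caller-supplied user name (both placeholders):

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

// newClusterHandle reports the linked librados version and returns a cluster
// handle for the given user, falling back to the default client when empty.
func newClusterHandle(user string) (*rados.Conn, error) {
	major, minor, patch := rados.Version()
	fmt.Printf("linked against librados %d.%d.%d\n", major, minor, patch)

	if user == "" {
		return rados.NewConn()
	}
	return rados.NewConnWithClusterAndUser("ceph", user)
}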
-func NewConn() (*Conn, error) { - conn := &Conn{} - ret := C.rados_create(&conn.cluster, nil) - - if ret == 0 { - return conn, nil - } else { - return nil, RadosError(int(ret)) - } -} - -// NewConnWithUser creates a new connection object with a custom username. -// It returns the connection and an error, if any. -func NewConnWithUser(user string) (*Conn, error) { - c_user := C.CString(user) - defer C.free(unsafe.Pointer(c_user)) - - conn := &Conn{} - ret := C.rados_create(&conn.cluster, c_user) - - if ret == 0 { - return conn, nil - } else { - return nil, RadosError(int(ret)) - } -} - -// NewConnWithClusterAndUser creates a new connection object for a specific cluster and username. -// It returns the connection and an error, if any. -func NewConnWithClusterAndUser(clusterName string, userName string) (*Conn, error) { - c_cluster_name := C.CString(clusterName) - defer C.free(unsafe.Pointer(c_cluster_name)) - - c_name := C.CString(userName) - defer C.free(unsafe.Pointer(c_name)) - - conn := &Conn{} - ret := C.rados_create2(&conn.cluster, c_cluster_name, c_name, 0) - if ret == 0 { - return conn, nil - } else { - return nil, RadosError(int(ret)) - } -} diff --git a/vendor/github.com/ceph/go-ceph/rbd/doc.go b/vendor/github.com/ceph/go-ceph/rbd/doc.go deleted file mode 100644 index 70e70f3c6..000000000 --- a/vendor/github.com/ceph/go-ceph/rbd/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Wrappers around librbd. -*/ -package rbd diff --git a/vendor/github.com/ceph/go-ceph/rbd/rbd.go b/vendor/github.com/ceph/go-ceph/rbd/rbd.go deleted file mode 100644 index 8e3fd8104..000000000 --- a/vendor/github.com/ceph/go-ceph/rbd/rbd.go +++ /dev/null @@ -1,874 +0,0 @@ -package rbd - -// #cgo LDFLAGS: -lrbd -// #include -// #include -// #include -// #include -import "C" - -import ( - "bytes" - "errors" - "fmt" - "github.com/ceph/go-ceph/rados" - "io" - "unsafe" -) - -// -type RBDError int - -var RbdErrorImageNotOpen = errors.New("RBD image not open") -var RbdErrorNotFound = errors.New("RBD image not found") - -//Rdb feature -var RbdFeatureLayering = uint64(1 << 0) -var RbdFeatureStripingV2 = uint64(1 << 1) - -// -type ImageInfo struct { - Size uint64 - Obj_size uint64 - Num_objs uint64 - Order int - Block_name_prefix string - Parent_pool int64 - Parent_name string -} - -// -type SnapInfo struct { - Id uint64 - Size uint64 - Name string -} - -// -type Locker struct { - Client string - Cookie string - Addr string -} - -// -type Image struct { - io.Reader - io.Writer - io.Seeker - io.ReaderAt - io.WriterAt - name string - offset int64 - ioctx *rados.IOContext - image C.rbd_image_t -} - -// -type Snapshot struct { - image *Image - name string -} - -// -func split(buf []byte) (values []string) { - tmp := bytes.Split(buf[:len(buf)-1], []byte{0}) - for _, s := range tmp { - if len(s) > 0 { - go_s := C.GoString((*C.char)(unsafe.Pointer(&s[0]))) - values = append(values, go_s) - } - } - return values -} - -// -func (e RBDError) Error() string { - return fmt.Sprintf("rbd: ret=%d", e) -} - -// -func GetError(err C.int) error { - if err != 0 { - if err == -C.ENOENT { - return RbdErrorNotFound - } - return RBDError(err) - } else { - return nil - } -} - -// -func Version() (int, int, int) { - var c_major, c_minor, c_patch C.int - C.rbd_version(&c_major, &c_minor, &c_patch) - return int(c_major), int(c_minor), int(c_patch) -} - -// GetImageNames returns the list of current RBD images. 
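The rbd bindings being removed here follow the same shape: package-level helpers take a *rados.IOContext, and per-image methods require Open to have been called first. A hedged sketch that lists the images in a pool and prints each size, opening every image read-only (pool setup is assumed to happen elsewhere):

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
	"github.com/ceph/go-ceph/rbd"
)

// printImageSizes lists every RBD image in the pool and prints its size.
func printImageSizes(ioctx *rados.IOContext) error {
	names, err := rbd.GetImageNames(ioctx)
	if err != nil {
		return err
	}
	for _, name := range names {
		img := rbd.GetImage(ioctx, name)
		if err := img.Open(true); err != nil { // true selects the read-only open path
			return err
		}
		size, err := img.GetSize()
		img.Close()
		if err != nil {
			return err
		}
		fmt.Printf("%s: %d bytes\n", name, size)
	}
	return nil
}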
-func GetImageNames(ioctx *rados.IOContext) (names []string, err error) { - buf := make([]byte, 4096) - for { - size := C.size_t(len(buf)) - ret := C.rbd_list(C.rados_ioctx_t(ioctx.Pointer()), - (*C.char)(unsafe.Pointer(&buf[0])), &size) - if ret == -34 { // FIXME - buf = make([]byte, size) - continue - } else if ret < 0 { - return nil, RBDError(ret) - } - tmp := bytes.Split(buf[:size-1], []byte{0}) - for _, s := range tmp { - if len(s) > 0 { - name := C.GoString((*C.char)(unsafe.Pointer(&s[0]))) - names = append(names, name) - } - } - return names, nil - } -} - -// -func GetImage(ioctx *rados.IOContext, name string) *Image { - return &Image{ - ioctx: ioctx, - name: name, - } -} - -// int rbd_create(rados_ioctx_t io, const char *name, uint64_t size, int *order); -// int rbd_create2(rados_ioctx_t io, const char *name, uint64_t size, -// uint64_t features, int *order); -// int rbd_create3(rados_ioctx_t io, const char *name, uint64_t size, -// uint64_t features, int *order, -// uint64_t stripe_unit, uint64_t stripe_count); -func Create(ioctx *rados.IOContext, name string, size uint64, order int, - args ...uint64) (image *Image, err error) { - var ret C.int - var c_order C.int = C.int(order) - var c_name *C.char = C.CString(name) - defer C.free(unsafe.Pointer(c_name)) - - switch len(args) { - case 2: - ret = C.rbd_create3(C.rados_ioctx_t(ioctx.Pointer()), - c_name, C.uint64_t(size), - C.uint64_t(args[0]), &c_order, - C.uint64_t(args[1]), C.uint64_t(args[2])) - case 1: - ret = C.rbd_create2(C.rados_ioctx_t(ioctx.Pointer()), - c_name, C.uint64_t(size), - C.uint64_t(args[0]), &c_order) - case 0: - ret = C.rbd_create(C.rados_ioctx_t(ioctx.Pointer()), - c_name, C.uint64_t(size), &c_order) - default: - return nil, errors.New("Wrong number of argument") - } - - if ret < 0 { - return nil, RBDError(int(ret)) - } - - return &Image{ - ioctx: ioctx, - name: name, - }, nil -} - -// int rbd_clone(rados_ioctx_t p_ioctx, const char *p_name, -// const char *p_snapname, rados_ioctx_t c_ioctx, -// const char *c_name, uint64_t features, int *c_order); -// int rbd_clone2(rados_ioctx_t p_ioctx, const char *p_name, -// const char *p_snapname, rados_ioctx_t c_ioctx, -// const char *c_name, uint64_t features, int *c_order, -// uint64_t stripe_unit, int stripe_count); -func (image *Image) Clone(snapname string, c_ioctx *rados.IOContext, c_name string, features uint64, order int) (*Image, error) { - var c_order C.int = C.int(order) - var c_p_name *C.char = C.CString(image.name) - var c_p_snapname *C.char = C.CString(snapname) - var c_c_name *C.char = C.CString(c_name) - defer C.free(unsafe.Pointer(c_p_name)) - defer C.free(unsafe.Pointer(c_p_snapname)) - defer C.free(unsafe.Pointer(c_c_name)) - - ret := C.rbd_clone(C.rados_ioctx_t(image.ioctx.Pointer()), - c_p_name, c_p_snapname, - C.rados_ioctx_t(c_ioctx.Pointer()), - c_c_name, C.uint64_t(features), &c_order) - if ret < 0 { - return nil, RBDError(int(ret)) - } - - return &Image{ - ioctx: c_ioctx, - name: c_name, - }, nil -} - -// int rbd_remove(rados_ioctx_t io, const char *name); -// int rbd_remove_with_progress(rados_ioctx_t io, const char *name, -// librbd_progress_fn_t cb, void *cbdata); -func (image *Image) Remove() error { - var c_name *C.char = C.CString(image.name) - defer C.free(unsafe.Pointer(c_name)) - return GetError(C.rbd_remove(C.rados_ioctx_t(image.ioctx.Pointer()), c_name)) -} - -// int rbd_rename(rados_ioctx_t src_io_ctx, const char *srcname, const char *destname); -func (image *Image) Rename(destname string) error { - var c_srcname *C.char = 
C.CString(image.name) - var c_destname *C.char = C.CString(destname) - defer C.free(unsafe.Pointer(c_srcname)) - defer C.free(unsafe.Pointer(c_destname)) - err := RBDError(C.rbd_rename(C.rados_ioctx_t(image.ioctx.Pointer()), - c_srcname, c_destname)) - if err == 0 { - image.name = destname - return nil - } - return err -} - -// int rbd_open(rados_ioctx_t io, const char *name, rbd_image_t *image, const char *snap_name); -// int rbd_open_read_only(rados_ioctx_t io, const char *name, rbd_image_t *image, -// const char *snap_name); -func (image *Image) Open(args ...interface{}) error { - var c_image C.rbd_image_t - var c_name *C.char = C.CString(image.name) - var c_snap_name *C.char - var ret C.int - var read_only bool = false - - defer C.free(unsafe.Pointer(c_name)) - for _, arg := range args { - switch t := arg.(type) { - case string: - if t != "" { - c_snap_name = C.CString(t) - defer C.free(unsafe.Pointer(c_snap_name)) - } - case bool: - read_only = t - default: - return errors.New("Unexpected argument") - } - } - - if read_only { - ret = C.rbd_open_read_only(C.rados_ioctx_t(image.ioctx.Pointer()), c_name, - &c_image, c_snap_name) - } else { - ret = C.rbd_open(C.rados_ioctx_t(image.ioctx.Pointer()), c_name, - &c_image, c_snap_name) - } - - image.image = c_image - - return GetError(ret) -} - -// int rbd_close(rbd_image_t image); -func (image *Image) Close() error { - if image.image == nil { - return RbdErrorImageNotOpen - } - - ret := C.rbd_close(image.image) - if ret != 0 { - return RBDError(ret) - } - image.image = nil - return nil -} - -// int rbd_resize(rbd_image_t image, uint64_t size); -func (image *Image) Resize(size uint64) error { - if image.image == nil { - return RbdErrorImageNotOpen - } - - return GetError(C.rbd_resize(image.image, C.uint64_t(size))) -} - -// int rbd_stat(rbd_image_t image, rbd_image_info_t *info, size_t infosize); -func (image *Image) Stat() (info *ImageInfo, err error) { - if image.image == nil { - return nil, RbdErrorImageNotOpen - } - - var c_stat C.rbd_image_info_t - ret := C.rbd_stat(image.image, - &c_stat, C.size_t(unsafe.Sizeof(info))) - if ret < 0 { - return info, RBDError(int(ret)) - } - - return &ImageInfo{ - Size: uint64(c_stat.size), - Obj_size: uint64(c_stat.obj_size), - Num_objs: uint64(c_stat.num_objs), - Order: int(c_stat.order), - Block_name_prefix: C.GoString((*C.char)(&c_stat.block_name_prefix[0])), - Parent_pool: int64(c_stat.parent_pool), - Parent_name: C.GoString((*C.char)(&c_stat.parent_name[0]))}, nil -} - -// int rbd_get_old_format(rbd_image_t image, uint8_t *old); -func (image *Image) IsOldFormat() (old_format bool, err error) { - if image.image == nil { - return false, RbdErrorImageNotOpen - } - - var c_old_format C.uint8_t - ret := C.rbd_get_old_format(image.image, - &c_old_format) - if ret < 0 { - return false, RBDError(int(ret)) - } - - return c_old_format != 0, nil -} - -// int rbd_size(rbd_image_t image, uint64_t *size); -func (image *Image) GetSize() (size uint64, err error) { - if image.image == nil { - return 0, RbdErrorImageNotOpen - } - - ret := C.rbd_get_size(image.image, - (*C.uint64_t)(&size)) - if ret < 0 { - return 0, RBDError(int(ret)) - } - - return size, nil -} - -// int rbd_get_features(rbd_image_t image, uint64_t *features); -func (image *Image) GetFeatures() (features uint64, err error) { - if image.image == nil { - return 0, RbdErrorImageNotOpen - } - - ret := C.rbd_get_features(image.image, - (*C.uint64_t)(&features)) - if ret < 0 { - return 0, RBDError(int(ret)) - } - - return features, nil -} - -// int 
rbd_get_stripe_unit(rbd_image_t image, uint64_t *stripe_unit); -func (image *Image) GetStripeUnit() (stripe_unit uint64, err error) { - if image.image == nil { - return 0, RbdErrorImageNotOpen - } - - ret := C.rbd_get_stripe_unit(image.image, (*C.uint64_t)(&stripe_unit)) - if ret < 0 { - return 0, RBDError(int(ret)) - } - - return stripe_unit, nil -} - -// int rbd_get_stripe_count(rbd_image_t image, uint64_t *stripe_count); -func (image *Image) GetStripeCount() (stripe_count uint64, err error) { - if image.image == nil { - return 0, RbdErrorImageNotOpen - } - - ret := C.rbd_get_stripe_count(image.image, (*C.uint64_t)(&stripe_count)) - if ret < 0 { - return 0, RBDError(int(ret)) - } - - return stripe_count, nil -} - -// int rbd_get_overlap(rbd_image_t image, uint64_t *overlap); -func (image *Image) GetOverlap() (overlap uint64, err error) { - if image.image == nil { - return 0, RbdErrorImageNotOpen - } - - ret := C.rbd_get_overlap(image.image, (*C.uint64_t)(&overlap)) - if ret < 0 { - return overlap, RBDError(int(ret)) - } - - return overlap, nil -} - -// int rbd_copy(rbd_image_t image, rados_ioctx_t dest_io_ctx, const char *destname); -// int rbd_copy2(rbd_image_t src, rbd_image_t dest); -// int rbd_copy_with_progress(rbd_image_t image, rados_ioctx_t dest_p, const char *destname, -// librbd_progress_fn_t cb, void *cbdata); -// int rbd_copy_with_progress2(rbd_image_t src, rbd_image_t dest, -// librbd_progress_fn_t cb, void *cbdata); -func (image *Image) Copy(args ...interface{}) error { - if image.image == nil { - return RbdErrorImageNotOpen - } - - switch t := args[0].(type) { - case rados.IOContext: - switch t2 := args[1].(type) { - case string: - var c_destname *C.char = C.CString(t2) - defer C.free(unsafe.Pointer(c_destname)) - return RBDError(C.rbd_copy(image.image, - C.rados_ioctx_t(t.Pointer()), - c_destname)) - default: - return errors.New("Must specify destname") - } - case Image: - var dest Image = t - if dest.image == nil { - return errors.New(fmt.Sprintf("RBD image %s is not open", dest.name)) - } - return GetError(C.rbd_copy2(image.image, - dest.image)) - default: - return errors.New("Must specify either destination pool " + - "or destination image") - } -} - -// int rbd_flatten(rbd_image_t image); -func (image *Image) Flatten() error { - if image.image == nil { - return errors.New(fmt.Sprintf("RBD image %s is not open", image.name)) - } - - return GetError(C.rbd_flatten(image.image)) -} - -// ssize_t rbd_list_children(rbd_image_t image, char *pools, size_t *pools_len, -// char *images, size_t *images_len); -func (image *Image) ListChildren() (pools []string, images []string, err error) { - if image.image == nil { - return nil, nil, RbdErrorImageNotOpen - } - - var c_pools_len, c_images_len C.size_t - - ret := C.rbd_list_children(image.image, - nil, &c_pools_len, - nil, &c_images_len) - if ret == 0 { - return nil, nil, nil - } - if ret < 0 && ret != -C.ERANGE { - return nil, nil, RBDError(int(ret)) - } - - pools_buf := make([]byte, c_pools_len) - images_buf := make([]byte, c_images_len) - - ret = C.rbd_list_children(image.image, - (*C.char)(unsafe.Pointer(&pools_buf[0])), - &c_pools_len, - (*C.char)(unsafe.Pointer(&images_buf[0])), - &c_images_len) - if ret < 0 { - return nil, nil, RBDError(int(ret)) - } - - tmp := bytes.Split(pools_buf[:c_pools_len-1], []byte{0}) - for _, s := range tmp { - if len(s) > 0 { - name := C.GoString((*C.char)(unsafe.Pointer(&s[0]))) - pools = append(pools, name) - } - } - - tmp = bytes.Split(images_buf[:c_images_len-1], []byte{0}) - for _, s := 
range tmp { - if len(s) > 0 { - name := C.GoString((*C.char)(unsafe.Pointer(&s[0]))) - images = append(images, name) - } - } - - return pools, images, nil -} - -// ssize_t rbd_list_lockers(rbd_image_t image, int *exclusive, -// char *tag, size_t *tag_len, -// char *clients, size_t *clients_len, -// char *cookies, size_t *cookies_len, -// char *addrs, size_t *addrs_len); -func (image *Image) ListLockers() (tag string, lockers []Locker, err error) { - if image.image == nil { - return "", nil, RbdErrorImageNotOpen - } - - var c_exclusive C.int - var c_tag_len, c_clients_len, c_cookies_len, c_addrs_len C.size_t - var c_locker_cnt C.ssize_t - - C.rbd_list_lockers(image.image, &c_exclusive, - nil, (*C.size_t)(&c_tag_len), - nil, (*C.size_t)(&c_clients_len), - nil, (*C.size_t)(&c_cookies_len), - nil, (*C.size_t)(&c_addrs_len)) - - // no locker held on rbd image when either c_clients_len, - // c_cookies_len or c_addrs_len is *0*, so just quickly returned - if int(c_clients_len) == 0 || int(c_cookies_len) == 0 || - int(c_addrs_len) ==0 { - lockers = make([]Locker, 0) - return "", lockers, nil - } - - tag_buf := make([]byte, c_tag_len) - clients_buf := make([]byte, c_clients_len) - cookies_buf := make([]byte, c_cookies_len) - addrs_buf := make([]byte, c_addrs_len) - - c_locker_cnt = C.rbd_list_lockers(image.image, &c_exclusive, - (*C.char)(unsafe.Pointer(&tag_buf[0])), (*C.size_t)(&c_tag_len), - (*C.char)(unsafe.Pointer(&clients_buf[0])), (*C.size_t)(&c_clients_len), - (*C.char)(unsafe.Pointer(&cookies_buf[0])), (*C.size_t)(&c_cookies_len), - (*C.char)(unsafe.Pointer(&addrs_buf[0])), (*C.size_t)(&c_addrs_len)) - - // rbd_list_lockers returns negative value for errors - // and *0* means no locker held on rbd image. - // but *0* is unexpected here because first rbd_list_lockers already - // dealt with no locker case - if int(c_locker_cnt) <= 0 { - return "", nil, RBDError(int(c_locker_cnt)) - } - - clients := split(clients_buf) - cookies := split(cookies_buf) - addrs := split(addrs_buf) - - lockers = make([]Locker, c_locker_cnt) - for i := 0; i < int(c_locker_cnt); i++ { - lockers[i] = Locker{Client: clients[i], - Cookie: cookies[i], - Addr: addrs[i]} - } - - return string(tag_buf), lockers, nil -} - -// int rbd_lock_exclusive(rbd_image_t image, const char *cookie); -func (image *Image) LockExclusive(cookie string) error { - if image.image == nil { - return RbdErrorImageNotOpen - } - - var c_cookie *C.char = C.CString(cookie) - defer C.free(unsafe.Pointer(c_cookie)) - - return GetError(C.rbd_lock_exclusive(image.image, c_cookie)) -} - -// int rbd_lock_shared(rbd_image_t image, const char *cookie, const char *tag); -func (image *Image) LockShared(cookie string, tag string) error { - if image.image == nil { - return RbdErrorImageNotOpen - } - - var c_cookie *C.char = C.CString(cookie) - var c_tag *C.char = C.CString(tag) - defer C.free(unsafe.Pointer(c_cookie)) - defer C.free(unsafe.Pointer(c_tag)) - - return GetError(C.rbd_lock_shared(image.image, c_cookie, c_tag)) -} - -// int rbd_lock_shared(rbd_image_t image, const char *cookie, const char *tag); -func (image *Image) Unlock(cookie string) error { - if image.image == nil { - return RbdErrorImageNotOpen - } - - var c_cookie *C.char = C.CString(cookie) - defer C.free(unsafe.Pointer(c_cookie)) - - return GetError(C.rbd_unlock(image.image, c_cookie)) -} - -// int rbd_break_lock(rbd_image_t image, const char *client, const char *cookie); -func (image *Image) BreakLock(client string, cookie string) error { - if image.image == nil { - return 
RbdErrorImageNotOpen - } - - var c_client *C.char = C.CString(client) - var c_cookie *C.char = C.CString(cookie) - defer C.free(unsafe.Pointer(c_client)) - defer C.free(unsafe.Pointer(c_cookie)) - - return GetError(C.rbd_break_lock(image.image, c_client, c_cookie)) -} - -// ssize_t rbd_read(rbd_image_t image, uint64_t ofs, size_t len, char *buf); -// TODO: int64_t rbd_read_iterate(rbd_image_t image, uint64_t ofs, size_t len, -// int (*cb)(uint64_t, size_t, const char *, void *), void *arg); -// TODO: int rbd_read_iterate2(rbd_image_t image, uint64_t ofs, uint64_t len, -// int (*cb)(uint64_t, size_t, const char *, void *), void *arg); -// TODO: int rbd_diff_iterate(rbd_image_t image, -// const char *fromsnapname, -// uint64_t ofs, uint64_t len, -// int (*cb)(uint64_t, size_t, int, void *), void *arg); -func (image *Image) Read(data []byte) (n int, err error) { - if image.image == nil { - return 0, RbdErrorImageNotOpen - } - - if len(data) == 0 { - return 0, nil - } - - ret := int(C.rbd_read( - image.image, - (C.uint64_t)(image.offset), - (C.size_t)(len(data)), - (*C.char)(unsafe.Pointer(&data[0])))) - - if ret < 0 { - return 0, RBDError(ret) - } - - image.offset += int64(ret) - if ret < n { - return ret, io.EOF - } - - return ret, nil -} - -// ssize_t rbd_write(rbd_image_t image, uint64_t ofs, size_t len, const char *buf); -func (image *Image) Write(data []byte) (n int, err error) { - ret := int(C.rbd_write(image.image, C.uint64_t(image.offset), - C.size_t(len(data)), (*C.char)(unsafe.Pointer(&data[0])))) - - if ret >= 0 { - image.offset += int64(ret) - } - - if ret != len(data) { - err = RBDError(-1) - } - - return ret, err -} - -func (image *Image) Seek(offset int64, whence int) (int64, error) { - switch whence { - case 0: - image.offset = offset - case 1: - image.offset += offset - case 2: - stats, err := image.Stat() - if err != nil { - return 0, err - } - image.offset = int64(stats.Size) - offset - default: - return 0, errors.New("Wrong value for whence") - } - return image.offset, nil -} - -// int rbd_discard(rbd_image_t image, uint64_t ofs, uint64_t len); -func (image *Image) Discard(ofs uint64, length uint64) error { - return RBDError(C.rbd_discard(image.image, C.uint64_t(ofs), - C.uint64_t(length))) -} - -func (image *Image) ReadAt(data []byte, off int64) (n int, err error) { - if image.image == nil { - return 0, RbdErrorImageNotOpen - } - - if len(data) == 0 { - return 0, nil - } - - ret := int(C.rbd_read( - image.image, - (C.uint64_t)(off), - (C.size_t)(len(data)), - (*C.char)(unsafe.Pointer(&data[0])))) - - if ret < 0 { - return 0, RBDError(ret) - } - - if ret < n { - return ret, io.EOF - } - - return ret, nil -} - -func (image *Image) WriteAt(data []byte, off int64) (n int, err error) { - if image.image == nil { - return 0, RbdErrorImageNotOpen - } - - if len(data) == 0 { - return 0, nil - } - - ret := int(C.rbd_write(image.image, C.uint64_t(off), - C.size_t(len(data)), (*C.char)(unsafe.Pointer(&data[0])))) - - if ret != len(data) { - err = RBDError(-1) - } - - return ret, err -} - -// int rbd_flush(rbd_image_t image); -func (image *Image) Flush() error { - return GetError(C.rbd_flush(image.image)) -} - -// int rbd_snap_list(rbd_image_t image, rbd_snap_info_t *snaps, int *max_snaps); -// void rbd_snap_list_end(rbd_snap_info_t *snaps); -func (image *Image) GetSnapshotNames() (snaps []SnapInfo, err error) { - if image.image == nil { - return nil, RbdErrorImageNotOpen - } - - var c_max_snaps C.int = 0 - - ret := C.rbd_snap_list(image.image, nil, &c_max_snaps) - - c_snaps := 
make([]C.rbd_snap_info_t, c_max_snaps) - snaps = make([]SnapInfo, c_max_snaps) - - ret = C.rbd_snap_list(image.image, - &c_snaps[0], &c_max_snaps) - if ret < 0 { - return nil, RBDError(int(ret)) - } - - for i, s := range c_snaps { - snaps[i] = SnapInfo{Id: uint64(s.id), - Size: uint64(s.size), - Name: C.GoString(s.name)} - } - - C.rbd_snap_list_end(&c_snaps[0]) - return snaps[:len(snaps)-1], nil -} - -// int rbd_snap_create(rbd_image_t image, const char *snapname); -func (image *Image) CreateSnapshot(snapname string) (*Snapshot, error) { - if image.image == nil { - return nil, RbdErrorImageNotOpen - } - - var c_snapname *C.char = C.CString(snapname) - defer C.free(unsafe.Pointer(c_snapname)) - - ret := C.rbd_snap_create(image.image, c_snapname) - if ret < 0 { - return nil, RBDError(int(ret)) - } - - return &Snapshot{ - image: image, - name: snapname, - }, nil -} - -// -func (image *Image) GetSnapshot(snapname string) *Snapshot { - return &Snapshot{ - image: image, - name: snapname, - } -} - -// int rbd_get_parent_info(rbd_image_t image, -// char *parent_pool_name, size_t ppool_namelen, char *parent_name, -// size_t pnamelen, char *parent_snap_name, size_t psnap_namelen) -func (image *Image) GetParentInfo(p_pool, p_name, p_snapname []byte) error { - ret := C.rbd_get_parent_info( - image.image, - (*C.char)(unsafe.Pointer(&p_pool[0])), - (C.size_t)(len(p_pool)), - (*C.char)(unsafe.Pointer(&p_name[0])), - (C.size_t)(len(p_name)), - (*C.char)(unsafe.Pointer(&p_snapname[0])), - (C.size_t)(len(p_snapname))) - if ret == 0 { - return nil - } else { - return RBDError(int(ret)) - } -} - -// int rbd_snap_remove(rbd_image_t image, const char *snapname); -func (snapshot *Snapshot) Remove() error { - var c_snapname *C.char = C.CString(snapshot.name) - defer C.free(unsafe.Pointer(c_snapname)) - - return GetError(C.rbd_snap_remove(snapshot.image.image, c_snapname)) -} - -// int rbd_snap_rollback(rbd_image_t image, const char *snapname); -// int rbd_snap_rollback_with_progress(rbd_image_t image, const char *snapname, -// librbd_progress_fn_t cb, void *cbdata); -func (snapshot *Snapshot) Rollback() error { - var c_snapname *C.char = C.CString(snapshot.name) - defer C.free(unsafe.Pointer(c_snapname)) - - return GetError(C.rbd_snap_rollback(snapshot.image.image, c_snapname)) -} - -// int rbd_snap_protect(rbd_image_t image, const char *snap_name); -func (snapshot *Snapshot) Protect() error { - var c_snapname *C.char = C.CString(snapshot.name) - defer C.free(unsafe.Pointer(c_snapname)) - - return GetError(C.rbd_snap_protect(snapshot.image.image, c_snapname)) -} - -// int rbd_snap_unprotect(rbd_image_t image, const char *snap_name); -func (snapshot *Snapshot) Unprotect() error { - var c_snapname *C.char = C.CString(snapshot.name) - defer C.free(unsafe.Pointer(c_snapname)) - - return GetError(C.rbd_snap_unprotect(snapshot.image.image, c_snapname)) -} - -// int rbd_snap_is_protected(rbd_image_t image, const char *snap_name, -// int *is_protected); -func (snapshot *Snapshot) IsProtected() (bool, error) { - var c_is_protected C.int - var c_snapname *C.char = C.CString(snapshot.name) - defer C.free(unsafe.Pointer(c_snapname)) - - ret := C.rbd_snap_is_protected(snapshot.image.image, c_snapname, - &c_is_protected) - if ret < 0 { - return false, RBDError(int(ret)) - } - - return c_is_protected != 0, nil -} - -// int rbd_snap_set(rbd_image_t image, const char *snapname); -func (snapshot *Snapshot) Set() error { - var c_snapname *C.char = C.CString(snapshot.name) - defer C.free(unsafe.Pointer(c_snapname)) - - return 
GetError(C.rbd_snap_set(snapshot.image.image, c_snapname)) -} diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/coreos/etcd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE deleted file mode 100644 index b39ddfa5c..000000000 --- a/vendor/github.com/coreos/etcd/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go deleted file mode 100644 index 1a940c39b..000000000 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ /dev/null @@ -1,807 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: auth.proto - -/* - Package authpb is a generated protocol buffer package. - - It is generated from these files: - auth.proto - - It has these top-level messages: - User - Permission - Role -*/ -package authpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - _ "github.com/gogo/protobuf/gogoproto" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
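The generated authpb messages below carry hand-rolled Marshal/Unmarshal methods rather than relying on reflection. A small round-trip sketch of the User message, illustrative only: the field values are placeholders, and this import path disappears along with the rest of the coreos/etcd vendor tree in this change.

package example

import (
	"fmt"

	"github.com/coreos/etcd/auth/authpb"
)

// roundTripUser encodes a User with the generated Marshal and decodes it back.
func roundTripUser() (*authpb.User, error) {
	in := &authpb.User{
		Name:     []byte("alice"),
		Password: []byte("secret"),
		Roles:    []string{"root"},
	}
	buf, err := in.Marshal()
	if err != nil {
		return nil, err
	}
	out := &authpb.User{}
	if err := out.Unmarshal(buf); err != nil {
		return nil, err
	}
	fmt.Printf("decoded %d role(s) for user %s\n", len(out.Roles), out.Name)
	return out, nil
}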
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Permission_Type int32 - -const ( - READ Permission_Type = 0 - WRITE Permission_Type = 1 - READWRITE Permission_Type = 2 -) - -var Permission_Type_name = map[int32]string{ - 0: "READ", - 1: "WRITE", - 2: "READWRITE", -} -var Permission_Type_value = map[string]int32{ - "READ": 0, - "WRITE": 1, - "READWRITE": 2, -} - -func (x Permission_Type) String() string { - return proto.EnumName(Permission_Type_name, int32(x)) -} -func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} } - -// User is a single entry in the bucket authUsers -type User struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } - -// Permission is a single entity -type Permission struct { - PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` -} - -func (m *Permission) Reset() { *m = Permission{} } -func (m *Permission) String() string { return proto.CompactTextString(m) } -func (*Permission) ProtoMessage() {} -func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } - -// Role is a single entry in the bucket authRoles -type Role struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"` -} - -func (m *Role) Reset() { *m = Role{} } -func (m *Role) String() string { return proto.CompactTextString(m) } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} } - -func init() { - proto.RegisterType((*User)(nil), "authpb.User") - proto.RegisterType((*Permission)(nil), "authpb.Permission") - proto.RegisterType((*Role)(nil), "authpb.Role") - proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) -} -func (m *User) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *User) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *Permission) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], 
nil -} - -func (m *Permission) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PermType != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) - } - if len(m.Key) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - return i, nil -} - -func (m *Role) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.KeyPermission) > 0 { - for _, msg := range m.KeyPermission { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *User) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovAuth(uint64(l)) - } - } - return n -} - -func (m *Permission) Size() (n int) { - var l int - _ = l - if m.PermType != 0 { - n += 1 + sovAuth(uint64(m.PermType)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func (m *Role) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.KeyPermission) > 0 { - for _, e := range m.KeyPermission { - l = e.Size() - n += 1 + l + sovAuth(uint64(l)) - } - } - return n -} - -func sovAuth(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozAuth(x uint64) (n int) { - return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *User) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: User: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + 
byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...) - if m.Password == nil { - m.Password = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Permission) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Permission: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType) - } - m.PermType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PermType |= (Permission_Type(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Role: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
- if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KeyPermission = append(m.KeyPermission, &Permission{}) - if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAuth(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthAuth - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipAuth(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } - -var fileDescriptorAuth = []byte{ - // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, - 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, - 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, - 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, - 0x64, 0x43, 
0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d, - 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd, - 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51, - 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef, - 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00, - 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc, - 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70, - 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41, - 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc, - 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b, - 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1, - 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee, - 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4, - 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto deleted file mode 100644 index 001d33435..000000000 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; -package authpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -// User is a single entry in the bucket authUsers -message User { - bytes name = 1; - bytes password = 2; - repeated string roles = 3; -} - -// Permission is a single entity -message Permission { - enum Type { - READ = 0; - WRITE = 1; - READWRITE = 2; - } - Type permType = 1; - - bytes key = 2; - bytes range_end = 3; -} - -// Role is a single entry in the bucket authRoles -message Role { - bytes name = 1; - - repeated Permission keyPermission = 2; -} diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md deleted file mode 100644 index 376bfba76..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# etcd/clientv3 - -[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) - -`etcd/clientv3` is the official Go etcd client for v3. - -## Install - -```bash -go get github.com/coreos/etcd/clientv3 -``` - -## Get started - -Create client using `clientv3.New`: - -```go -cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, -}) -if err != nil { - // handle error! -} -defer cli.Close() -``` - -etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls. And `clientv3` uses -[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. -If the client is not closed, the connection will have leaky goroutines. 
To specify client request timeout, -pass `context.WithTimeout` to APIs: - -```go -ctx, cancel := context.WithTimeout(context.Background(), timeout) -resp, err := cli.Put(ctx, "sample_key", "sample_value") -cancel() -if err != nil { - // handle error! -} -// use the response -``` - -etcd uses `cmd/vendor` directory to store external dependencies, which are -to be compiled into etcd release binaries. `client` can be imported without -vendoring. For full compatibility, it is recommended to vendor builds using -etcd's vendored packages, using tools like godep, as in -[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). -For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). - -## Error Handling - -etcd client returns 2 types of errors: - -1. context error: canceled or deadline exceeded. -2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes). - -Here is the example code to handle client errors: - -```go -resp, err := cli.Put(ctx, "", "") -if err != nil { - switch err { - case context.Canceled: - log.Fatalf("ctx is canceled by another routine: %v", err) - case context.DeadlineExceeded: - log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) - case rpctypes.ErrEmptyKey: - log.Fatalf("client-side error: %v", err) - default: - log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) - } -} -``` - -## Metrics - -The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go). - -## Namespacing - -The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. - -## Examples - -More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go deleted file mode 100644 index 7545bb6ca..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
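The clientv3 README deleted just above documents client construction, per-request timeouts, and error classification for the old vendored `github.com/coreos/etcd/clientv3` package. Since this change drops the vendored copy in favour of the upstream `go.etcd.io/etcd/client/v3` module, a minimal sketch of the same flow against that module follows; the endpoint address, timeout values, and sample key/value are illustrative placeholders, not values taken from this repository.

```go
// Minimal sketch, assuming the upstream go.etcd.io/etcd/client/v3 module now
// replaces the vendored coreos clientv3 copy deleted in this diff.
// Endpoints, timeouts, and the sample key/value are placeholders.
package main

import (
	"context"
	"errors"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	// Close the client to avoid leaking goroutines, as the removed README advises.
	defer cli.Close()

	// Per-request timeout via context, mirroring the removed README example.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	_, err = cli.Put(ctx, "sample_key", "sample_value")
	switch {
	case err == nil:
		log.Println("put ok")
	case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded):
		log.Fatalf("context error: %v", err)
	default:
		// Remaining failures are gRPC/etcd errors; inspect with status.FromError if needed.
		log.Fatalf("etcd error: %v", err)
	}
}
```

As in the deleted README, failures split into context errors (caller-side cancellation or deadline) and gRPC/etcd errors, and the client must always be closed to release its goroutines.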
- -package clientv3 - -import ( - "context" - "fmt" - "strings" - - "github.com/coreos/etcd/auth/authpb" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" -) - -type ( - AuthEnableResponse pb.AuthEnableResponse - AuthDisableResponse pb.AuthDisableResponse - AuthenticateResponse pb.AuthenticateResponse - AuthUserAddResponse pb.AuthUserAddResponse - AuthUserDeleteResponse pb.AuthUserDeleteResponse - AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse - AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse - AuthUserGetResponse pb.AuthUserGetResponse - AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse - AuthRoleAddResponse pb.AuthRoleAddResponse - AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse - AuthRoleGetResponse pb.AuthRoleGetResponse - AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse - AuthRoleDeleteResponse pb.AuthRoleDeleteResponse - AuthUserListResponse pb.AuthUserListResponse - AuthRoleListResponse pb.AuthRoleListResponse - - PermissionType authpb.Permission_Type - Permission authpb.Permission -) - -const ( - PermRead = authpb.READ - PermWrite = authpb.WRITE - PermReadWrite = authpb.READWRITE -) - -type Auth interface { - // AuthEnable enables auth of an etcd cluster. - AuthEnable(ctx context.Context) (*AuthEnableResponse, error) - - // AuthDisable disables auth of an etcd cluster. - AuthDisable(ctx context.Context) (*AuthDisableResponse, error) - - // UserAdd adds a new user to an etcd cluster. - UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) - - // UserDelete deletes a user from an etcd cluster. - UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) - - // UserChangePassword changes a password of a user. - UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) - - // UserGrantRole grants a role to a user. - UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) - - // UserGet gets a detailed information of a user. - UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) - - // UserList gets a list of all users. - UserList(ctx context.Context) (*AuthUserListResponse, error) - - // UserRevokeRole revokes a role of a user. - UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) - - // RoleAdd adds a new role to an etcd cluster. - RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) - - // RoleGrantPermission grants a permission to a role. - RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) - - // RoleGet gets a detailed information of a role. - RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) - - // RoleList gets a list of all roles. - RoleList(ctx context.Context) (*AuthRoleListResponse, error) - - // RoleRevokePermission revokes a permission from a role. - RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) - - // RoleDelete deletes a role. 
- RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) -} - -type auth struct { - remote pb.AuthClient - callOpts []grpc.CallOption -} - -func NewAuth(c *Client) Auth { - api := &auth{remote: RetryAuthClient(c)} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) - return (*AuthEnableResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) - return (*AuthDisableResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { - resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) - return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { - resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { - resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) - return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) - return (*AuthUserGetResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) - return (*AuthUserListResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { - resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) - return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { - resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) - return (*AuthRoleAddResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { - perm := &authpb.Permission{ - Key: []byte(key), - RangeEnd: []byte(rangeEnd), - PermType: authpb.Permission_Type(permType), - } - resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) 
- return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) - return (*AuthRoleGetResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) - return (*AuthRoleListResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...) - return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { - resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) - return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) -} - -func StrToPermissionType(s string) (PermissionType, error) { - val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] - if ok { - return PermissionType(val), nil - } - return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) -} - -type authenticator struct { - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient - callOpts []grpc.CallOption -} - -func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthenticateResponse)(resp), toErr(ctx, err) -} - -func (auth *authenticator) close() { - auth.conn.Close() -} - -func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return nil, err - } - - api := &authenticator{ - conn: conn, - remote: pb.NewAuthClient(conn), - } - if c != nil { - api.callOpts = c.callOpts - } - return api, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go deleted file mode 100644 index 713280776..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
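The auth.go file deleted above exposed the clientv3 `Auth` surface (UserAdd, RoleAdd, RoleGrantPermission, UserGrantRole, AuthEnable, and related calls). The same methods exist with the same shape on the replacement client; the sketch below shows a hedged auth bootstrap sequence, with the user, role, and key prefix invented purely for illustration.

```go
// Hedged sketch: the Auth methods removed above keep the same signatures on
// go.etcd.io/etcd/client/v3. The user, roles, and "/config/" key prefix are
// invented for illustration and are not taken from this repository.
package main

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func bootstrapAuth(ctx context.Context, cli *clientv3.Client) error {
	// etcd requires a "root" user holding the "root" role before auth can be enabled.
	if _, err := cli.UserAdd(ctx, "root", "rootpw"); err != nil {
		return err
	}
	if _, err := cli.RoleAdd(ctx, "root"); err != nil {
		return err
	}
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
		return err
	}

	// A limited role granting read/write under an illustrative "/config/" prefix.
	if _, err := cli.RoleAdd(ctx, "config-rw"); err != nil {
		return err
	}
	end := clientv3.GetPrefixRangeEnd("/config/")
	if _, err := cli.RoleGrantPermission(ctx, "config-rw", "/config/", end,
		clientv3.PermissionType(clientv3.PermReadWrite)); err != nil {
		return err
	}

	_, err := cli.AuthEnable(ctx)
	return err
}
```

Once AuthEnable succeeds, clients reconnect with the Username and Password fields of the client Config (the same fields visible in the deleted config.go further down) so that an auth token is obtained on dial.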
- -package clientv3 - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "strconv" - "strings" - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -var ( - ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") - ErrOldCluster = errors.New("etcdclient: old cluster version") -) - -// Client provides and manages an etcd v3 client session. -type Client struct { - Cluster - KV - Lease - Watcher - Auth - Maintenance - - conn *grpc.ClientConn - dialerrc chan error - - cfg Config - creds *credentials.TransportCredentials - balancer *healthBalancer - mu *sync.Mutex - - ctx context.Context - cancel context.CancelFunc - - // Username is a user name for authentication. - Username string - // Password is a password for authentication. - Password string - // tokenCred is an instance of WithPerRPCCredentials()'s argument - tokenCred *authTokenCredential - - callOpts []grpc.CallOption -} - -// New creates a new etcdv3 client from a given configuration. -func New(cfg Config) (*Client, error) { - if len(cfg.Endpoints) == 0 { - return nil, ErrNoAvailableEndpoints - } - - return newClient(&cfg) -} - -// NewCtxClient creates a client with a context but no underlying grpc -// connection. This is useful for embedded cases that override the -// service interface implementations and do not need connection management. -func NewCtxClient(ctx context.Context) *Client { - cctx, cancel := context.WithCancel(ctx) - return &Client{ctx: cctx, cancel: cancel} -} - -// NewFromURL creates a new etcdv3 client from a URL. -func NewFromURL(url string) (*Client, error) { - return New(Config{Endpoints: []string{url}}) -} - -// Close shuts down the client's etcd connections. -func (c *Client) Close() error { - c.cancel() - c.Watcher.Close() - c.Lease.Close() - if c.conn != nil { - return toErr(c.ctx, c.conn.Close()) - } - return c.ctx.Err() -} - -// Ctx is a context for "out of band" messages (e.g., for sending -// "clean up" message when another context is canceled). It is -// canceled on client Close(). -func (c *Client) Ctx() context.Context { return c.ctx } - -// Endpoints lists the registered endpoints for the client. -func (c *Client) Endpoints() (eps []string) { - // copy the slice; protect original endpoints from being changed - eps = make([]string, len(c.cfg.Endpoints)) - copy(eps, c.cfg.Endpoints) - return -} - -// SetEndpoints updates client's endpoints. -func (c *Client) SetEndpoints(eps ...string) { - c.mu.Lock() - c.cfg.Endpoints = eps - c.mu.Unlock() - c.balancer.updateAddrs(eps...) - - // updating notifyCh can trigger new connections, - // need update addrs if all connections are down - // or addrs does not include pinAddr. - c.balancer.mu.RLock() - update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr) - c.balancer.mu.RUnlock() - if update { - select { - case c.balancer.updateAddrsC <- notifyNext: - case <-c.balancer.stopc: - } - } -} - -// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. -func (c *Client) Sync(ctx context.Context) error { - mresp, err := c.MemberList(ctx) - if err != nil { - return err - } - var eps []string - for _, m := range mresp.Members { - eps = append(eps, m.ClientURLs...) - } - c.SetEndpoints(eps...) 
- return nil -} - -func (c *Client) autoSync() { - if c.cfg.AutoSyncInterval == time.Duration(0) { - return - } - - for { - select { - case <-c.ctx.Done(): - return - case <-time.After(c.cfg.AutoSyncInterval): - ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) - err := c.Sync(ctx) - cancel() - if err != nil && err != c.ctx.Err() { - logger.Println("Auto sync endpoints failed:", err) - } - } - } -} - -type authTokenCredential struct { - token string - tokenMu *sync.RWMutex -} - -func (cred authTokenCredential) RequireTransportSecurity() bool { - return false -} - -func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { - cred.tokenMu.RLock() - defer cred.tokenMu.RUnlock() - return map[string]string{ - "token": cred.token, - }, nil -} - -func parseEndpoint(endpoint string) (proto string, host string, scheme string) { - proto = "tcp" - host = endpoint - url, uerr := url.Parse(endpoint) - if uerr != nil || !strings.Contains(endpoint, "://") { - return proto, host, scheme - } - scheme = url.Scheme - - // strip scheme:// prefix since grpc dials by host - host = url.Host - switch url.Scheme { - case "http", "https": - case "unix", "unixs": - proto = "unix" - host = url.Host + url.Path - default: - proto, host = "", "" - } - return proto, host, scheme -} - -func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { - creds = c.creds - switch scheme { - case "unix": - case "http": - creds = nil - case "https", "unixs": - if creds != nil { - break - } - tlsconfig := &tls.Config{} - emptyCreds := credentials.NewTLS(tlsconfig) - creds = &emptyCreds - default: - creds = nil - } - return creds -} - -// dialSetupOpts gives the dial opts prior to any authentication -func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) { - if c.cfg.DialTimeout > 0 { - opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} - } - if c.cfg.DialKeepAliveTime > 0 { - params := keepalive.ClientParameters{ - Time: c.cfg.DialKeepAliveTime, - Timeout: c.cfg.DialKeepAliveTimeout, - } - opts = append(opts, grpc.WithKeepaliveParams(params)) - } - opts = append(opts, dopts...) - - f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) - if host == "" && endpoint != "" { - // dialing an endpoint not in the balancer; use - // endpoint passed into dial - proto, host, _ = parseEndpoint(endpoint) - } - if proto == "" { - return nil, fmt.Errorf("unknown scheme for %q", host) - } - select { - case <-c.ctx.Done(): - return nil, c.ctx.Err() - default: - } - dialer := &net.Dialer{Timeout: t} - conn, err := dialer.DialContext(c.ctx, proto, host) - if err != nil { - select { - case c.dialerrc <- err: - default: - } - } - return conn, err - } - opts = append(opts, grpc.WithDialer(f)) - - creds := c.creds - if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 { - creds = c.processCreds(scheme) - } - if creds != nil { - opts = append(opts, grpc.WithTransportCredentials(*creds)) - } else { - opts = append(opts, grpc.WithInsecure()) - } - - return opts -} - -// Dial connects to a single endpoint using the client's config. 
-func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) { - return c.dial(endpoint) -} - -func (c *Client) getToken(ctx context.Context) error { - var err error // return last error in a case of fail - var auth *authenticator - - for i := 0; i < len(c.cfg.Endpoints); i++ { - endpoint := c.cfg.Endpoints[i] - host := getHost(endpoint) - // use dial options without dopts to avoid reusing the client balancer - auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c) - if err != nil { - continue - } - defer auth.close() - - var resp *AuthenticateResponse - resp, err = auth.authenticate(ctx, c.Username, c.Password) - if err != nil { - continue - } - - c.tokenCred.tokenMu.Lock() - c.tokenCred.token = resp.Token - c.tokenCred.tokenMu.Unlock() - - return nil - } - - return err -} - -func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts := c.dialSetupOpts(endpoint, dopts...) - host := getHost(endpoint) - if c.Username != "" && c.Password != "" { - c.tokenCred = &authTokenCredential{ - tokenMu: &sync.RWMutex{}, - } - - ctx := c.ctx - if c.cfg.DialTimeout > 0 { - cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout) - defer cancel() - ctx = cctx - } - - err := c.getToken(ctx) - if err != nil { - if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { - if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = context.DeadlineExceeded - } - return nil, err - } - } else { - opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) - } - } - - opts = append(opts, c.cfg.DialOptions...) - - conn, err := grpc.DialContext(c.ctx, host, opts...) - if err != nil { - return nil, err - } - return conn, nil -} - -// WithRequireLeader requires client requests to only succeed -// when the cluster has a leader. 
-func WithRequireLeader(ctx context.Context) context.Context { - md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewOutgoingContext(ctx, md) -} - -func newClient(cfg *Config) (*Client, error) { - if cfg == nil { - cfg = &Config{} - } - var creds *credentials.TransportCredentials - if cfg.TLS != nil { - c := credentials.NewTLS(cfg.TLS) - creds = &c - } - - // use a temporary skeleton client to bootstrap first connection - baseCtx := context.TODO() - if cfg.Context != nil { - baseCtx = cfg.Context - } - - ctx, cancel := context.WithCancel(baseCtx) - client := &Client{ - conn: nil, - dialerrc: make(chan error, 1), - cfg: *cfg, - creds: creds, - ctx: ctx, - cancel: cancel, - mu: new(sync.Mutex), - callOpts: defaultCallOpts, - } - if cfg.Username != "" && cfg.Password != "" { - client.Username = cfg.Username - client.Password = cfg.Password - } - if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { - if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { - return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) - } - callOpts := []grpc.CallOption{ - defaultFailFast, - defaultMaxCallSendMsgSize, - defaultMaxCallRecvMsgSize, - } - if cfg.MaxCallSendMsgSize > 0 { - callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) - } - if cfg.MaxCallRecvMsgSize > 0 { - callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) - } - client.callOpts = callOpts - } - - client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) { - return grpcHealthCheck(client, ep) - }) - - // use Endpoints[0] so that for https:// without any tls config given, then - // grpc will assume the certificate server name is the endpoint host. 
- conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) - if err != nil { - client.cancel() - client.balancer.Close() - return nil, err - } - client.conn = conn - - // wait for a connection - if cfg.DialTimeout > 0 { - hasConn := false - waitc := time.After(cfg.DialTimeout) - select { - case <-client.balancer.ready(): - hasConn = true - case <-ctx.Done(): - case <-waitc: - } - if !hasConn { - err := context.DeadlineExceeded - select { - case err = <-client.dialerrc: - default: - } - client.cancel() - client.balancer.Close() - conn.Close() - return nil, err - } - } - - client.Cluster = NewCluster(client) - client.KV = NewKV(client) - client.Lease = NewLease(client) - client.Watcher = NewWatcher(client) - client.Auth = NewAuth(client) - client.Maintenance = NewMaintenance(client) - - if cfg.RejectOldCluster { - if err := client.checkVersion(); err != nil { - client.Close() - return nil, err - } - } - - go client.autoSync() - return client, nil -} - -func (c *Client) checkVersion() (err error) { - var wg sync.WaitGroup - errc := make(chan error, len(c.cfg.Endpoints)) - ctx, cancel := context.WithCancel(c.ctx) - if c.cfg.DialTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) - } - wg.Add(len(c.cfg.Endpoints)) - for _, ep := range c.cfg.Endpoints { - // if cluster is current, any endpoint gives a recent version - go func(e string) { - defer wg.Done() - resp, rerr := c.Status(ctx, e) - if rerr != nil { - errc <- rerr - return - } - vs := strings.Split(resp.Version, ".") - maj, min := 0, 0 - if len(vs) >= 2 { - maj, _ = strconv.Atoi(vs[0]) - min, rerr = strconv.Atoi(vs[1]) - } - if maj < 3 || (maj == 3 && min < 2) { - rerr = ErrOldCluster - } - errc <- rerr - }(ep) - } - // wait for success - for i := 0; i < len(c.cfg.Endpoints); i++ { - if err = <-errc; err == nil { - break - } - } - cancel() - wg.Wait() - return err -} - -// ActiveConnection returns the current in-use connection -func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } - -// isHaltErr returns true if the given error and context indicate no forward -// progress can be made, even after reconnecting. -func isHaltErr(ctx context.Context, err error) bool { - if ctx != nil && ctx.Err() != nil { - return true - } - if err == nil { - return false - } - ev, _ := status.FromError(err) - // Unavailable codes mean the system will be right back. - // (e.g., can't connect, lost leader) - // Treat Internal codes as if something failed, leaving the - // system in an inconsistent state, but retrying could make progress. - // (e.g., failed in middle of send, corrupted frame) - // TODO: are permanent Internal errors possible from grpc? - return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal -} - -// isUnavailableErr returns true if the given error is an unavailable error -func isUnavailableErr(ctx context.Context, err error) bool { - if ctx != nil && ctx.Err() != nil { - return false - } - if err == nil { - return false - } - ev, _ := status.FromError(err) - // Unavailable codes mean the system will be right back. 
- // (e.g., can't connect, lost leader) - return ev.Code() == codes.Unavailable -} - -func toErr(ctx context.Context, err error) error { - if err == nil { - return nil - } - err = rpctypes.Error(err) - if _, ok := err.(rpctypes.EtcdError); ok { - return err - } - ev, _ := status.FromError(err) - code := ev.Code() - switch code { - case codes.DeadlineExceeded: - fallthrough - case codes.Canceled: - if ctx.Err() != nil { - err = ctx.Err() - } - case codes.Unavailable: - case codes.FailedPrecondition: - err = grpc.ErrClientConnClosing - } - return err -} - -func canceledByCaller(stopCtx context.Context, err error) bool { - if stopCtx.Err() == nil || err == nil { - return false - } - - return err == context.Canceled || err == context.DeadlineExceeded -} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go deleted file mode 100644 index 785672be8..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" - - "google.golang.org/grpc" -) - -type ( - Member pb.Member - MemberListResponse pb.MemberListResponse - MemberAddResponse pb.MemberAddResponse - MemberRemoveResponse pb.MemberRemoveResponse - MemberUpdateResponse pb.MemberUpdateResponse -) - -type Cluster interface { - // MemberList lists the current cluster membership. - MemberList(ctx context.Context) (*MemberListResponse, error) - - // MemberAdd adds a new member into the cluster. - MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) - - // MemberRemove removes an existing member from the cluster. - MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) - - // MemberUpdate updates the peer addresses of the member. - MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) -} - -type cluster struct { - remote pb.ClusterClient - callOpts []grpc.CallOption -} - -func NewCluster(c *Client) Cluster { - api := &cluster{remote: RetryClusterClient(c)} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { - api := &cluster{remote: remote} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { - // fail-fast before panic in rafthttp - if _, err := types.NewURLs(peerAddrs); err != nil { - return nil, err - } - - r := &pb.MemberAddRequest{PeerURLs: peerAddrs} - resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) 
- if err != nil { - return nil, toErr(ctx, err) - } - return (*MemberAddResponse)(resp), nil -} - -func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { - r := &pb.MemberRemoveRequest{ID: id} - resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*MemberRemoveResponse)(resp), nil -} - -func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { - // fail-fast before panic in rafthttp - if _, err := types.NewURLs(peerAddrs); err != nil { - return nil, err - } - - // it is safe to retry on update. - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) - if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { - // it is safe to retry on list. - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...) - if err == nil { - return (*MemberListResponse)(resp), nil - } - return nil, toErr(ctx, err) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go deleted file mode 100644 index 41e80c1da..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -// CompactOp represents a compact operation. -type CompactOp struct { - revision int64 - physical bool -} - -// CompactOption configures compact operation. -type CompactOption func(*CompactOp) - -func (op *CompactOp) applyCompactOpts(opts []CompactOption) { - for _, opt := range opts { - opt(op) - } -} - -// OpCompact wraps slice CompactOption to create a CompactOp. -func OpCompact(rev int64, opts ...CompactOption) CompactOp { - ret := CompactOp{revision: rev} - ret.applyCompactOpts(opts) - return ret -} - -func (op CompactOp) toRequest() *pb.CompactionRequest { - return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} -} - -// WithCompactPhysical makes Compact wait until all compacted entries are -// removed from the etcd server's storage. -func WithCompactPhysical() CompactOption { - return func(op *CompactOp) { op.physical = true } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go deleted file mode 100644 index b5f0a2552..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type CompareTarget int -type CompareResult int - -const ( - CompareVersion CompareTarget = iota - CompareCreated - CompareModified - CompareValue -) - -type Cmp pb.Compare - -func Compare(cmp Cmp, result string, v interface{}) Cmp { - var r pb.Compare_CompareResult - - switch result { - case "=": - r = pb.Compare_EQUAL - case "!=": - r = pb.Compare_NOT_EQUAL - case ">": - r = pb.Compare_GREATER - case "<": - r = pb.Compare_LESS - default: - panic("Unknown result op") - } - - cmp.Result = r - switch cmp.Target { - case pb.Compare_VALUE: - val, ok := v.(string) - if !ok { - panic("bad compare value") - } - cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)} - case pb.Compare_VERSION: - cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)} - case pb.Compare_CREATE: - cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} - case pb.Compare_MOD: - cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} - case pb.Compare_LEASE: - cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)} - default: - panic("Unknown compare type") - } - return cmp -} - -func Value(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VALUE} -} - -func Version(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VERSION} -} - -func CreateRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_CREATE} -} - -func ModRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_MOD} -} - -// LeaseValue compares a key's LeaseID to a value of your choosing. The empty -// LeaseID is 0, otherwise known as `NoLease`. -func LeaseValue(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_LEASE} -} - -// KeyBytes returns the byte slice holding with the comparison key. -func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } - -// WithKeyBytes sets the byte slice for the comparison key. -func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } - -// ValueBytes returns the byte slice holding the comparison value, if any. -func (cmp *Cmp) ValueBytes() []byte { - if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { - return tu.Value - } - return nil -} - -// WithValueBytes sets the byte slice for the comparison's value. -func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } - -// WithRange sets the comparison to scan the range [key, end). -func (cmp Cmp) WithRange(end string) Cmp { - cmp.RangeEnd = []byte(end) - return cmp -} - -// WithPrefix sets the comparison to scan all keys prefixed by the key. -func (cmp Cmp) WithPrefix() Cmp { - cmp.RangeEnd = getPrefix(cmp.Key) - return cmp -} - -// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. -func mustInt64(val interface{}) int64 { - if v, ok := val.(int64); ok { - return v - } - if v, ok := val.(int); ok { - return int64(v) - } - panic("bad value") -} - -// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an -// int64 otherwise. 
-func mustInt64orLeaseID(val interface{}) int64 { - if v, ok := val.(LeaseID); ok { - return int64(v) - } - return mustInt64(val) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go deleted file mode 100644 index 79d6e2a98..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "crypto/tls" - "time" - - "google.golang.org/grpc" -) - -type Config struct { - // Endpoints is a list of URLs. - Endpoints []string `json:"endpoints"` - - // AutoSyncInterval is the interval to update endpoints with its latest members. - // 0 disables auto-sync. By default auto-sync is disabled. - AutoSyncInterval time.Duration `json:"auto-sync-interval"` - - // DialTimeout is the timeout for failing to establish a connection. - DialTimeout time.Duration `json:"dial-timeout"` - - // DialKeepAliveTime is the time after which client pings the server to see if - // transport is alive. - DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` - - // DialKeepAliveTimeout is the time that the client waits for a response for the - // keep-alive probe. If the response is not received in this time, the connection is closed. - DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` - - // MaxCallSendMsgSize is the client-side request send limit in bytes. - // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). - // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. - // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). - MaxCallSendMsgSize int - - // MaxCallRecvMsgSize is the client-side response receive limit. - // If 0, it defaults to "math.MaxInt32", because range response can - // easily exceed request send limits. - // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. - // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). - MaxCallRecvMsgSize int - - // TLS holds the client secure credentials, if any. - TLS *tls.Config - - // Username is a user name for authentication. - Username string `json:"username"` - - // Password is a password for authentication. - Password string `json:"password"` - - // RejectOldCluster when set will refuse to create a client against an outdated cluster. - RejectOldCluster bool `json:"reject-old-cluster"` - - // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). - DialOptions []grpc.DialOption - - // Context is the default client context; it can be used to cancel grpc dial out and - // other operations that do not have an explicit context. 
- Context context.Context -} diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go deleted file mode 100644 index 717fbe435..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package clientv3 implements the official Go etcd client for v3. -// -// Create client using `clientv3.New`: -// -// // expect dial time-out on ipv4 blackhole -// _, err := clientv3.New(clientv3.Config{ -// Endpoints: []string{"http://254.0.0.1:12345"}, -// DialTimeout: 2 * time.Second -// }) -// -// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3 -// if err == context.DeadlineExceeded { -// // handle errors -// } -// -// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1 -// if err == grpc.ErrClientConnTimeout { -// // handle errors -// } -// -// cli, err := clientv3.New(clientv3.Config{ -// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, -// DialTimeout: 5 * time.Second, -// }) -// if err != nil { -// // handle error! -// } -// defer cli.Close() -// -// Make sure to close the client after using it. If the client is not closed, the -// connection will have leaky goroutines. -// -// To specify a client request timeout, wrap the context with context.WithTimeout: -// -// ctx, cancel := context.WithTimeout(context.Background(), timeout) -// resp, err := kvc.Put(ctx, "sample_key", "sample_value") -// cancel() -// if err != nil { -// // handle error! -// } -// // use the response -// -// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. -// Clients are safe for concurrent use by multiple goroutines. -// -// etcd client returns 3 types of errors: -// -// 1. context error: canceled or deadline exceeded. -// 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded. -// 3. 
gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go -// -// Here is the example code to handle client errors: -// -// resp, err := kvc.Put(ctx, "", "") -// if err != nil { -// if err == context.Canceled { -// // ctx is canceled by another routine -// } else if err == context.DeadlineExceeded { -// // ctx is attached with a deadline and it exceeded -// } else if ev, ok := status.FromError(err); ok { -// code := ev.Code() -// if code == codes.DeadlineExceeded { -// // server-side context might have timed-out first (due to clock skew) -// // while original client-side context is not timed-out yet -// } -// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok { -// // process (verr.Errors) -// } else { -// // bad cluster endpoints, which are not etcd servers -// } -// } -// -// go func() { cli.Close() }() -// _, err := kvc.Get(ctx, "a") -// if err != nil { -// if err == context.Canceled { -// // grpc balancer calls 'Get' with an inflight client.Close -// } else if err == grpc.ErrClientConnClosing { -// // grpc balancer calls 'Get' after client.Close. -// } -// } -// -package clientv3 diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go deleted file mode 100644 index 5918cba84..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go +++ /dev/null @@ -1,609 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "errors" - "net/url" - "strings" - "sync" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" -) - -const ( - minHealthRetryDuration = 3 * time.Second - unknownService = "unknown service grpc.health.v1.Health" -) - -// ErrNoAddrAvilable is returned by Get() when the balancer does not have -// any active connection to endpoints at the time. -// This error is returned only when opts.BlockingWait is true. -var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available") - -type healthCheckFunc func(ep string) (bool, error) - -type notifyMsg int - -const ( - notifyReset notifyMsg = iota - notifyNext -) - -// healthBalancer does the bare minimum to expose multiple eps -// to the grpc reconnection code path -type healthBalancer struct { - // addrs are the client's endpoint addresses for grpc - addrs []grpc.Address - - // eps holds the raw endpoints from the client - eps []string - - // notifyCh notifies grpc of the set of addresses for connecting - notifyCh chan []grpc.Address - - // readyc closes once the first connection is up - readyc chan struct{} - readyOnce sync.Once - - // healthCheck checks an endpoint's health. - healthCheck healthCheckFunc - healthCheckTimeout time.Duration - - unhealthyMu sync.RWMutex - unhealthyHostPorts map[string]time.Time - - // mu protects all fields below. 
- mu sync.RWMutex - - // upc closes when pinAddr transitions from empty to non-empty or the balancer closes. - upc chan struct{} - - // downc closes when grpc calls down() on pinAddr - downc chan struct{} - - // stopc is closed to signal updateNotifyLoop should stop. - stopc chan struct{} - stopOnce sync.Once - wg sync.WaitGroup - - // donec closes when all goroutines are exited - donec chan struct{} - - // updateAddrsC notifies updateNotifyLoop to update addrs. - updateAddrsC chan notifyMsg - - // grpc issues TLS cert checks using the string passed into dial so - // that string must be the host. To recover the full scheme://host URL, - // have a map from hosts to the original endpoint. - hostPort2ep map[string]string - - // pinAddr is the currently pinned address; set to the empty string on - // initialization and shutdown. - pinAddr string - - closed bool -} - -func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer { - notifyCh := make(chan []grpc.Address) - addrs := eps2addrs(eps) - hb := &healthBalancer{ - addrs: addrs, - eps: eps, - notifyCh: notifyCh, - readyc: make(chan struct{}), - healthCheck: hc, - unhealthyHostPorts: make(map[string]time.Time), - upc: make(chan struct{}), - stopc: make(chan struct{}), - downc: make(chan struct{}), - donec: make(chan struct{}), - updateAddrsC: make(chan notifyMsg), - hostPort2ep: getHostPort2ep(eps), - } - if timeout < minHealthRetryDuration { - timeout = minHealthRetryDuration - } - hb.healthCheckTimeout = timeout - - close(hb.downc) - go hb.updateNotifyLoop() - hb.wg.Add(1) - go func() { - defer hb.wg.Done() - hb.updateUnhealthy() - }() - return hb -} - -func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } - -func (b *healthBalancer) ConnectNotify() <-chan struct{} { - b.mu.Lock() - defer b.mu.Unlock() - return b.upc -} - -func (b *healthBalancer) ready() <-chan struct{} { return b.readyc } - -func (b *healthBalancer) endpoint(hostPort string) string { - b.mu.RLock() - defer b.mu.RUnlock() - return b.hostPort2ep[hostPort] -} - -func (b *healthBalancer) pinned() string { - b.mu.RLock() - defer b.mu.RUnlock() - return b.pinAddr -} - -func (b *healthBalancer) hostPortError(hostPort string, err error) { - if b.endpoint(hostPort) == "" { - logger.Lvl(4).Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error()) - return - } - - b.unhealthyMu.Lock() - b.unhealthyHostPorts[hostPort] = time.Now() - b.unhealthyMu.Unlock() - logger.Lvl(4).Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error()) -} - -func (b *healthBalancer) removeUnhealthy(hostPort, msg string) { - if b.endpoint(hostPort) == "" { - logger.Lvl(4).Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg) - return - } - - b.unhealthyMu.Lock() - delete(b.unhealthyHostPorts, hostPort) - b.unhealthyMu.Unlock() - logger.Lvl(4).Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg) -} - -func (b *healthBalancer) countUnhealthy() (count int) { - b.unhealthyMu.RLock() - count = len(b.unhealthyHostPorts) - b.unhealthyMu.RUnlock() - return count -} - -func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) { - b.unhealthyMu.RLock() - _, unhealthy = b.unhealthyHostPorts[hostPort] - b.unhealthyMu.RUnlock() - return unhealthy -} - -func (b *healthBalancer) cleanupUnhealthy() { - b.unhealthyMu.Lock() - for k, v := range b.unhealthyHostPorts { - if time.Since(v) > b.healthCheckTimeout { - 
delete(b.unhealthyHostPorts, k) - logger.Lvl(4).Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout) - } - } - b.unhealthyMu.Unlock() -} - -func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) { - unhealthyCnt := b.countUnhealthy() - - b.mu.RLock() - defer b.mu.RUnlock() - - hbAddrs := b.addrs - if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) { - liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep)) - for k := range b.hostPort2ep { - liveHostPorts[k] = struct{}{} - } - return hbAddrs, liveHostPorts - } - - addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt) - liveHostPorts := make(map[string]struct{}, len(addrs)) - for _, addr := range b.addrs { - if !b.isUnhealthy(addr.Addr) { - addrs = append(addrs, addr) - liveHostPorts[addr.Addr] = struct{}{} - } - } - return addrs, liveHostPorts -} - -func (b *healthBalancer) updateUnhealthy() { - for { - select { - case <-time.After(b.healthCheckTimeout): - b.cleanupUnhealthy() - pinned := b.pinned() - if pinned == "" || b.isUnhealthy(pinned) { - select { - case b.updateAddrsC <- notifyNext: - case <-b.stopc: - return - } - } - case <-b.stopc: - return - } - } -} - -func (b *healthBalancer) updateAddrs(eps ...string) { - np := getHostPort2ep(eps) - - b.mu.Lock() - defer b.mu.Unlock() - - match := len(np) == len(b.hostPort2ep) - if match { - for k, v := range np { - if b.hostPort2ep[k] != v { - match = false - break - } - } - } - if match { - // same endpoints, so no need to update address - return - } - - b.hostPort2ep = np - b.addrs, b.eps = eps2addrs(eps), eps - - b.unhealthyMu.Lock() - b.unhealthyHostPorts = make(map[string]time.Time) - b.unhealthyMu.Unlock() -} - -func (b *healthBalancer) next() { - b.mu.RLock() - downc := b.downc - b.mu.RUnlock() - select { - case b.updateAddrsC <- notifyNext: - case <-b.stopc: - } - // wait until disconnect so new RPCs are not issued on old connection - select { - case <-downc: - case <-b.stopc: - } -} - -func (b *healthBalancer) updateNotifyLoop() { - defer close(b.donec) - - for { - b.mu.RLock() - upc, downc, addr := b.upc, b.downc, b.pinAddr - b.mu.RUnlock() - // downc or upc should be closed - select { - case <-downc: - downc = nil - default: - } - select { - case <-upc: - upc = nil - default: - } - switch { - case downc == nil && upc == nil: - // stale - select { - case <-b.stopc: - return - default: - } - case downc == nil: - b.notifyAddrs(notifyReset) - select { - case <-upc: - case msg := <-b.updateAddrsC: - b.notifyAddrs(msg) - case <-b.stopc: - return - } - case upc == nil: - select { - // close connections that are not the pinned address - case b.notifyCh <- []grpc.Address{{Addr: addr}}: - case <-downc: - case <-b.stopc: - return - } - select { - case <-downc: - b.notifyAddrs(notifyReset) - case msg := <-b.updateAddrsC: - b.notifyAddrs(msg) - case <-b.stopc: - return - } - } - } -} - -func (b *healthBalancer) notifyAddrs(msg notifyMsg) { - if msg == notifyNext { - select { - case b.notifyCh <- []grpc.Address{}: - case <-b.stopc: - return - } - } - b.mu.RLock() - pinAddr := b.pinAddr - downc := b.downc - b.mu.RUnlock() - addrs, hostPorts := b.liveAddrs() - - var waitDown bool - if pinAddr != "" { - _, ok := hostPorts[pinAddr] - waitDown = !ok - } - - select { - case b.notifyCh <- addrs: - if waitDown { - select { - case <-downc: - case <-b.stopc: - } - } - case <-b.stopc: - } -} - -func (b *healthBalancer) Up(addr grpc.Address) func(error) { - if !b.mayPin(addr) { - return func(err error) {} - } - - 
b.mu.Lock() - defer b.mu.Unlock() - - // gRPC might call Up after it called Close. We add this check - // to "fix" it up at application layer. Otherwise, will panic - // if b.upc is already closed. - if b.closed { - return func(err error) {} - } - - // gRPC might call Up on a stale address. - // Prevent updating pinAddr with a stale address. - if !hasAddr(b.addrs, addr.Addr) { - return func(err error) {} - } - - if b.pinAddr != "" { - logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) - return func(err error) {} - } - - // notify waiting Get()s and pin first connected address - close(b.upc) - b.downc = make(chan struct{}) - b.pinAddr = addr.Addr - logger.Lvl(4).Infof("clientv3/balancer: pin %q", addr.Addr) - - // notify client that a connection is up - b.readyOnce.Do(func() { close(b.readyc) }) - - return func(err error) { - // If connected to a black hole endpoint or a killed server, the gRPC ping - // timeout will induce a network I/O error, and retrying until success; - // finding healthy endpoint on retry could take several timeouts and redials. - // To avoid wasting retries, gray-list unhealthy endpoints. - b.hostPortError(addr.Addr, err) - - b.mu.Lock() - b.upc = make(chan struct{}) - close(b.downc) - b.pinAddr = "" - b.mu.Unlock() - logger.Lvl(4).Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) - } -} - -func (b *healthBalancer) mayPin(addr grpc.Address) bool { - if b.endpoint(addr.Addr) == "" { // stale host:port - return false - } - - b.unhealthyMu.RLock() - unhealthyCnt := len(b.unhealthyHostPorts) - failedTime, bad := b.unhealthyHostPorts[addr.Addr] - b.unhealthyMu.RUnlock() - - b.mu.RLock() - skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt - b.mu.RUnlock() - if skip || !bad { - return true - } - - // prevent isolated member's endpoint from being infinitely retried, as follows: - // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm - // 2. balancer 'Up' unpins with grpc: failed with network I/O error - // 3. grpc-healthcheck still SERVING, thus retry to pin - // instead, return before grpc-healthcheck if failed within healthcheck timeout - if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout { - logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout) - return false - } - - if ok, _ := b.healthCheck(addr.Addr); ok { - b.removeUnhealthy(addr.Addr, "health check success") - return true - } - - b.hostPortError(addr.Addr, errors.New("health check failed")) - return false -} - -func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { - var ( - addr string - closed bool - ) - - // If opts.BlockingWait is false (for fail-fast RPCs), it should return - // an address it has notified via Notify immediately instead of blocking. 
- if !opts.BlockingWait { - b.mu.RLock() - closed = b.closed - addr = b.pinAddr - b.mu.RUnlock() - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if addr == "" { - return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable - } - return grpc.Address{Addr: addr}, func() {}, nil - } - - for { - b.mu.RLock() - ch := b.upc - b.mu.RUnlock() - select { - case <-ch: - case <-b.donec: - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - case <-ctx.Done(): - return grpc.Address{Addr: ""}, nil, ctx.Err() - } - b.mu.RLock() - closed = b.closed - addr = b.pinAddr - b.mu.RUnlock() - // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed. - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if addr != "" { - break - } - } - return grpc.Address{Addr: addr}, func() {}, nil -} - -func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } - -func (b *healthBalancer) Close() error { - b.mu.Lock() - // In case gRPC calls close twice. TODO: remove the checking - // when we are sure that gRPC wont call close twice. - if b.closed { - b.mu.Unlock() - <-b.donec - return nil - } - b.closed = true - b.stopOnce.Do(func() { close(b.stopc) }) - b.pinAddr = "" - - // In the case of following scenario: - // 1. upc is not closed; no pinned address - // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks - // 3. client.conn.Close() calls balancer.Close(); closed = true - // 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled - // we must close upc so Get() exits from blocking on upc - select { - case <-b.upc: - default: - // terminate all waiting Get()s - close(b.upc) - } - - b.mu.Unlock() - b.wg.Wait() - - // wait for updateNotifyLoop to finish - <-b.donec - close(b.notifyCh) - - return nil -} - -func grpcHealthCheck(client *Client, ep string) (bool, error) { - conn, err := client.dial(ep) - if err != nil { - return false, err - } - defer conn.Close() - cli := healthpb.NewHealthClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) - cancel() - if err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { - if s.Message() == unknownService { // etcd < v3.3.0 - return true, nil - } - } - return false, err - } - return resp.Status == healthpb.HealthCheckResponse_SERVING, nil -} - -func hasAddr(addrs []grpc.Address, targetAddr string) bool { - for _, addr := range addrs { - if targetAddr == addr.Addr { - return true - } - } - return false -} - -func getHost(ep string) string { - url, uerr := url.Parse(ep) - if uerr != nil || !strings.Contains(ep, "://") { - return ep - } - return url.Host -} - -func eps2addrs(eps []string) []grpc.Address { - addrs := make([]grpc.Address, len(eps)) - for i := range eps { - addrs[i].Addr = getHost(eps[i]) - } - return addrs -} - -func getHostPort2ep(eps []string) map[string]string { - hm := make(map[string]string, len(eps)) - for i := range eps { - _, host, _ := parseEndpoint(eps[i]) - hm[host] = eps[i] - } - return hm -} diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go deleted file mode 100644 index 5a7469bd4..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 
(the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" -) - -type ( - CompactResponse pb.CompactionResponse - PutResponse pb.PutResponse - GetResponse pb.RangeResponse - DeleteResponse pb.DeleteRangeResponse - TxnResponse pb.TxnResponse -) - -type KV interface { - // Put puts a key-value pair into etcd. - // Note that key,value can be plain bytes array and string is - // an immutable representation of that bytes array. - // To get a string of bytes, do string([]byte{0x10, 0x20}). - Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) - - // Get retrieves keys. - // By default, Get will return the value for "key", if any. - // When passed WithRange(end), Get will return the keys in the range [key, end). - // When passed WithFromKey(), Get returns keys greater than or equal to key. - // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; - // if the required revision is compacted, the request will fail with ErrCompacted . - // When passed WithLimit(limit), the number of returned keys is bounded by limit. - // When passed WithSort(), the keys will be sorted. - Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) - - // Delete deletes a key, or optionally using WithRange(end), [key, end). - Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) - - // Compact compacts etcd KV history before the given rev. - Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) - - // Do applies a single Op on KV without a transaction. - // Do is useful when creating arbitrary operations to be issued at a - // later time; the user can range over the operations, calling Do to - // execute them. Get/Put/Delete, on the other hand, are best suited - // for when the operation should be issued at the time of declaration. - Do(ctx context.Context, op Op) (OpResponse, error) - - // Txn creates a transaction. 
- Txn(ctx context.Context) Txn -} - -type OpResponse struct { - put *PutResponse - get *GetResponse - del *DeleteResponse - txn *TxnResponse -} - -func (op OpResponse) Put() *PutResponse { return op.put } -func (op OpResponse) Get() *GetResponse { return op.get } -func (op OpResponse) Del() *DeleteResponse { return op.del } -func (op OpResponse) Txn() *TxnResponse { return op.txn } - -func (resp *PutResponse) OpResponse() OpResponse { - return OpResponse{put: resp} -} -func (resp *GetResponse) OpResponse() OpResponse { - return OpResponse{get: resp} -} -func (resp *DeleteResponse) OpResponse() OpResponse { - return OpResponse{del: resp} -} -func (resp *TxnResponse) OpResponse() OpResponse { - return OpResponse{txn: resp} -} - -type kv struct { - remote pb.KVClient - callOpts []grpc.CallOption -} - -func NewKV(c *Client) KV { - api := &kv{remote: RetryKVClient(c)} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { - api := &kv{remote: remote} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { - r, err := kv.Do(ctx, OpPut(key, val, opts...)) - return r.put, toErr(ctx, err) -} - -func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { - r, err := kv.Do(ctx, OpGet(key, opts...)) - return r.get, toErr(ctx, err) -} - -func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { - r, err := kv.Do(ctx, OpDelete(key, opts...)) - return r.del, toErr(ctx, err) -} - -func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { - resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*CompactResponse)(resp), err -} - -func (kv *kv) Txn(ctx context.Context) Txn { - return &txn{ - kv: kv, - ctx: ctx, - callOpts: kv.callOpts, - } -} - -func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - var err error - switch op.t { - case tRange: - var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) - if err == nil { - return OpResponse{get: (*GetResponse)(resp)}, nil - } - case tPut: - var resp *pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} - resp, err = kv.remote.Put(ctx, r, kv.callOpts...) - if err == nil { - return OpResponse{put: (*PutResponse)(resp)}, nil - } - case tDeleteRange: - var resp *pb.DeleteRangeResponse - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) - if err == nil { - return OpResponse{del: (*DeleteResponse)(resp)}, nil - } - case tTxn: - var resp *pb.TxnResponse - resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) 
- if err == nil { - return OpResponse{txn: (*TxnResponse)(resp)}, nil - } - default: - panic("Unknown op") - } - return OpResponse{}, toErr(ctx, err) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go deleted file mode 100644 index 3729cf37b..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -type ( - LeaseRevokeResponse pb.LeaseRevokeResponse - LeaseID int64 -) - -// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. -type LeaseGrantResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 - Error string -} - -// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. -type LeaseKeepAliveResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 -} - -// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. -type LeaseTimeToLiveResponse struct { - *pb.ResponseHeader - ID LeaseID `json:"id"` - - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1. - TTL int64 `json:"ttl"` - - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - GrantedTTL int64 `json:"granted-ttl"` - - // Keys is the list of keys attached to this lease. - Keys [][]byte `json:"keys"` -} - -// LeaseStatus represents a lease status. -type LeaseStatus struct { - ID LeaseID `json:"id"` - // TODO: TTL int64 -} - -// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse. -type LeaseLeasesResponse struct { - *pb.ResponseHeader - Leases []LeaseStatus `json:"leases"` -} - -const ( - // defaultTTL is the assumed lease TTL used for the first keepalive - // deadline before the actual TTL is known to the client. - defaultTTL = 5 * time.Second - // NoLease is a lease ID for the absence of a lease. - NoLease LeaseID = 0 - - // retryConnWait is how long to wait before retrying request due to an error - retryConnWait = 500 * time.Millisecond -) - -// LeaseResponseChSize is the size of buffer to store unsent lease responses. -// WARNING: DO NOT UPDATE. -// Only for testing purposes. -var LeaseResponseChSize = 16 - -// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. -// -// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. -type ErrKeepAliveHalted struct { - Reason error -} - -func (e ErrKeepAliveHalted) Error() string { - s := "etcdclient: leases keep alive halted" - if e.Reason != nil { - s += ": " + e.Reason.Error() - } - return s -} - -type Lease interface { - // Grant creates a new lease. 
- Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) - - // Revoke revokes the given lease. - Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) - - // TimeToLive retrieves the lease information of the given lease ID. - TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) - - // Leases retrieves all leases. - Leases(ctx context.Context) (*LeaseLeasesResponse, error) - - // KeepAlive keeps the given lease alive forever. If the keepalive response - // posted to the channel is not consumed immediately, the lease client will - // continue sending keep alive requests to the etcd server at least every - // second until latest response is consumed. - // - // The returned "LeaseKeepAliveResponse" channel closes if underlying keep - // alive stream is interrupted in some way the client cannot handle itself; - // given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse" - // from this closed channel is nil. - // - // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: - // no leader") or canceled by the caller (e.g. context.Canceled), the error - // is returned. Otherwise, it retries. - // - // TODO(v4.0): post errors to last keep alive message before closing - // (see https://github.com/coreos/etcd/pull/7866) - KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - - // KeepAliveOnce renews the lease once. The response corresponds to the - // first message from calling KeepAlive. If the response has a recoverable - // error, KeepAliveOnce will retry the RPC with a new keep alive message. - // - // In most of the cases, Keepalive should be used instead of KeepAliveOnce. - KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) - - // Close releases all resources Lease keeps for efficient communication - // with the etcd server. - Close() error -} - -type lessor struct { - mu sync.Mutex // guards all fields - - // donec is closed and loopErr is set when recvKeepAliveLoop stops - donec chan struct{} - loopErr error - - remote pb.LeaseClient - - stream pb.Lease_LeaseKeepAliveClient - streamCancel context.CancelFunc - - stopCtx context.Context - stopCancel context.CancelFunc - - keepAlives map[LeaseID]*keepAlive - - // firstKeepAliveTimeout is the timeout for the first keepalive request - // before the actual TTL is known to the lease client - firstKeepAliveTimeout time.Duration - - // firstKeepAliveOnce ensures stream starts after first KeepAlive call. - firstKeepAliveOnce sync.Once - - callOpts []grpc.CallOption -} - -// keepAlive multiplexes a keepalive for a lease over multiple channels -type keepAlive struct { - chs []chan<- *LeaseKeepAliveResponse - ctxs []context.Context - // deadline is the time the keep alive channels close if no response - deadline time.Time - // nextKeepAlive is when to send the next keep alive message - nextKeepAlive time.Time - // donec is closed on lease revoke, expiration, or cancel. 
- donec chan struct{} -} - -func NewLease(c *Client) Lease { - return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) -} - -func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { - l := &lessor{ - donec: make(chan struct{}), - keepAlives: make(map[LeaseID]*keepAlive), - remote: remote, - firstKeepAliveTimeout: keepAliveTimeout, - } - if l.firstKeepAliveTimeout == time.Second { - l.firstKeepAliveTimeout = defaultTTL - } - if c != nil { - l.callOpts = c.callOpts - } - reqLeaderCtx := WithRequireLeader(context.Background()) - l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) - return l -} - -func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) - if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - r := toLeaseTimeToLiveRequest(id, opts...) - resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { - resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...) 
- if err == nil { - leases := make([]LeaseStatus, len(resp.Leases)) - for i := range resp.Leases { - leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} - } - return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { - ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) - - l.mu.Lock() - // ensure that recvKeepAliveLoop is still running - select { - case <-l.donec: - err := l.loopErr - l.mu.Unlock() - close(ch) - return ch, ErrKeepAliveHalted{Reason: err} - default: - } - ka, ok := l.keepAlives[id] - if !ok { - // create fresh keep alive - ka = &keepAlive{ - chs: []chan<- *LeaseKeepAliveResponse{ch}, - ctxs: []context.Context{ctx}, - deadline: time.Now().Add(l.firstKeepAliveTimeout), - nextKeepAlive: time.Now(), - donec: make(chan struct{}), - } - l.keepAlives[id] = ka - } else { - // add channel and context to existing keep alive - ka.ctxs = append(ka.ctxs, ctx) - ka.chs = append(ka.chs, ch) - } - l.mu.Unlock() - - go l.keepAliveCtxCloser(id, ctx, ka.donec) - l.firstKeepAliveOnce.Do(func() { - go l.recvKeepAliveLoop() - go l.deadlineLoop() - }) - - return ch, nil -} - -func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - for { - resp, err := l.keepAliveOnce(ctx, id) - if err == nil { - if resp.TTL <= 0 { - err = rpctypes.ErrLeaseNotFound - } - return resp, err - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - } -} - -func (l *lessor) Close() error { - l.stopCancel() - // close for synchronous teardown if stream goroutines never launched - l.firstKeepAliveOnce.Do(func() { close(l.donec) }) - <-l.donec - return nil -} - -func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) { - select { - case <-donec: - return - case <-l.donec: - return - case <-ctx.Done(): - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[id] - if !ok { - return - } - - // close channel and remove context if still associated with keep alive - for i, c := range ka.ctxs { - if c == ctx { - close(ka.chs[i]) - ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) - ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) - break - } - } - // remove if no one more listeners - if len(ka.chs) == 0 { - delete(l.keepAlives, id) - } -} - -// closeRequireLeader scans keepAlives for ctxs that have require leader -// and closes the associated channels. 
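For orientation, the vendored Lease API being deleted in this hunk is normally driven through a *clientv3.Client, which embeds it. The sketch below is illustrative only: the endpoint, key name, and 10-second TTL are assumptions, not values from this repository. It grants a lease, attaches a key, and drains the KeepAlive channel as the KeepAlive documentation above requires.

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local etcd, not from this repo
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// Grant a 10-second lease and attach a key to it.
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(ctx, "service/instance-1", "alive", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}

	// KeepAlive renews the lease until ctx ends or the stream breaks. The channel
	// should be consumed; per the KeepAlive doc above, unread responses make the
	// client keep sending keepalive requests at least every second.
	ch, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		log.Fatal(err)
	}
	for resp := range ch {
		log.Printf("lease %x renewed, ttl=%ds", resp.ID, resp.TTL)
	}
	log.Print("keepalive channel closed; lease will eventually expire")
}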
-func (l *lessor) closeRequireLeader() { - l.mu.Lock() - defer l.mu.Unlock() - for _, ka := range l.keepAlives { - reqIdxs := 0 - // find all required leader channels, close, mark as nil - for i, ctx := range ka.ctxs { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - continue - } - ks := md[rpctypes.MetadataRequireLeaderKey] - if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { - continue - } - close(ka.chs[i]) - ka.chs[i] = nil - reqIdxs++ - } - if reqIdxs == 0 { - continue - } - // remove all channels that required a leader from keepalive - newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) - newCtxs := make([]context.Context, len(newChs)) - newIdx := 0 - for i := range ka.chs { - if ka.chs[i] == nil { - continue - } - newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] - newIdx++ - } - ka.chs, ka.ctxs = newChs, newCtxs - } -} - -func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - - err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) - if err != nil { - return nil, toErr(ctx, err) - } - - resp, rerr := stream.Recv() - if rerr != nil { - return nil, toErr(ctx, rerr) - } - - karesp := &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - return karesp, nil -} - -func (l *lessor) recvKeepAliveLoop() (gerr error) { - defer func() { - l.mu.Lock() - close(l.donec) - l.loopErr = gerr - for _, ka := range l.keepAlives { - ka.close() - } - l.keepAlives = make(map[LeaseID]*keepAlive) - l.mu.Unlock() - }() - - for { - stream, err := l.resetRecv() - if err != nil { - if canceledByCaller(l.stopCtx, err) { - return err - } - } else { - for { - resp, err := stream.Recv() - if err != nil { - if canceledByCaller(l.stopCtx, err) { - return err - } - - if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { - l.closeRequireLeader() - } - break - } - - l.recvKeepAlive(resp) - } - } - - select { - case <-time.After(retryConnWait): - continue - case <-l.stopCtx.Done(): - return l.stopCtx.Err() - } - } -} - -// resetRecv opens a new lease stream and starts sending keep alive requests. -func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { - sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...) 
- if err != nil { - cancel() - return nil, err - } - - l.mu.Lock() - defer l.mu.Unlock() - if l.stream != nil && l.streamCancel != nil { - l.streamCancel() - } - - l.streamCancel = cancel - l.stream = stream - - go l.sendKeepAliveLoop(stream) - return stream, nil -} - -// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse -func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { - karesp := &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[karesp.ID] - if !ok { - return - } - - if karesp.TTL <= 0 { - // lease expired; close all keep alive channels - delete(l.keepAlives, karesp.ID) - ka.close() - return - } - - // send update to all channels - nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) - ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) - for _, ch := range ka.chs { - select { - case ch <- karesp: - default: - } - // still advance in order to rate-limit keep-alive sends - ka.nextKeepAlive = nextKeepAlive - } -} - -// deadlineLoop reaps any keep alive channels that have not received a response -// within the lease TTL -func (l *lessor) deadlineLoop() { - for { - select { - case <-time.After(time.Second): - case <-l.donec: - return - } - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.deadline.Before(now) { - // waited too long for response; lease may be expired - ka.close() - delete(l.keepAlives, id) - } - } - l.mu.Unlock() - } -} - -// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. -func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { - for { - var tosend []LeaseID - - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.nextKeepAlive.Before(now) { - tosend = append(tosend, id) - } - } - l.mu.Unlock() - - for _, id := range tosend { - r := &pb.LeaseKeepAliveRequest{ID: int64(id)} - if err := stream.Send(r); err != nil { - // TODO do something with this error? - return - } - } - - select { - case <-time.After(500 * time.Millisecond): - case <-stream.Context().Done(): - return - case <-l.donec: - return - case <-l.stopCtx.Done(): - return - } - } -} - -func (ka *keepAlive) close() { - close(ka.donec) - for _, ch := range ka.chs { - close(ch) - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go deleted file mode 100644 index 782e31313..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "io/ioutil" - "sync" - - "google.golang.org/grpc/grpclog" -) - -// Logger is the logger used by client library. -// It implements grpclog.LoggerV2 interface. -type Logger interface { - grpclog.LoggerV2 - - // Lvl returns logger if logger's verbosity level >= "lvl". 
- // Otherwise, logger that discards all logs. - Lvl(lvl int) Logger - - // to satisfy capnslog - - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) -} - -var ( - loggerMu sync.RWMutex - logger Logger -) - -type settableLogger struct { - l grpclog.LoggerV2 - mu sync.RWMutex -} - -func init() { - // disable client side logs by default - logger = &settableLogger{} - SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) -} - -// SetLogger sets client-side Logger. -func SetLogger(l grpclog.LoggerV2) { - loggerMu.Lock() - logger = NewLogger(l) - // override grpclog so that any changes happen with locking - grpclog.SetLoggerV2(logger) - loggerMu.Unlock() -} - -// GetLogger returns the current logger. -func GetLogger() Logger { - loggerMu.RLock() - l := logger - loggerMu.RUnlock() - return l -} - -// NewLogger returns a new Logger with grpclog.LoggerV2. -func NewLogger(gl grpclog.LoggerV2) Logger { - return &settableLogger{l: gl} -} - -func (s *settableLogger) get() grpclog.LoggerV2 { - s.mu.RLock() - l := s.l - s.mu.RUnlock() - return l -} - -// implement the grpclog.LoggerV2 interface - -func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } -func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } -func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } -func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } -func (s *settableLogger) Warningf(format string, args ...interface{}) { - s.get().Warningf(format, args...) -} -func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } -func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } -func (s *settableLogger) Errorf(format string, args ...interface{}) { - s.get().Errorf(format, args...) -} -func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } -func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } -func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } -func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) 
} -func (s *settableLogger) V(l int) bool { return s.get().V(l) } -func (s *settableLogger) Lvl(lvl int) Logger { - s.mu.RLock() - l := s.l - s.mu.RUnlock() - if l.V(lvl) { - return s - } - return &noLogger{} -} - -type noLogger struct{} - -func (*noLogger) Info(args ...interface{}) {} -func (*noLogger) Infof(format string, args ...interface{}) {} -func (*noLogger) Infoln(args ...interface{}) {} -func (*noLogger) Warning(args ...interface{}) {} -func (*noLogger) Warningf(format string, args ...interface{}) {} -func (*noLogger) Warningln(args ...interface{}) {} -func (*noLogger) Error(args ...interface{}) {} -func (*noLogger) Errorf(format string, args ...interface{}) {} -func (*noLogger) Errorln(args ...interface{}) {} -func (*noLogger) Fatal(args ...interface{}) {} -func (*noLogger) Fatalf(format string, args ...interface{}) {} -func (*noLogger) Fatalln(args ...interface{}) {} -func (*noLogger) Print(args ...interface{}) {} -func (*noLogger) Printf(format string, args ...interface{}) {} -func (*noLogger) Println(args ...interface{}) {} -func (*noLogger) V(l int) bool { return false } -func (ng *noLogger) Lvl(lvl int) Logger { return ng } diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go deleted file mode 100644 index f60cfbe47..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "io" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" -) - -type ( - DefragmentResponse pb.DefragmentResponse - AlarmResponse pb.AlarmResponse - AlarmMember pb.AlarmMember - StatusResponse pb.StatusResponse - HashKVResponse pb.HashKVResponse - MoveLeaderResponse pb.MoveLeaderResponse -) - -type Maintenance interface { - // AlarmList gets all active alarms. - AlarmList(ctx context.Context) (*AlarmResponse, error) - - // AlarmDisarm disarms a given alarm. - AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - - // Defragment releases wasted space from internal fragmentation on a given etcd member. - // Defragment is only needed when deleting a large number of keys and want to reclaim - // the resources. - // Defragment is an expensive operation. User should avoid defragmenting multiple members - // at the same time. - // To defragment multiple members in the cluster, user need to call defragment multiple - // times with different endpoints. - Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) - - // Status gets the status of the endpoint. - Status(ctx context.Context, endpoint string) (*StatusResponse, error) - - // HashKV returns a hash of the KV state at the time of the RPC. - // If revision is zero, the hash is computed on all keys. If the revision - // is non-zero, the hash is computed on all keys at or below the given revision. 
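The Maintenance interface above is likewise embedded in *clientv3.Client. A minimal sketch that reports Status for every configured endpoint follows; the endpoint list and timeout are assumptions.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local etcd
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Status (like Defragment and HashKV) is addressed per endpoint, not per cluster.
	for _, ep := range cli.Endpoints() {
		st, err := cli.Status(ctx, ep)
		if err != nil {
			log.Printf("status %s: %v", ep, err)
			continue
		}
		fmt.Printf("%s: version=%s dbSize=%dB leader=%x\n", ep, st.Version, st.DbSize, st.Leader)
	}
}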
- HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) - - // Snapshot provides a reader for a point-in-time snapshot of etcd. - Snapshot(ctx context.Context) (io.ReadCloser, error) - - // MoveLeader requests current leader to transfer its leadership to the transferee. - // Request must be made to the leader. - MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) -} - -type maintenance struct { - dial func(endpoint string) (pb.MaintenanceClient, func(), error) - remote pb.MaintenanceClient - callOpts []grpc.CallOption -} - -func NewMaintenance(c *Client) Maintenance { - api := &maintenance{ - dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { - conn, err := c.dial(endpoint) - if err != nil { - return nil, nil, err - } - cancel := func() { conn.Close() } - return RetryMaintenanceClient(c, conn), cancel, nil - }, - remote: RetryMaintenanceClient(c, c.conn), - } - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { - api := &maintenance{ - dial: func(string) (pb.MaintenanceClient, func(), error) { - return remote, func() {}, nil - }, - remote: remote, - } - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { - req := &pb.AlarmRequest{ - Action: pb.AlarmRequest_GET, - MemberID: 0, // all - Alarm: pb.AlarmType_NONE, // all - } - resp, err := m.remote.Alarm(ctx, req, m.callOpts...) - if err == nil { - return (*AlarmResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { - req := &pb.AlarmRequest{ - Action: pb.AlarmRequest_DEACTIVATE, - MemberID: am.MemberID, - Alarm: am.Alarm, - } - - if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { - ar, err := m.AlarmList(ctx) - if err != nil { - return nil, toErr(ctx, err) - } - ret := AlarmResponse{} - for _, am := range ar.Alarms { - dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) - if derr != nil { - return nil, toErr(ctx, derr) - } - ret.Alarms = append(ret.Alarms, dresp.Alarms...) - } - return &ret, nil - } - - resp, err := m.remote.Alarm(ctx, req, m.callOpts...) - if err == nil { - return (*AlarmResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*DefragmentResponse)(resp), nil -} - -func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*StatusResponse)(resp), nil -} - -func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) 
- if err != nil { - return nil, toErr(ctx, err) - } - return (*HashKVResponse)(resp), nil -} - -func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - - pr, pw := io.Pipe() - go func() { - for { - resp, err := ss.Recv() - if err != nil { - pw.CloseWithError(err) - return - } - if resp == nil && err == nil { - break - } - if _, werr := pw.Write(resp.Blob); werr != nil { - pw.CloseWithError(werr) - return - } - } - pw.Close() - }() - return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil -} - -type snapshotReadCloser struct { - ctx context.Context - io.ReadCloser -} - -func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { - n, err = rc.ReadCloser.Read(p) - return n, toErr(rc.ctx, err) -} - -func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { - resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) - return (*MoveLeaderResponse)(resp), toErr(ctx, err) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go deleted file mode 100644 index c6ec5bf52..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - -type opType int - -const ( - // A default Op has opType 0, which is invalid. - tRange opType = iota + 1 - tPut - tDeleteRange - tTxn -) - -var ( - noPrefixEnd = []byte{0} -) - -// Op represents an Operation that kv can execute. -type Op struct { - t opType - key []byte - end []byte - - // for range - limit int64 - sort *SortOption - serializable bool - keysOnly bool - countOnly bool - minModRev int64 - maxModRev int64 - minCreateRev int64 - maxCreateRev int64 - - // for range, watch - rev int64 - - // for watch, put, delete - prevKV bool - - // for put - ignoreValue bool - ignoreLease bool - - // progressNotify is for progress updates. - progressNotify bool - // createdNotify is for created event - createdNotify bool - // filters for watchers - filterPut bool - filterDelete bool - - // for put - val []byte - leaseID LeaseID - - // txn - cmps []Cmp - thenOps []Op - elseOps []Op -} - -// accessors / mutators - -func (op Op) IsTxn() bool { return op.t == tTxn } -func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } - -// KeyBytes returns the byte slice holding the Op's key. -func (op Op) KeyBytes() []byte { return op.key } - -// WithKeyBytes sets the byte slice for the Op's key. -func (op *Op) WithKeyBytes(key []byte) { op.key = key } - -// RangeBytes returns the byte slice holding with the Op's range end, if any. -func (op Op) RangeBytes() []byte { return op.end } - -// Rev returns the requested revision, if any. 
-func (op Op) Rev() int64 { return op.rev } - -// IsPut returns true iff the operation is a Put. -func (op Op) IsPut() bool { return op.t == tPut } - -// IsGet returns true iff the operation is a Get. -func (op Op) IsGet() bool { return op.t == tRange } - -// IsDelete returns true iff the operation is a Delete. -func (op Op) IsDelete() bool { return op.t == tDeleteRange } - -// IsSerializable returns true if the serializable field is true. -func (op Op) IsSerializable() bool { return op.serializable == true } - -// IsKeysOnly returns whether keysOnly is set. -func (op Op) IsKeysOnly() bool { return op.keysOnly == true } - -// IsCountOnly returns whether countOnly is set. -func (op Op) IsCountOnly() bool { return op.countOnly == true } - -// MinModRev returns the operation's minimum modify revision. -func (op Op) MinModRev() int64 { return op.minModRev } - -// MaxModRev returns the operation's maximum modify revision. -func (op Op) MaxModRev() int64 { return op.maxModRev } - -// MinCreateRev returns the operation's minimum create revision. -func (op Op) MinCreateRev() int64 { return op.minCreateRev } - -// MaxCreateRev returns the operation's maximum create revision. -func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } - -// WithRangeBytes sets the byte slice for the Op's range end. -func (op *Op) WithRangeBytes(end []byte) { op.end = end } - -// ValueBytes returns the byte slice holding the Op's value, if any. -func (op Op) ValueBytes() []byte { return op.val } - -// WithValueBytes sets the byte slice for the Op's value. -func (op *Op) WithValueBytes(v []byte) { op.val = v } - -func (op Op) toRangeRequest() *pb.RangeRequest { - if op.t != tRange { - panic("op.t != tRange") - } - r := &pb.RangeRequest{ - Key: op.key, - RangeEnd: op.end, - Limit: op.limit, - Revision: op.rev, - Serializable: op.serializable, - KeysOnly: op.keysOnly, - CountOnly: op.countOnly, - MinModRevision: op.minModRev, - MaxModRevision: op.maxModRev, - MinCreateRevision: op.minCreateRev, - MaxCreateRevision: op.maxCreateRev, - } - if op.sort != nil { - r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) - r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) - } - return r -} - -func (op Op) toTxnRequest() *pb.TxnRequest { - thenOps := make([]*pb.RequestOp, len(op.thenOps)) - for i, tOp := range op.thenOps { - thenOps[i] = tOp.toRequestOp() - } - elseOps := make([]*pb.RequestOp, len(op.elseOps)) - for i, eOp := range op.elseOps { - elseOps[i] = eOp.toRequestOp() - } - cmps := make([]*pb.Compare, len(op.cmps)) - for i := range op.cmps { - cmps[i] = (*pb.Compare)(&op.cmps[i]) - } - return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} -} - -func (op Op) toRequestOp() *pb.RequestOp { - switch op.t { - case tRange: - return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} - case tPut: - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} - return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} - case tDeleteRange: - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} - case tTxn: - return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} - default: - panic("Unknown Op") - } -} - -func (op Op) isWrite() bool { - if op.t == tTxn { - for _, tOp := range op.thenOps { - if tOp.isWrite() { - return true 
- } - } - for _, tOp := range op.elseOps { - if tOp.isWrite() { - return true - } - } - return false - } - return op.t != tRange -} - -func OpGet(key string, opts ...OpOption) Op { - ret := Op{t: tRange, key: []byte(key)} - ret.applyOpts(opts) - return ret -} - -func OpDelete(key string, opts ...OpOption) Op { - ret := Op{t: tDeleteRange, key: []byte(key)} - ret.applyOpts(opts) - switch { - case ret.leaseID != 0: - panic("unexpected lease in delete") - case ret.limit != 0: - panic("unexpected limit in delete") - case ret.rev != 0: - panic("unexpected revision in delete") - case ret.sort != nil: - panic("unexpected sort in delete") - case ret.serializable: - panic("unexpected serializable in delete") - case ret.countOnly: - panic("unexpected countOnly in delete") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in delete") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in delete") - case ret.filterDelete, ret.filterPut: - panic("unexpected filter in delete") - case ret.createdNotify: - panic("unexpected createdNotify in delete") - } - return ret -} - -func OpPut(key, val string, opts ...OpOption) Op { - ret := Op{t: tPut, key: []byte(key), val: []byte(val)} - ret.applyOpts(opts) - switch { - case ret.end != nil: - panic("unexpected range in put") - case ret.limit != 0: - panic("unexpected limit in put") - case ret.rev != 0: - panic("unexpected revision in put") - case ret.sort != nil: - panic("unexpected sort in put") - case ret.serializable: - panic("unexpected serializable in put") - case ret.countOnly: - panic("unexpected countOnly in put") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in put") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in put") - case ret.filterDelete, ret.filterPut: - panic("unexpected filter in put") - case ret.createdNotify: - panic("unexpected createdNotify in put") - } - return ret -} - -func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { - return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} -} - -func opWatch(key string, opts ...OpOption) Op { - ret := Op{t: tRange, key: []byte(key)} - ret.applyOpts(opts) - switch { - case ret.leaseID != 0: - panic("unexpected lease in watch") - case ret.limit != 0: - panic("unexpected limit in watch") - case ret.sort != nil: - panic("unexpected sort in watch") - case ret.serializable: - panic("unexpected serializable in watch") - case ret.countOnly: - panic("unexpected countOnly in watch") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in watch") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in watch") - } - return ret -} - -func (op *Op) applyOpts(opts []OpOption) { - for _, opt := range opts { - opt(op) - } -} - -// OpOption configures Operations like Get, Put, Delete. -type OpOption func(*Op) - -// WithLease attaches a lease ID to a key in 'Put' request. -func WithLease(leaseID LeaseID) OpOption { - return func(op *Op) { op.leaseID = leaseID } -} - -// WithLimit limits the number of results to return from 'Get' request. -// If WithLimit is given a 0 limit, it is treated as no limit. -func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } - -// WithRev specifies the store revision for 'Get' request. -// Or the start revision of 'Watch' request. 
-func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } - -// WithSort specifies the ordering in 'Get' request. It requires -// 'WithRange' and/or 'WithPrefix' to be specified too. -// 'target' specifies the target to sort by: key, version, revisions, value. -// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. -func WithSort(target SortTarget, order SortOrder) OpOption { - return func(op *Op) { - if target == SortByKey && order == SortAscend { - // If order != SortNone, server fetches the entire key-space, - // and then applies the sort and limit, if provided. - // Since by default the server returns results sorted by keys - // in lexicographically ascending order, the client should ignore - // SortOrder if the target is SortByKey. - order = SortNone - } - op.sort = &SortOption{target, order} - } -} - -// GetPrefixRangeEnd gets the range end of the prefix. -// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. -func GetPrefixRangeEnd(prefix string) string { - return string(getPrefix([]byte(prefix))) -} - -func getPrefix(key []byte) []byte { - end := make([]byte, len(key)) - copy(end, key) - for i := len(end) - 1; i >= 0; i-- { - if end[i] < 0xff { - end[i] = end[i] + 1 - end = end[:i+1] - return end - } - } - // next prefix does not exist (e.g., 0xffff); - // default to WithFromKey policy - return noPrefixEnd -} - -// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate -// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' -// can return 'foo1', 'foo2', and so on. -func WithPrefix() OpOption { - return func(op *Op) { - if len(op.key) == 0 { - op.key, op.end = []byte{0}, []byte{0} - return - } - op.end = getPrefix(op.key) - } -} - -// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests. -// For example, 'Get' requests with 'WithRange(end)' returns -// the keys in the range [key, end). -// endKey must be lexicographically greater than start key. -func WithRange(endKey string) OpOption { - return func(op *Op) { op.end = []byte(endKey) } -} - -// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests -// to be equal or greater than the key in the argument. -func WithFromKey() OpOption { return WithRange("\x00") } - -// WithSerializable makes 'Get' request serializable. By default, -// it's linearizable. Serializable requests are better for lower latency -// requirement. -func WithSerializable() OpOption { - return func(op *Op) { op.serializable = true } -} - -// WithKeysOnly makes the 'Get' request return only the keys and the corresponding -// values will be omitted. -func WithKeysOnly() OpOption { - return func(op *Op) { op.keysOnly = true } -} - -// WithCountOnly makes the 'Get' request return only the count of keys. -func WithCountOnly() OpOption { - return func(op *Op) { op.countOnly = true } -} - -// WithMinModRev filters out keys for Get with modification revisions less than the given revision. -func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } } - -// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision. -func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } } - -// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision. 
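The option helpers in this file compose with the KV interface removed earlier in the diff. A sketch of a bounded prefix read follows; the "conf/" keys, the limit of 10, and the local endpoint are made up for illustration.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local etcd
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	if _, err := cli.Put(ctx, "conf/feature-x", "on"); err != nil {
		log.Fatal(err)
	}

	// WithPrefix turns "conf/" into the range [conf/, conf0) via getPrefix;
	// sort and limit are applied as documented for WithSort and WithLimit.
	resp, err := cli.Get(ctx, "conf/",
		clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend),
		clientv3.WithLimit(10))
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s = %s (create rev %d)\n", kv.Key, kv.Value, kv.CreateRevision)
	}
}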
-func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } } - -// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision. -func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } } - -// WithFirstCreate gets the key with the oldest creation revision in the request range. -func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } - -// WithLastCreate gets the key with the latest creation revision in the request range. -func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) } - -// WithFirstKey gets the lexically first key in the request range. -func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) } - -// WithLastKey gets the lexically last key in the request range. -func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) } - -// WithFirstRev gets the key with the oldest modification revision in the request range. -func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) } - -// WithLastRev gets the key with the latest modification revision in the request range. -func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) } - -// withTop gets the first key over the get's prefix given a sort order -func withTop(target SortTarget, order SortOrder) []OpOption { - return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)} -} - -// WithProgressNotify makes watch server send periodic progress updates -// every 10 minutes when there is no incoming events. -// Progress updates have zero events in WatchResponse. -func WithProgressNotify() OpOption { - return func(op *Op) { - op.progressNotify = true - } -} - -// WithCreatedNotify makes watch server sends the created event. -func WithCreatedNotify() OpOption { - return func(op *Op) { - op.createdNotify = true - } -} - -// WithFilterPut discards PUT events from the watcher. -func WithFilterPut() OpOption { - return func(op *Op) { op.filterPut = true } -} - -// WithFilterDelete discards DELETE events from the watcher. -func WithFilterDelete() OpOption { - return func(op *Op) { op.filterDelete = true } -} - -// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted, -// nothing will be returned. -func WithPrevKV() OpOption { - return func(op *Op) { - op.prevKV = true - } -} - -// WithIgnoreValue updates the key using its current value. -// This option can not be combined with non-empty values. -// Returns an error if the key does not exist. -func WithIgnoreValue() OpOption { - return func(op *Op) { - op.ignoreValue = true - } -} - -// WithIgnoreLease updates the key using its current lease. -// This option can not be combined with WithLease. -// Returns an error if the key does not exist. -func WithIgnoreLease() OpOption { - return func(op *Op) { - op.ignoreLease = true - } -} - -// LeaseOp represents an Operation that lease can execute. -type LeaseOp struct { - id LeaseID - - // for TimeToLive - attachedKeys bool -} - -// LeaseOption configures lease operations. -type LeaseOption func(*LeaseOp) - -func (op *LeaseOp) applyOpts(opts []LeaseOption) { - for _, opt := range opts { - opt(op) - } -} - -// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. 
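WithAttachedKeys pairs with Lease.TimeToLive from the lease.go hunk above. A small fragment shows the call shape; cli and id are assumed to come from an earlier Grant, so this is not a complete program.

package leasedemo

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// inspectLease prints the remaining TTL and the number of attached keys for an
// existing lease. Per the LeaseTimeToLiveResponse docs above, an expired lease
// reports TTL -1.
func inspectLease(cli *clientv3.Client, id clientv3.LeaseID) error {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	ttl, err := cli.TimeToLive(ctx, id, clientv3.WithAttachedKeys())
	if err != nil {
		return err
	}
	if ttl.TTL <= 0 {
		fmt.Println("lease already expired")
		return nil
	}
	fmt.Printf("ttl=%ds granted=%ds attached keys=%d\n", ttl.TTL, ttl.GrantedTTL, len(ttl.Keys))
	return nil
}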
-func WithAttachedKeys() LeaseOption { - return func(op *LeaseOp) { op.attachedKeys = true } -} - -func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest { - ret := &LeaseOp{id: id} - ret.applyOpts(opts) - return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys} -} diff --git a/vendor/github.com/coreos/etcd/clientv3/options.go b/vendor/github.com/coreos/etcd/clientv3/options.go deleted file mode 100644 index fa25811f3..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "math" - - "google.golang.org/grpc" -) - -var ( - // Disable gRPC internal retrial logic - // TODO: enable when gRPC retry is stable (FailFast=false) - // Reference: - // - https://github.com/grpc/grpc-go/issues/1532 - // - https://github.com/grpc/proposal/blob/master/A6-client-retries.md - defaultFailFast = grpc.FailFast(true) - - // client-side request send limit, gRPC default is math.MaxInt32 - // Make sure that "client-side send limit < server-side default send/recv limit" - // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes - defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) - - // client-side response receive limit, gRPC default is 4MB - // Make sure that "client-side receive limit >= server-side default send/recv limit" - // because range response can easily exceed request send limits - // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway - defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) -) - -// defaultCallOpts defines a list of default "gRPC.CallOption". -// Some options are exposed to "clientv3.Config". -// Defaults will be overridden by the settings in "clientv3.Config". -var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize} - -// MaxLeaseTTL is the maximum lease TTL value -const MaxLeaseTTL = 9000000000 diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go deleted file mode 100644 index c6ef585b5..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import "context" - -// TODO: remove this when "FailFast=false" is fixed. 
-// See https://github.com/grpc/grpc-go/issues/1532. -func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { - select { - case <-ready: - return nil - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-clientCtx.Done(): - return clientCtx.Err() - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go deleted file mode 100644 index 7f89ba641..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type retryPolicy uint8 - -const ( - repeatable retryPolicy = iota - nonRepeatable -) - -type rpcFunc func(ctx context.Context) error -type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error -type retryStopErrFunc func(error) bool - -// immutable requests (e.g. Get) should be retried unless it's -// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge). -// -// "isRepeatableStopError" returns "true" when an immutable request -// is interrupted by server-side or gRPC-side error and its status -// code is not transient (!= codes.Unavailable). -// -// Returning "true" means retry should stop, since client cannot -// handle itself even with retries. -func isRepeatableStopError(err error) bool { - eErr := rpctypes.Error(err) - // always stop retry on etcd errors - if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { - return true - } - // only retry if unavailable - ev, _ := status.FromError(err) - return ev.Code() != codes.Unavailable -} - -// mutable requests (e.g. Put, Delete, Txn) should only be retried -// when the status code is codes.Unavailable when initial connection -// has not been established (no pinned endpoint). -// -// "isNonRepeatableStopError" returns "true" when a mutable request -// is interrupted by non-transient error that client cannot handle itself, -// or transient error while the connection has already been established -// (pinned endpoint exists). -// -// Returning "true" means retry should stop, otherwise it violates -// write-at-most-once semantics. 
-func isNonRepeatableStopError(err error) bool { - ev, _ := status.FromError(err) - if ev.Code() != codes.Unavailable { - return true - } - desc := rpctypes.ErrorDesc(err) - return desc != "there is no address available" && desc != "there is no connection available" -} - -func (c *Client) newRetryWrapper() retryRPCFunc { - return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error { - var isStop retryStopErrFunc - switch rp { - case repeatable: - isStop = isRepeatableStopError - case nonRepeatable: - isStop = isNonRepeatableStopError - } - for { - if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { - return err - } - pinned := c.balancer.pinned() - err := f(rpcCtx) - if err == nil { - return nil - } - logger.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) - - if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { - // mark this before endpoint switch is triggered - c.balancer.hostPortError(pinned, err) - c.balancer.next() - logger.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) - } - - if isStop(err) { - return err - } - } - } -} - -func (c *Client) newAuthRetryWrapper(retryf retryRPCFunc) retryRPCFunc { - return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error { - for { - pinned := c.balancer.pinned() - err := retryf(rpcCtx, f, rp) - if err == nil { - return nil - } - logger.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) - // always stop retry on etcd errors other than invalid auth token - if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { - gterr := c.getToken(rpcCtx) - if gterr != nil { - logger.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) - return err // return the original error for simplicity - } - continue - } - return err - } - } -} - -type retryKVClient struct { - kc pb.KVClient - retryf retryRPCFunc -} - -// RetryKVClient implements a KVClient. -func RetryKVClient(c *Client) pb.KVClient { - return &retryKVClient{ - kc: pb.NewKVClient(c.conn), - retryf: c.newAuthRetryWrapper(c.newRetryWrapper()), - } -} -func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Range(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Put(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.DeleteRange(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - // TODO: "repeatable" for read-only txn - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Txn(rctx, in, opts...) 
- return err - }, nonRepeatable) - return resp, err -} - -func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.kc.Compact(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -type retryLeaseClient struct { - lc pb.LeaseClient - retryf retryRPCFunc -} - -// RetryLeaseClient implements a LeaseClient. -func RetryLeaseClient(c *Client) pb.LeaseClient { - return &retryLeaseClient{ - lc: pb.NewLeaseClient(c.conn), - retryf: c.newAuthRetryWrapper(c.newRetryWrapper()), - } -} - -func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseLeases(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) - return err - }, repeatable) - return resp, err - -} - -func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) - return err - }, repeatable) - return stream, err -} - -type retryClusterClient struct { - cc pb.ClusterClient - retryf retryRPCFunc -} - -// RetryClusterClient implements a ClusterClient. -func RetryClusterClient(c *Client) pb.ClusterClient { - return &retryClusterClient{ - cc: pb.NewClusterClient(c.conn), - retryf: c.newRetryWrapper(), - } -} - -func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberList(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberAdd(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberRemove(rctx, in, opts...) 
- return err - }, nonRepeatable) - return resp, err -} - -func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -type retryMaintenanceClient struct { - mc pb.MaintenanceClient - retryf retryRPCFunc -} - -// RetryMaintenanceClient implements a Maintenance. -func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { - return &retryMaintenanceClient{ - mc: pb.NewMaintenanceClient(conn), - retryf: c.newRetryWrapper(), - } -} - -func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { - err = rmc.retryf(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Alarm(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { - err = rmc.retryf(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Status(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { - err = rmc.retryf(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Hash(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) { - err = rmc.retryf(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.HashKV(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { - err = rmc.retryf(ctx, func(rctx context.Context) error { - stream, err = rmc.mc.Snapshot(rctx, in, opts...) - return err - }, repeatable) - return stream, err -} - -func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) { - err = rmc.retryf(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.MoveLeader(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { - err = rmc.retryf(ctx, func(rctx context.Context) error { - resp, err = rmc.mc.Defragment(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -type retryAuthClient struct { - ac pb.AuthClient - retryf retryRPCFunc -} - -// RetryAuthClient implements a AuthClient. -func RetryAuthClient(c *Client) pb.AuthClient { - return &retryAuthClient{ - ac: pb.NewAuthClient(c.conn), - retryf: c.newRetryWrapper(), - } -} - -func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserList(rctx, in, opts...) 
- return err - }, repeatable) - return resp, err -} - -func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserGet(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleGet(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleList(rctx, in, opts...) - return err - }, repeatable) - return resp, err -} - -func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.AuthEnable(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.AuthDisable(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserAdd(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserDelete(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserChangePassword(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserGrantRole(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) 
- return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleAdd(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleDelete(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} - -func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.ac.Authenticate(rctx, in, opts...) - return err - }, nonRepeatable) - return resp, err -} diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go deleted file mode 100644 index 2bb9d9a13..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/sort.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -type SortTarget int -type SortOrder int - -const ( - SortNone SortOrder = iota - SortAscend - SortDescend -) - -const ( - SortByKey SortTarget = iota - SortByVersion - SortByCreateRevision - SortByModRevision - SortByValue -) - -type SortOption struct { - Target SortTarget - Order SortOrder -} diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go deleted file mode 100644 index c3c2d2485..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "sync" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" -) - -// Txn is the interface that wraps mini-transactions. -// -// Txn(context.TODO()).If( -// Compare(Value(k1), ">", v1), -// Compare(Version(k1), "=", 2) -// ).Then( -// OpPut(k2,v2), OpPut(k3,v3) -// ).Else( -// OpPut(k4,v4), OpPut(k5,v5) -// ).Commit() -// -type Txn interface { - // If takes a list of comparison. If all comparisons passed in succeed, - // the operations passed into Then() will be executed. Or the operations - // passed into Else() will be executed. - If(cs ...Cmp) Txn - - // Then takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() succeed. - Then(ops ...Op) Txn - - // Else takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() fail. - Else(ops ...Op) Txn - - // Commit tries to commit the transaction. - Commit() (*TxnResponse, error) -} - -type txn struct { - kv *kv - ctx context.Context - - mu sync.Mutex - cif bool - cthen bool - celse bool - - isWrite bool - - cmps []*pb.Compare - - sus []*pb.RequestOp - fas []*pb.RequestOp - - callOpts []grpc.CallOption -} - -func (txn *txn) If(cs ...Cmp) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cif { - panic("cannot call If twice!") - } - - if txn.cthen { - panic("cannot call If after Then!") - } - - if txn.celse { - panic("cannot call If after Else!") - } - - txn.cif = true - - for i := range cs { - txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i])) - } - - return txn -} - -func (txn *txn) Then(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cthen { - panic("cannot call Then twice!") - } - if txn.celse { - panic("cannot call Then after Else!") - } - - txn.cthen = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.sus = append(txn.sus, op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Else(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.celse { - panic("cannot call Else twice!") - } - - txn.celse = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.fas = append(txn.fas, op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Commit() (*TxnResponse, error) { - txn.mu.Lock() - defer txn.mu.Unlock() - - r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - - var resp *pb.TxnResponse - var err error - resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) - if err != nil { - return nil, toErr(txn.ctx, err) - } - return (*TxnResponse)(resp), nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go deleted file mode 100644 index d7633850e..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "fmt" - "sync" - "time" - - v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -const ( - EventTypeDelete = mvccpb.DELETE - EventTypePut = mvccpb.PUT - - closeSendErrTimeout = 250 * time.Millisecond -) - -type Event mvccpb.Event - -type WatchChan <-chan WatchResponse - -type Watcher interface { - // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. If revisions waiting to be sent over the - // watch are compacted, then the watch will be canceled by the server, the - // client will post a compacted error watch response, and the channel will close. - Watch(ctx context.Context, key string, opts ...OpOption) WatchChan - - // Close closes the watcher and cancels all watch requests. - Close() error -} - -type WatchResponse struct { - Header pb.ResponseHeader - Events []*Event - - // CompactRevision is the minimum revision the watcher may receive. - CompactRevision int64 - - // Canceled is used to indicate watch failure. - // If the watch failed and the stream was about to close, before the channel is closed, - // the channel sends a final response that has Canceled set to true with a non-nil Err(). - Canceled bool - - // Created is used to indicate the creation of the watcher. - Created bool - - closeErr error - - // cancelReason is a reason of canceling watch - cancelReason string -} - -// IsCreate returns true if the event tells that the key is newly created. -func (e *Event) IsCreate() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision -} - -// IsModify returns true if the event tells that a new value is put on existing key. -func (e *Event) IsModify() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision -} - -// Err is the error value if this WatchResponse holds an error. -func (wr *WatchResponse) Err() error { - switch { - case wr.closeErr != nil: - return v3rpc.Error(wr.closeErr) - case wr.CompactRevision != 0: - return v3rpc.ErrCompacted - case wr.Canceled: - if len(wr.cancelReason) != 0 { - return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) - } - return v3rpc.ErrFutureRev - } - return nil -} - -// IsProgressNotify returns true if the WatchResponse is progress notification. -func (wr *WatchResponse) IsProgressNotify() bool { - return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 -} - -// watcher implements the Watcher interface -type watcher struct { - remote pb.WatchClient - callOpts []grpc.CallOption - - // mu protects the grpc streams map - mu sync.RWMutex - - // streams holds all the active grpc streams keyed by ctx value. - streams map[string]*watchGrpcStream -} - -// watchGrpcStream tracks all watch resources attached to a single grpc stream. 
-type watchGrpcStream struct { - owner *watcher - remote pb.WatchClient - callOpts []grpc.CallOption - - // ctx controls internal remote.Watch requests - ctx context.Context - // ctxKey is the key used when looking up this stream's context - ctxKey string - cancel context.CancelFunc - - // substreams holds all active watchers on this grpc stream - substreams map[int64]*watcherStream - // resuming holds all resuming watchers on this grpc stream - resuming []*watcherStream - - // reqc sends a watch request from Watch() to the main goroutine - reqc chan *watchRequest - // respc receives data from the watch client - respc chan *pb.WatchResponse - // donec closes to broadcast shutdown - donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconnect logic - errc chan error - // closingc gets the watcherStream of closing watchers - closingc chan *watcherStream - // wg is Done when all substream goroutines have exited - wg sync.WaitGroup - - // resumec closes to signal that all substreams should begin resuming - resumec chan struct{} - // closeErr is the error that closed the watch stream - closeErr error -} - -// watchRequest is issued by the subscriber to start a new watcher -type watchRequest struct { - ctx context.Context - key string - end string - rev int64 - // send created notification event if this field is true - createdNotify bool - // progressNotify is for progress updates - progressNotify bool - // filters is the list of events to filter out - filters []pb.WatchCreateRequest_FilterType - // get the previous key-value pair before the event happens - prevKV bool - // retc receives a chan WatchResponse once the watcher is established - retc chan chan WatchResponse -} - -// watcherStream represents a registered watcher -type watcherStream struct { - // initReq is the request that initiated this request - initReq watchRequest - - // outc publishes watch responses to subscriber - outc chan WatchResponse - // recvc buffers watch responses before publishing - recvc chan *WatchResponse - // donec closes when the watcherStream goroutine stops. - donec chan struct{} - // closing is set to true when stream should be scheduled to shutdown. 
- closing bool - // id is the registered watch id on the grpc stream - id int64 - - // buf holds all events received from etcd but not yet consumed by the client - buf []*WatchResponse -} - -func NewWatcher(c *Client) Watcher { - return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) -} - -func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { - w := &watcher{ - remote: wc, - streams: make(map[string]*watchGrpcStream), - } - if c != nil { - w.callOpts = c.callOpts - } - return w -} - -// never closes -var valCtxCh = make(chan struct{}) -var zeroTime = time.Unix(0, 0) - -// ctx with only the values; never Done -type valCtx struct{ context.Context } - -func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } -func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } -func (vc *valCtx) Err() error { return nil } - -func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { - ctx, cancel := context.WithCancel(&valCtx{inctx}) - wgs := &watchGrpcStream{ - owner: w, - remote: w.remote, - callOpts: w.callOpts, - ctx: ctx, - ctxKey: streamKeyFromCtx(inctx), - cancel: cancel, - substreams: make(map[int64]*watcherStream), - respc: make(chan *pb.WatchResponse), - reqc: make(chan *watchRequest), - donec: make(chan struct{}), - errc: make(chan error, 1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), - } - go wgs.run() - return wgs -} - -// Watch posts a watch request to run() and waits for a new watcher channel -func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { - ow := opWatch(key, opts...) - - var filters []pb.WatchCreateRequest_FilterType - if ow.filterPut { - filters = append(filters, pb.WatchCreateRequest_NOPUT) - } - if ow.filterDelete { - filters = append(filters, pb.WatchCreateRequest_NODELETE) - } - - wr := &watchRequest{ - ctx: ctx, - createdNotify: ow.createdNotify, - key: string(ow.key), - end: string(ow.end), - rev: ow.rev, - progressNotify: ow.progressNotify, - filters: filters, - prevKV: ow.prevKV, - retc: make(chan chan WatchResponse, 1), - } - - ok := false - ctxKey := streamKeyFromCtx(ctx) - - // find or allocate appropriate grpc watch stream - w.mu.Lock() - if w.streams == nil { - // closed - w.mu.Unlock() - ch := make(chan WatchResponse) - close(ch) - return ch - } - wgs := w.streams[ctxKey] - if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) - w.streams[ctxKey] = wgs - } - donec := wgs.donec - reqc := wgs.reqc - w.mu.Unlock() - - // couldn't create channel; return closed channel - closeCh := make(chan WatchResponse, 1) - - // submit request - select { - case reqc <- wr: - ok = true - case <-wr.ctx.Done(): - case <-donec: - if wgs.closeErr != nil { - closeCh <- WatchResponse{closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - return w.Watch(ctx, key, opts...) - } - - // receive channel - if ok { - select { - case ret := <-wr.retc: - return ret - case <-ctx.Done(): - case <-donec: - if wgs.closeErr != nil { - closeCh <- WatchResponse{closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - return w.Watch(ctx, key, opts...) 
- } - } - - close(closeCh) - return closeCh -} - -func (w *watcher) Close() (err error) { - w.mu.Lock() - streams := w.streams - w.streams = nil - w.mu.Unlock() - for _, wgs := range streams { - if werr := wgs.close(); werr != nil { - err = werr - } - } - return err -} - -func (w *watchGrpcStream) close() (err error) { - w.cancel() - <-w.donec - select { - case err = <-w.errc: - default: - } - return toErr(w.ctx, err) -} - -func (w *watcher) closeStream(wgs *watchGrpcStream) { - w.mu.Lock() - close(wgs.donec) - wgs.cancel() - if w.streams != nil { - delete(w.streams, wgs.ctxKey) - } - w.mu.Unlock() -} - -func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { - if resp.WatchId == -1 { - // failed; no channel - close(ws.recvc) - return - } - ws.id = resp.WatchId - w.substreams[ws.id] = ws -} - -func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { - select { - case ws.outc <- *resp: - case <-ws.initReq.ctx.Done(): - case <-time.After(closeSendErrTimeout): - } - close(ws.outc) -} - -func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { - // send channel response in case stream was never established - select { - case ws.initReq.retc <- ws.outc: - default: - } - // close subscriber's channel - if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { - go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr}) - } else if ws.outc != nil { - close(ws.outc) - } - if ws.id != -1 { - delete(w.substreams, ws.id) - return - } - for i := range w.resuming { - if w.resuming[i] == ws { - w.resuming[i] = nil - return - } - } -} - -// run is the root of the goroutines for managing a watcher client -func (w *watchGrpcStream) run() { - var wc pb.Watch_WatchClient - var closeErr error - - // substreams marked to close but goroutine still running; needed for - // avoiding double-closing recvc on grpc stream teardown - closing := make(map[*watcherStream]struct{}) - - defer func() { - w.closeErr = closeErr - // shutdown substreams and resuming substreams - for _, ws := range w.substreams { - if _, ok := closing[ws]; !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - for _, ws := range w.resuming { - if _, ok := closing[ws]; ws != nil && !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - w.joinSubstreams() - for range closing { - w.closeSubstream(<-w.closingc) - } - w.wg.Wait() - w.owner.closeStream(w) - }() - - // start a stream with the etcd grpc server - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - - cancelSet := make(map[int64]struct{}) - - for { - select { - // Watch() requested - case wreq := <-w.reqc: - outc := make(chan WatchResponse, 1) - ws := &watcherStream{ - initReq: *wreq, - id: -1, - outc: outc, - // unbuffered so resumes won't cause repeat events - recvc: make(chan *WatchResponse), - } - - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - - // queue up for watcher creation/resume - w.resuming = append(w.resuming, ws) - if len(w.resuming) == 1 { - // head of resume queue, can register a new watcher - wc.Send(ws.initReq.toPB()) - } - // New events from the watch client - case pbresp := <-w.respc: - switch { - case pbresp.Created: - // response to head of queue creation - if ws := w.resuming[0]; ws != nil { - w.addSubstream(pbresp, ws) - w.dispatchEvent(pbresp) - w.resuming[0] = nil - } - if ws := w.nextResume(); ws != nil { - wc.Send(ws.initReq.toPB()) - } - case pbresp.Canceled && pbresp.CompactRevision == 0: - 
delete(cancelSet, pbresp.WatchId) - if ws, ok := w.substreams[pbresp.WatchId]; ok { - // signal to stream goroutine to update closingc - close(ws.recvc) - closing[ws] = struct{}{} - } - default: - // dispatch to appropriate watch stream - if ok := w.dispatchEvent(pbresp); ok { - break - } - // watch response on unexpected watch id; cancel id - if _, ok := cancelSet[pbresp.WatchId]; ok { - break - } - cancelSet[pbresp.WatchId] = struct{}{} - cr := &pb.WatchRequest_CancelRequest{ - CancelRequest: &pb.WatchCancelRequest{ - WatchId: pbresp.WatchId, - }, - } - req := &pb.WatchRequest{RequestUnion: cr} - wc.Send(req) - } - // watch client failed on Recv; spawn another if possible - case err := <-w.errc: - if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { - closeErr = err - return - } - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - if ws := w.nextResume(); ws != nil { - wc.Send(ws.initReq.toPB()) - } - cancelSet = make(map[int64]struct{}) - case <-w.ctx.Done(): - return - case ws := <-w.closingc: - w.closeSubstream(ws) - delete(closing, ws) - if len(w.substreams)+len(w.resuming) == 0 { - // no more watchers on this stream, shutdown - return - } - } - } -} - -// nextResume chooses the next resuming to register with the grpc stream. Abandoned -// streams are marked as nil in the queue since the head must wait for its inflight registration. -func (w *watchGrpcStream) nextResume() *watcherStream { - for len(w.resuming) != 0 { - if w.resuming[0] != nil { - return w.resuming[0] - } - w.resuming = w.resuming[1:len(w.resuming)] - } - return nil -} - -// dispatchEvent sends a WatchResponse to the appropriate watcher stream -func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - events := make([]*Event, len(pbresp.Events)) - for i, ev := range pbresp.Events { - events[i] = (*Event)(ev) - } - wr := &WatchResponse{ - Header: *pbresp.Header, - Events: events, - CompactRevision: pbresp.CompactRevision, - Created: pbresp.Created, - Canceled: pbresp.Canceled, - cancelReason: pbresp.CancelReason, - } - ws, ok := w.substreams[pbresp.WatchId] - if !ok { - return false - } - select { - case ws.recvc <- wr: - case <-ws.donec: - return false - } - return true -} - -// serveWatchClient forwards messages from the grpc stream to run() -func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { - for { - resp, err := wc.Recv() - if err != nil { - select { - case w.errc <- err: - case <-w.donec: - } - return - } - select { - case w.respc <- resp: - case <-w.donec: - return - } - } -} - -// serveSubstream forwards watch responses from run() to the subscriber -func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { - if ws.closing { - panic("created substream goroutine but substream is closing") - } - - // nextRev is the minimum expected next revision - nextRev := ws.initReq.rev - resuming := false - defer func() { - if !resuming { - ws.closing = true - } - close(ws.donec) - if !resuming { - w.closingc <- ws - } - w.wg.Done() - }() - - emptyWr := &WatchResponse{} - for { - curWr := emptyWr - outc := ws.outc - - if len(ws.buf) > 0 { - curWr = ws.buf[0] - } else { - outc = nil - } - select { - case outc <- *curWr: - if ws.buf[0].Err() != nil { - return - } - ws.buf[0] = nil - ws.buf = ws.buf[1:] - case wr, ok := <-ws.recvc: - if !ok { - // shutdown from closeSubstream - return - } - - if wr.Created { - if ws.initReq.retc != nil { - ws.initReq.retc <- ws.outc - // to prevent next write from taking the slot in buffered channel 
- // and posting duplicate create events - ws.initReq.retc = nil - - // send first creation event only if requested - if ws.initReq.createdNotify { - ws.outc <- *wr - } - // once the watch channel is returned, a current revision - // watch must resume at the store revision. This is necessary - // for the following case to work as expected: - // wch := m1.Watch("a") - // m2.Put("a", "b") - // <-wch - // If the revision is only bound on the first observed event, - // if wch is disconnected before the Put is issued, then reconnects - // after it is committed, it'll miss the Put. - if ws.initReq.rev == 0 { - nextRev = wr.Header.Revision - } - } - } else { - // current progress of watch; <= store revision - nextRev = wr.Header.Revision - } - - if len(wr.Events) > 0 { - nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 - } - ws.initReq.rev = nextRev - - // created event is already sent above, - // watcher should not post duplicate events - if wr.Created { - continue - } - - // TODO pause channel if buffer gets too large - ws.buf = append(ws.buf, wr) - case <-w.ctx.Done(): - return - case <-ws.initReq.ctx.Done(): - return - case <-resumec: - resuming = true - return - } - } - // lazily send cancel message if events on missing id -} - -func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { - // mark all substreams as resuming - close(w.resumec) - w.resumec = make(chan struct{}) - w.joinSubstreams() - for _, ws := range w.substreams { - ws.id = -1 - w.resuming = append(w.resuming, ws) - } - // strip out nils, if any - var resuming []*watcherStream - for _, ws := range w.resuming { - if ws != nil { - resuming = append(resuming, ws) - } - } - w.resuming = resuming - w.substreams = make(map[int64]*watcherStream) - - // connect to grpc stream while accepting watcher cancelation - stopc := make(chan struct{}) - donec := w.waitCancelSubstreams(stopc) - wc, err := w.openWatchClient() - close(stopc) - <-donec - - // serve all non-closing streams, even if there's a client error - // so that the teardown path can shutdown the streams as expected. - for _, ws := range w.resuming { - if ws.closing { - continue - } - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - } - - if err != nil { - return nil, v3rpc.Error(err) - } - - // receive data from new grpc stream - go w.serveWatchClient(wc) - return wc, nil -} - -func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { - var wg sync.WaitGroup - wg.Add(len(w.resuming)) - donec := make(chan struct{}) - for i := range w.resuming { - go func(ws *watcherStream) { - defer wg.Done() - if ws.closing { - if ws.initReq.ctx.Err() != nil && ws.outc != nil { - close(ws.outc) - ws.outc = nil - } - return - } - select { - case <-ws.initReq.ctx.Done(): - // closed ws will be removed from resuming - ws.closing = true - close(ws.outc) - ws.outc = nil - w.wg.Add(1) - go func() { - defer w.wg.Done() - w.closingc <- ws - }() - case <-stopc: - } - }(w.resuming[i]) - } - go func() { - defer close(donec) - wg.Wait() - }() - return donec -} - -// joinSubstreams waits for all substream goroutines to complete. -func (w *watchGrpcStream) joinSubstreams() { - for _, ws := range w.substreams { - <-ws.donec - } - for _, ws := range w.resuming { - if ws != nil { - <-ws.donec - } - } -} - -var maxBackoff = 100 * time.Millisecond - -// openWatchClient retries opening a watch client until success or halt. 
-// manually retry in case "ws==nil && err==nil" -// TODO: remove FailFast=false -func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { - backoff := time.Millisecond - for { - select { - case <-w.ctx.Done(): - if err == nil { - return nil, w.ctx.Err() - } - return nil, err - default: - } - if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { - break - } - if isHaltErr(w.ctx, err) { - return nil, v3rpc.Error(err) - } - if isUnavailableErr(w.ctx, err) { - // retry, but backoff - if backoff < maxBackoff { - // 25% backoff factor - backoff = backoff + backoff/4 - if backoff > maxBackoff { - backoff = maxBackoff - } - } - time.Sleep(backoff) - } - } - return ws, nil -} - -// toPB converts an internal watch request structure to its protobuf WatchRequest structure. -func (wr *watchRequest) toPB() *pb.WatchRequest { - req := &pb.WatchCreateRequest{ - StartRevision: wr.rev, - Key: []byte(wr.key), - RangeEnd: []byte(wr.end), - ProgressNotify: wr.progressNotify, - Filters: wr.filters, - PrevKv: wr.prevKV, - } - cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} - return &pb.WatchRequest{RequestUnion: cr} -} - -func streamKeyFromCtx(ctx context.Context) string { - if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) - } - return "" -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go deleted file mode 100644 index f72c6a644..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction. -package rpctypes diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go deleted file mode 100644 index 55eab38ef..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpctypes - -import ( - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// server-side error -var ( - ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() - ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() - ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() - ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() - ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() - ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() - ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() - ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() - ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() - - ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() - ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() - ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err() - - ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() - ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() - ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() - ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() - ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() - - ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() - ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() - - ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err() - ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err() - ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err() - ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err() - ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err() - ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err() - ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() - ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() - ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() - ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() - ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err() - ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() - ErrGRPCInvalidAuthToken = 
status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() - ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() - - ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() - ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() - ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err() - ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err() - ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() - ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() - ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() - ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() - ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() - - errStringToError = map[string]error{ - ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, - ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, - ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, - ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, - - ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, - ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, - ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, - ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, - ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, - - ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, - ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, - ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge, - - ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, - ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, - ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, - ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, - ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, - - ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, - ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, - - ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, - ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, - ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, - ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, - ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, - ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, - ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, - ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, - ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, - ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, - ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, - ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, - ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, - ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, - - ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, - ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, - ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, - ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, - ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, - ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, - ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, - ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, - ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, - } -) - -// client-side error -var ( 
- ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrKeyNotFound = Error(ErrGRPCKeyNotFound) - ErrValueProvided = Error(ErrGRPCValueProvided) - ErrLeaseProvided = Error(ErrGRPCLeaseProvided) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) - - ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) - ErrLeaseExist = Error(ErrGRPCLeaseExist) - ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge) - - ErrMemberExist = Error(ErrGRPCMemberExist) - ErrPeerURLExist = Error(ErrGRPCPeerURLExist) - ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) - ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) - ErrMemberNotFound = Error(ErrGRPCMemberNotFound) - - ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) - ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) - - ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) - ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) - ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist) - ErrUserEmpty = Error(ErrGRPCUserEmpty) - ErrUserNotFound = Error(ErrGRPCUserNotFound) - ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) - ErrRoleNotFound = Error(ErrGRPCRoleNotFound) - ErrAuthFailed = Error(ErrGRPCAuthFailed) - ErrPermissionDenied = Error(ErrGRPCPermissionDenied) - ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) - ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) - ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) - ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) - ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) - - ErrNoLeader = Error(ErrGRPCNoLeader) - ErrNotLeader = Error(ErrGRPCNotLeader) - ErrNotCapable = Error(ErrGRPCNotCapable) - ErrStopped = Error(ErrGRPCStopped) - ErrTimeout = Error(ErrGRPCTimeout) - ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) - ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) - ErrUnhealthy = Error(ErrGRPCUnhealthy) - ErrCorrupt = Error(ErrGRPCCorrupt) -) - -// EtcdError defines gRPC server errors. -// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) -type EtcdError struct { - code codes.Code - desc string -} - -// Code returns grpc/codes.Code. -// TODO: define clientv3/codes.Code. -func (e EtcdError) Code() codes.Code { - return e.code -} - -func (e EtcdError) Error() string { - return e.desc -} - -func Error(err error) error { - if err == nil { - return nil - } - verr, ok := errStringToError[ErrorDesc(err)] - if !ok { // not gRPC error - return err - } - ev, ok := status.FromError(verr) - var desc string - if ok { - desc = ev.Message() - } else { - desc = verr.Error() - } - return EtcdError{code: ev.Code(), desc: desc} -} - -func ErrorDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go deleted file mode 100644 index 5c590e1ae..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpctypes - -var ( - MetadataRequireLeaderKey = "hasleader" - MetadataHasLeader = "true" -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go deleted file mode 100644 index 90045a5c9..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ /dev/null @@ -1,1035 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: etcdserver.proto - -/* - Package etcdserverpb is a generated protocol buffer package. - - It is generated from these files: - etcdserver.proto - raft_internal.proto - rpc.proto - - It has these top-level messages: - Request - Metadata - RequestHeader - InternalRaftRequest - EmptyResponse - InternalAuthenticateRequest - ResponseHeader - RangeRequest - RangeResponse - PutRequest - PutResponse - DeleteRangeRequest - DeleteRangeResponse - RequestOp - ResponseOp - Compare - TxnRequest - TxnResponse - CompactionRequest - CompactionResponse - HashRequest - HashKVRequest - HashKVResponse - HashResponse - SnapshotRequest - SnapshotResponse - WatchRequest - WatchCreateRequest - WatchCancelRequest - WatchResponse - LeaseGrantRequest - LeaseGrantResponse - LeaseRevokeRequest - LeaseRevokeResponse - LeaseKeepAliveRequest - LeaseKeepAliveResponse - LeaseTimeToLiveRequest - LeaseTimeToLiveResponse - LeaseLeasesRequest - LeaseStatus - LeaseLeasesResponse - Member - MemberAddRequest - MemberAddResponse - MemberRemoveRequest - MemberRemoveResponse - MemberUpdateRequest - MemberUpdateResponse - MemberListRequest - MemberListResponse - DefragmentRequest - DefragmentResponse - MoveLeaderRequest - MoveLeaderResponse - AlarmRequest - AlarmMember - AlarmResponse - StatusRequest - StatusResponse - AuthEnableRequest - AuthDisableRequest - AuthenticateRequest - AuthUserAddRequest - AuthUserGetRequest - AuthUserDeleteRequest - AuthUserChangePasswordRequest - AuthUserGrantRoleRequest - AuthUserRevokeRoleRequest - AuthRoleAddRequest - AuthRoleGetRequest - AuthUserListRequest - AuthRoleListRequest - AuthRoleDeleteRequest - AuthRoleGrantPermissionRequest - AuthRoleRevokePermissionRequest - AuthEnableResponse - AuthDisableResponse - AuthenticateResponse - AuthUserAddResponse - AuthUserGetResponse - AuthUserDeleteResponse - AuthUserChangePasswordResponse - AuthUserGrantRoleResponse - AuthUserRevokeRoleResponse - AuthRoleAddResponse - AuthRoleGetResponse - AuthRoleListResponse - AuthUserListResponse - AuthRoleDeleteResponse - AuthRoleGrantPermissionResponse - AuthRoleRevokePermissionResponse -*/ -package etcdserverpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - _ "github.com/gogo/protobuf/gogoproto" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Request struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` - Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` - Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` - Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` - PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` - PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` - PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` - Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` - Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` - Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` - Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` - Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` - Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` - Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` - Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` - Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} } - -type Metadata struct { - NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` - ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} } - -func init() { - proto.RegisterType((*Request)(nil), "etcdserverpb.Request") - proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") -} -func (m *Request) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Request) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) - dAtA[i] = 0x12 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) - i += copy(dAtA[i:], m.Method) - dAtA[i] = 0x1a - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x22 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) - i += copy(dAtA[i:], m.Val) - dAtA[i] = 0x28 - i++ - if m.Dir { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x32 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) - i += copy(dAtA[i:], m.PrevValue) - dAtA[i] = 0x38 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) - if m.PrevExist != nil { - dAtA[i] = 0x40 - i++ - if *m.PrevExist { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - dAtA[i] = 0x48 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) - dAtA[i] = 0x50 - i++ - if m.Wait { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - 
i++ - dAtA[i] = 0x58 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) - dAtA[i] = 0x60 - i++ - if m.Recursive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x68 - i++ - if m.Sorted { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x70 - i++ - if m.Quorum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x78 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if m.Stream { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Refresh != nil { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - if *m.Refresh { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Metadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) - dAtA[i] = 0x10 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Request) Size() (n int) { - var l int - _ = l - n += 1 + sovEtcdserver(uint64(m.ID)) - l = len(m.Method) - n += 1 + l + sovEtcdserver(uint64(l)) - l = len(m.Path) - n += 1 + l + sovEtcdserver(uint64(l)) - l = len(m.Val) - n += 1 + l + sovEtcdserver(uint64(l)) - n += 2 - l = len(m.PrevValue) - n += 1 + l + sovEtcdserver(uint64(l)) - n += 1 + sovEtcdserver(uint64(m.PrevIndex)) - if m.PrevExist != nil { - n += 2 - } - n += 1 + sovEtcdserver(uint64(m.Expiration)) - n += 2 - n += 1 + sovEtcdserver(uint64(m.Since)) - n += 2 - n += 2 - n += 2 - n += 1 + sovEtcdserver(uint64(m.Time)) - n += 3 - if m.Refresh != nil { - n += 3 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Metadata) Size() (n int) { - var l int - _ = l - n += 1 + sovEtcdserver(uint64(m.NodeID)) - n += 1 + sovEtcdserver(uint64(m.ClusterID)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovEtcdserver(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozEtcdserver(x uint64) (n int) { - return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Request) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Request: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { 
- return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Method = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Val = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Dir = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrevValue = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType) - } - m.PrevIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PrevIndex |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.PrevExist = &b - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) - } - m.Expiration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Expiration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Wait = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) - } - m.Since = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Since |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Recursive = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Sorted = bool(v != 0) - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Quorum = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - m.Time = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Time |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stream = bool(v != 0) - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Refresh", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Refresh = &b - default: - iNdEx = preIndex - skippy, err := skipEtcdserver(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthEtcdserver - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - m.NodeID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NodeID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) - } - m.ClusterID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipEtcdserver(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthEtcdserver - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEtcdserver(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthEtcdserver - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipEtcdserver(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } - -var fileDescriptorEtcdserver = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, - 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, - 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, - 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, - 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, - 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, - 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, - 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, - 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, - 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, - 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, - 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, - 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, - 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 
0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, - 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, - 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, - 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, - 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, - 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, - 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, - 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, - 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, - 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, - 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto deleted file mode 100644 index 25e0aca5d..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto2"; -package etcdserverpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message Request { - optional uint64 ID = 1 [(gogoproto.nullable) = false]; - optional string Method = 2 [(gogoproto.nullable) = false]; - optional string Path = 3 [(gogoproto.nullable) = false]; - optional string Val = 4 [(gogoproto.nullable) = false]; - optional bool Dir = 5 [(gogoproto.nullable) = false]; - optional string PrevValue = 6 [(gogoproto.nullable) = false]; - optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false]; - optional bool PrevExist = 8 [(gogoproto.nullable) = true]; - optional int64 Expiration = 9 [(gogoproto.nullable) = false]; - optional bool Wait = 10 [(gogoproto.nullable) = false]; - optional uint64 Since = 11 [(gogoproto.nullable) = false]; - optional bool Recursive = 12 [(gogoproto.nullable) = false]; - optional bool Sorted = 13 [(gogoproto.nullable) = false]; - optional bool Quorum = 14 [(gogoproto.nullable) = false]; - optional int64 Time = 15 [(gogoproto.nullable) = false]; - optional bool Stream = 16 [(gogoproto.nullable) = false]; - optional bool Refresh = 17 [(gogoproto.nullable) = true]; -} - -message Metadata { - optional uint64 NodeID = 1 [(gogoproto.nullable) = false]; - optional uint64 ClusterID = 2 [(gogoproto.nullable) = false]; -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go deleted file mode 100644 index 3084c6cbf..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ /dev/null @@ -1,2077 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: raft_internal.proto - -package etcdserverpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - _ "github.com/gogo/protobuf/gogoproto" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. 
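Everything in the generated *.pb.go code being deleted here — the Marshal, Unmarshal, Size and skip helpers above (etcdserver.pb.go) and below (raft_internal.pb.go) — is built on one primitive: protobuf base-128 varints, written 7 bits per byte with the high bit as a continuation flag. A standalone sketch of that round trip (putUvarint/uvarint are illustrative names; the 64-bit overflow checks the generated code performs are omitted for brevity):

package main

import "fmt"

// putUvarint mirrors encodeVarintEtcdserver: emit the low 7 bits of v per byte,
// setting the high bit while more bytes follow.
func putUvarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// uvarint mirrors the decode loops inside the generated Unmarshal methods:
// accumulate 7 bits per byte until a byte with the high bit clear is seen.
func uvarint(buf []byte) (value uint64, bytesRead int) {
	for i, b := range buf {
		value |= uint64(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return value, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 0, 300) // 300 encodes as 0xac 0x02
	v, read := uvarint(buf[:n])
	fmt.Printf("% x -> %d (%d bytes)\n", buf[:n], v, read) // ac 02 -> 300 (2 bytes)
}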
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type RequestHeader struct { - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // username is a username that is associated with an auth token of gRPC connection - Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` - // auth_revision is a revision number of auth.authStore. It is not related to mvcc - AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` -} - -func (m *RequestHeader) Reset() { *m = RequestHeader{} } -func (m *RequestHeader) String() string { return proto.CompactTextString(m) } -func (*RequestHeader) ProtoMessage() {} -func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} } - -// An InternalRaftRequest is the union of all requests which can be -// sent via raft. -type InternalRaftRequest struct { - Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"` - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"` - Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` - Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` - DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"` - Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"` - Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"` - LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"` - LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"` - Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"` - AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"` - AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"` - Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"` - AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"` - AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"` - AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"` - AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"` - AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"` - AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"` - AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"` - AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"` - AuthRoleAdd *AuthRoleAddRequest 
`protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"` - AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"` - AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"` - AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"` - AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"` -} - -func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } -func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } -func (*InternalRaftRequest) ProtoMessage() {} -func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} } - -type EmptyResponse struct { -} - -func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } -func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } -func (*EmptyResponse) ProtoMessage() {} -func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} } - -// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? -// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. -// For avoiding misusage the field, we have an internal version of AuthenticateRequest. -type InternalAuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - // simple_token is generated in API layer (etcdserver/v3_server.go) - SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` -} - -func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } -func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } -func (*InternalAuthenticateRequest) ProtoMessage() {} -func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRaftInternal, []int{3} -} - -func init() { - proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") - proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") - proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") - proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") -} -func (m *RequestHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - } - if len(m.Username) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - } - if m.AuthRevision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) - } - return i, nil -} - -func (m *InternalRaftRequest) Marshal() (dAtA []byte, 
err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - } - if m.V2 != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size())) - n1, err := m.V2.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Range != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size())) - n2, err := m.Range.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.Put != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size())) - n3, err := m.Put.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.DeleteRange != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size())) - n4, err := m.DeleteRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.Txn != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size())) - n5, err := m.Txn.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if m.Compaction != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size())) - n6, err := m.Compaction.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.LeaseGrant != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseGrant.Size())) - n7, err := m.LeaseGrant.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.LeaseRevoke != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size())) - n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.Alarm != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size())) - n9, err := m.Alarm.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.Header != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x6 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size())) - n10, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.AuthEnable != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x3e - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size())) - n11, err := m.AuthEnable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.AuthDisable != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x3f - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size())) - n12, err := m.AuthDisable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - if m.Authenticate != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x3f - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size())) - n13, err := m.Authenticate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - if m.AuthUserAdd != nil { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size())) - n14, err := m.AuthUserAdd.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.AuthUserDelete != nil { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, 
uint64(m.AuthUserDelete.Size())) - n15, err := m.AuthUserDelete.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.AuthUserGet != nil { - dAtA[i] = 0xf2 - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size())) - n16, err := m.AuthUserGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.AuthUserChangePassword != nil { - dAtA[i] = 0xfa - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size())) - n17, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.AuthUserGrantRole != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size())) - n18, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.AuthUserRevokeRole != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size())) - n19, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.AuthUserList != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size())) - n20, err := m.AuthUserList.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - } - if m.AuthRoleList != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size())) - n21, err := m.AuthRoleList.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - } - if m.AuthRoleAdd != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size())) - n22, err := m.AuthRoleAdd.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - if m.AuthRoleDelete != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size())) - n23, err := m.AuthRoleDelete.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - if m.AuthRoleGet != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size())) - n24, err := m.AuthRoleGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if m.AuthRoleGrantPermission != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size())) - n25, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - } - if m.AuthRoleRevokePermission != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size())) - n26, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - return i, nil -} - -func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - if len(m.SimpleToken) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) - i += copy(dAtA[i:], m.SimpleToken) - } - return i, nil -} - -func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *RequestHeader) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRaftInternal(uint64(m.ID)) - } - l = len(m.Username) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRevision != 0 { - n += 1 + sovRaftInternal(uint64(m.AuthRevision)) - } - return n -} - -func (m *InternalRaftRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRaftInternal(uint64(m.ID)) - } - if m.V2 != nil { - l = m.V2.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Range != nil { - l = m.Range.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Put != nil { - l = m.Put.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.DeleteRange != nil { - l = m.DeleteRange.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Txn != nil { - l = m.Txn.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Compaction != nil { - l = m.Compaction.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.LeaseGrant != nil { - l = m.LeaseGrant.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.LeaseRevoke != nil { - l = m.LeaseRevoke.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Alarm != nil { - l = m.Alarm.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Header != nil { - l = m.Header.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthEnable != nil { - l = m.AuthEnable.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthDisable != nil { - l = m.AuthDisable.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.Authenticate != nil { - l = m.Authenticate.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserAdd != nil { - l = m.AuthUserAdd.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserDelete != nil { - l = m.AuthUserDelete.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserGet != nil { - l = m.AuthUserGet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserChangePassword != nil { - l = m.AuthUserChangePassword.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserGrantRole != nil { - l = m.AuthUserGrantRole.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserRevokeRole != nil { - l = m.AuthUserRevokeRole.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserList != nil { - l = m.AuthUserList.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleList != nil { - l = m.AuthRoleList.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleAdd != nil { - l = m.AuthRoleAdd.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleDelete != nil { - l = m.AuthRoleDelete.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if 
m.AuthRoleGet != nil { - l = m.AuthRoleGet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleGrantPermission != nil { - l = m.AuthRoleGrantPermission.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleRevokePermission != nil { - l = m.AuthRoleRevokePermission.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - return n -} - -func (m *EmptyResponse) Size() (n int) { - var l int - _ = l - return n -} - -func (m *InternalAuthenticateRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - l = len(m.SimpleToken) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - return n -} - -func sovRaftInternal(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRaftInternal(x uint64) (n int) { - return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RequestHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) - } - m.AuthRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AuthRevision |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := 
iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.V2 == nil { - m.V2 = &Request{} - } - if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Range == nil { - m.Range = &RangeRequest{} - } - if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Put == nil { - m.Put = &PutRequest{} - } - if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteRange == nil { - m.DeleteRange = &DeleteRangeRequest{} - } - 
if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Txn == nil { - m.Txn = &TxnRequest{} - } - if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Compaction == nil { - m.Compaction = &CompactionRequest{} - } - if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseGrant == nil { - m.LeaseGrant = &LeaseGrantRequest{} - } - if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseRevoke == nil { - m.LeaseRevoke = &LeaseRevokeRequest{} - } - if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Alarm == nil { - m.Alarm = &AlarmRequest{} - } - if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - case 100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &RequestHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1000: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthEnable == nil { - m.AuthEnable = &AuthEnableRequest{} - } - if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1011: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthDisable == nil { - m.AuthDisable = &AuthDisableRequest{} - } - if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1012: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Authenticate == nil { - m.Authenticate = &InternalAuthenticateRequest{} - } - if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserAdd == nil { - m.AuthUserAdd = &AuthUserAddRequest{} - } - if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1101: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserDelete == nil { - m.AuthUserDelete = &AuthUserDeleteRequest{} - } - if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1102: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserGet == nil { - m.AuthUserGet = &AuthUserGetRequest{} - } - if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1103: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserChangePassword == nil { - m.AuthUserChangePassword = &AuthUserChangePasswordRequest{} - } - if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1104: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserGrantRole == nil { - m.AuthUserGrantRole = &AuthUserGrantRoleRequest{} - } - if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1105: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserRevokeRole == nil { - m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{} - } 
- if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1106: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserList == nil { - m.AuthUserList = &AuthUserListRequest{} - } - if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1107: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleList == nil { - m.AuthRoleList = &AuthRoleListRequest{} - } - if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1200: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleAdd == nil { - m.AuthRoleAdd = &AuthRoleAddRequest{} - } - if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1201: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleDelete == nil { - m.AuthRoleDelete = &AuthRoleDeleteRequest{} - } - if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1202: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleGet == nil { - m.AuthRoleGet 
= &AuthRoleGetRequest{} - } - if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1203: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleGrantPermission == nil { - m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{} - } - if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1204: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleRevokePermission == nil { - m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{} - } - if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmptyResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
InternalAuthenticateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SimpleToken = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRaftInternal(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRaftInternal - } - return 
iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRaftInternal(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) } - -var fileDescriptorRaftInternal = []byte{ - // 837 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, - 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, - 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, - 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, - 0x0f, 0xd4, 0x17, 0x50, 0xbc, 0xf1, 0xca, 0x1b, 0x7d, 0x00, 0x67, 0x3f, 0x92, 0x34, 0x6d, 0xca, - 0x5d, 0x72, 0xce, 0xff, 0xfc, 0xce, 0xd9, 0xec, 0x7f, 0xbb, 0x45, 0xb3, 0x8c, 0x1e, 0x72, 0xd3, - 0x76, 0x39, 0x30, 0x97, 0x3a, 0xab, 0x3e, 0xf3, 0xb8, 0x87, 0x0b, 0xc0, 0x1b, 0x56, 0x00, 0xac, - 0x0b, 0xcc, 0x3f, 0x28, 0xcd, 0x35, 0xbd, 0xa6, 0x27, 0x13, 0xf7, 0xc4, 0x93, 0xd2, 0x94, 0x66, - 0x62, 0x8d, 0x8e, 0xe4, 0x98, 0xdf, 0x50, 0x8f, 0x95, 0x67, 0x68, 0xda, 0x80, 0x17, 0x1d, 0x08, - 0xf8, 0x16, 0x50, 0x0b, 0x18, 0x2e, 0xa2, 0xec, 0x76, 0x9d, 0x64, 0x16, 0x33, 0x2b, 0x63, 0x46, - 0x76, 0xbb, 0x8e, 0x4b, 0x68, 0xaa, 0x13, 0x88, 0x96, 0x6d, 0x20, 0xd9, 0xc5, 0xcc, 0x4a, 0xce, - 0x88, 0xde, 0xf1, 0x32, 0x9a, 0xa6, 0x1d, 0xde, 0x32, 0x19, 0x74, 0xed, 0xc0, 0xf6, 0x5c, 0x32, - 0x2a, 0xcb, 0x0a, 0x22, 0x68, 0xe8, 0x58, 0xe5, 0x4f, 0x11, 0xcd, 0x6e, 0xeb, 0xa9, 0x0d, 0x7a, - 0xc8, 0x75, 0xbb, 0x81, 0x46, 0xd7, 0x50, 0xb6, 0x5b, 0x95, 0x2d, 0xf2, 0xd5, 0xf9, 0xd5, 0xde, - 0x75, 0xad, 0xea, 0x12, 0x23, 0xdb, 0xad, 0xe2, 0xfb, 0x68, 0x9c, 0x51, 0xb7, 0x09, 0xb2, 0x57, - 0xbe, 0x5a, 0xea, 0x53, 0x8a, 0x54, 0x28, 0x57, 0x42, 0x7c, 0x0b, 0x8d, 0xfa, 0x1d, 0x4e, 0xc6, - 0xa4, 0x9e, 0x24, 0xf5, 0x7b, 0x9d, 0x70, 0x1e, 0x43, 0x88, 0xf0, 0x3a, 0x2a, 0x58, 0xe0, 0x00, - 0x07, 0x53, 0x35, 0x19, 0x97, 0x45, 0x8b, 0xc9, 0xa2, 0xba, 0x54, 0x24, 0x5a, 0xe5, 0xad, 0x38, - 0x26, 0x1a, 0xf2, 0x63, 0x97, 0x4c, 0xa4, 0x35, 0xdc, 0x3f, 0x76, 0xa3, 0x86, 0xfc, 0xd8, 0xc5, - 0x0f, 0x10, 0x6a, 0x78, 0x6d, 0x9f, 0x36, 0xb8, 0xf8, 0x7e, 0x93, 0xb2, 0xe4, 0x6a, 0xb2, 0x64, - 0x3d, 0xca, 0x87, 0x95, 0x3d, 0x25, 0xf8, 0x21, 0xca, 0x3b, 0x40, 0x03, 0x30, 0x9b, 0x8c, 0xba, - 0x9c, 0x4c, 0xa5, 0x11, 0x76, 0x84, 0x60, 0x53, 0xe4, 0x23, 0x82, 0x13, 0x85, 0xc4, 0x9a, 0x15, - 0x81, 0x41, 0xd7, 0x3b, 0x02, 0x92, 0x4b, 0x5b, 0xb3, 0x44, 0x18, 0x52, 0x10, 0xad, 0xd9, 0x89, - 0x63, 0x62, 0x5b, 0xa8, 0x43, 0x59, 0x9b, 0xa0, 0xb4, 0x6d, 0xa9, 0x89, 0x54, 0xb4, 0x2d, 0x52, - 0x88, 0xd7, 0xd0, 0x44, 0x4b, 0x5a, 0x8e, 0x58, 0xb2, 0x64, 0x21, 0x75, 0xcf, 0x95, 0x2b, 0x0d, - 0x2d, 0xc5, 0x35, 
0x94, 0x97, 0x8e, 0x03, 0x97, 0x1e, 0x38, 0x40, 0x7e, 0xa7, 0x7e, 0xb0, 0x5a, - 0x87, 0xb7, 0x36, 0xa4, 0x20, 0x5a, 0x2e, 0x8d, 0x42, 0xb8, 0x8e, 0xa4, 0x3f, 0x4d, 0xcb, 0x0e, - 0x24, 0xe3, 0xef, 0x64, 0xda, 0x7a, 0x05, 0xa3, 0xae, 0x14, 0xd1, 0x7a, 0x69, 0x1c, 0xc3, 0xbb, - 0x8a, 0x02, 0x2e, 0xb7, 0x1b, 0x94, 0x03, 0xf9, 0xa7, 0x28, 0x37, 0x93, 0x94, 0xd0, 0xf7, 0xb5, - 0x1e, 0x69, 0x88, 0x4b, 0xd4, 0xe3, 0x0d, 0x7d, 0x94, 0xc4, 0xd9, 0x32, 0xa9, 0x65, 0x91, 0x8f, - 0x53, 0xc3, 0xc6, 0x7a, 0x1c, 0x00, 0xab, 0x59, 0x56, 0x62, 0x2c, 0x1d, 0xc3, 0xbb, 0x68, 0x26, - 0xc6, 0x28, 0x4f, 0x92, 0x4f, 0x8a, 0xb4, 0x9c, 0x4e, 0xd2, 0x66, 0xd6, 0xb0, 0x22, 0x4d, 0x84, - 0x93, 0x63, 0x35, 0x81, 0x93, 0xcf, 0xe7, 0x8e, 0xb5, 0x09, 0x7c, 0x60, 0xac, 0x4d, 0xe0, 0xb8, - 0x89, 0xae, 0xc4, 0x98, 0x46, 0x4b, 0x9c, 0x12, 0xd3, 0xa7, 0x41, 0xf0, 0xd2, 0x63, 0x16, 0xf9, - 0xa2, 0x90, 0xb7, 0xd3, 0x91, 0xeb, 0x52, 0xbd, 0xa7, 0xc5, 0x21, 0xfd, 0x12, 0x4d, 0x4d, 0xe3, - 0x27, 0x68, 0xae, 0x67, 0x5e, 0x61, 0x6f, 0x93, 0x79, 0x0e, 0x90, 0x53, 0xd5, 0xe3, 0xfa, 0x90, - 0xb1, 0xe5, 0xd1, 0xf0, 0xe2, 0xad, 0xbe, 0x48, 0xfb, 0x33, 0xf8, 0x29, 0x9a, 0x8f, 0xc9, 0xea, - 0xa4, 0x28, 0xf4, 0x57, 0x85, 0xbe, 0x91, 0x8e, 0xd6, 0x47, 0xa6, 0x87, 0x8d, 0xe9, 0x40, 0x0a, - 0x6f, 0xa1, 0x62, 0x0c, 0x77, 0xec, 0x80, 0x93, 0x6f, 0x8a, 0xba, 0x94, 0x4e, 0xdd, 0xb1, 0x03, - 0x9e, 0xf0, 0x51, 0x18, 0x8c, 0x48, 0x62, 0x34, 0x45, 0xfa, 0x3e, 0x94, 0x24, 0x5a, 0x0f, 0x90, - 0xc2, 0x60, 0xb4, 0xf5, 0x92, 0x24, 0x1c, 0xf9, 0x26, 0x37, 0x6c, 0xeb, 0x45, 0x4d, 0xbf, 0x23, - 0x75, 0x2c, 0x72, 0xa4, 0xc4, 0x68, 0x47, 0xbe, 0xcd, 0x0d, 0x73, 0xa4, 0xa8, 0x4a, 0x71, 0x64, - 0x1c, 0x4e, 0x8e, 0x25, 0x1c, 0xf9, 0xee, 0xdc, 0xb1, 0xfa, 0x1d, 0xa9, 0x63, 0xf8, 0x39, 0x2a, - 0xf5, 0x60, 0xa4, 0x51, 0x7c, 0x60, 0x6d, 0x3b, 0x90, 0xf7, 0xd8, 0x7b, 0xc5, 0xbc, 0x33, 0x84, - 0x29, 0xe4, 0x7b, 0x91, 0x3a, 0xe4, 0x5f, 0xa6, 0xe9, 0x79, 0xdc, 0x46, 0x0b, 0x71, 0x2f, 0x6d, - 0x9d, 0x9e, 0x66, 0x1f, 0x54, 0xb3, 0xbb, 0xe9, 0xcd, 0x94, 0x4b, 0x06, 0xbb, 0x11, 0x3a, 0x44, - 0x50, 0xb9, 0x80, 0xa6, 0x37, 0xda, 0x3e, 0x7f, 0x65, 0x40, 0xe0, 0x7b, 0x6e, 0x00, 0x15, 0x1f, - 0x2d, 0x9c, 0xf3, 0x43, 0x84, 0x31, 0x1a, 0x93, 0xb7, 0x7b, 0x46, 0xde, 0xee, 0xf2, 0x59, 0xdc, - 0xfa, 0xd1, 0xf9, 0xd4, 0xb7, 0x7e, 0xf8, 0x8e, 0x97, 0x50, 0x21, 0xb0, 0xdb, 0xbe, 0x03, 0x26, - 0xf7, 0x8e, 0x40, 0x5d, 0xfa, 0x39, 0x23, 0xaf, 0x62, 0xfb, 0x22, 0xf4, 0x68, 0xee, 0xe4, 0x67, - 0x79, 0xe4, 0xe4, 0xac, 0x9c, 0x39, 0x3d, 0x2b, 0x67, 0x7e, 0x9c, 0x95, 0x33, 0xaf, 0x7f, 0x95, - 0x47, 0x0e, 0x26, 0xe4, 0x5f, 0x8e, 0xb5, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xc9, 0xfc, - 0x0e, 0xca, 0x08, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto deleted file mode 100644 index 25d45d3c4..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; -package etcdserverpb; - -import "gogoproto/gogo.proto"; -import "etcdserver.proto"; -import "rpc.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message RequestHeader { - uint64 ID = 1; - // username is a username that is associated with an auth token of gRPC connection - string username = 2; - // auth_revision is a revision number of auth.authStore. 
It is not related to mvcc - uint64 auth_revision = 3; -} - -// An InternalRaftRequest is the union of all requests which can be -// sent via raft. -message InternalRaftRequest { - RequestHeader header = 100; - uint64 ID = 1; - - Request v2 = 2; - - RangeRequest range = 3; - PutRequest put = 4; - DeleteRangeRequest delete_range = 5; - TxnRequest txn = 6; - CompactionRequest compaction = 7; - - LeaseGrantRequest lease_grant = 8; - LeaseRevokeRequest lease_revoke = 9; - - AlarmRequest alarm = 10; - - AuthEnableRequest auth_enable = 1000; - AuthDisableRequest auth_disable = 1011; - - InternalAuthenticateRequest authenticate = 1012; - - AuthUserAddRequest auth_user_add = 1100; - AuthUserDeleteRequest auth_user_delete = 1101; - AuthUserGetRequest auth_user_get = 1102; - AuthUserChangePasswordRequest auth_user_change_password = 1103; - AuthUserGrantRoleRequest auth_user_grant_role = 1104; - AuthUserRevokeRoleRequest auth_user_revoke_role = 1105; - AuthUserListRequest auth_user_list = 1106; - AuthRoleListRequest auth_role_list = 1107; - - AuthRoleAddRequest auth_role_add = 1200; - AuthRoleDeleteRequest auth_role_delete = 1201; - AuthRoleGetRequest auth_role_get = 1202; - AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; - AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; -} - -message EmptyResponse { -} - -// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? -// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. -// For avoiding misusage the field, we have an internal version of AuthenticateRequest. -message InternalAuthenticateRequest { - string name = 1; - string password = 2; - - // simple_token is generated in API layer (etcdserver/v3_server.go) - string simple_token = 3; -} - diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go deleted file mode 100644 index 3d3536a32..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserverpb - -import ( - "fmt" - "strings" - - proto "github.com/golang/protobuf/proto" -) - -// InternalRaftStringer implements custom proto Stringer: -// redact password, replace value fields with value_size fields. 
-type InternalRaftStringer struct {
-	Request *InternalRaftRequest
-}
-
-func (as *InternalRaftStringer) String() string {
-	switch {
-	case as.Request.LeaseGrant != nil:
-		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
-			as.Request.Header.String(),
-			as.Request.LeaseGrant.TTL,
-			as.Request.LeaseGrant.ID,
-		)
-	case as.Request.LeaseRevoke != nil:
-		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
-			as.Request.Header.String(),
-			as.Request.LeaseRevoke.ID,
-		)
-	case as.Request.Authenticate != nil:
-		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
-			as.Request.Header.String(),
-			as.Request.Authenticate.Name,
-			as.Request.Authenticate.SimpleToken,
-		)
-	case as.Request.AuthUserAdd != nil:
-		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
-			as.Request.Header.String(),
-			as.Request.AuthUserAdd.Name,
-		)
-	case as.Request.AuthUserChangePassword != nil:
-		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
-			as.Request.Header.String(),
-			as.Request.AuthUserChangePassword.Name,
-		)
-	case as.Request.Put != nil:
-		return fmt.Sprintf("header:<%s> put:<%s>",
-			as.Request.Header.String(),
-			NewLoggablePutRequest(as.Request.Put).String(),
-		)
-	case as.Request.Txn != nil:
-		return fmt.Sprintf("header:<%s> txn:<%s>",
-			as.Request.Header.String(),
-			NewLoggableTxnRequest(as.Request.Txn).String(),
-		)
-	default:
-		// nothing to redact
-	}
-	return as.Request.String()
-}
-
-// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
-// fields in any nested txn and put operations.
-type txnRequestStringer struct {
-	Request *TxnRequest
-}
-
-func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
-	return &txnRequestStringer{request}
-}
-
-func (as *txnRequestStringer) String() string {
-	var compare []string
-	for _, c := range as.Request.Compare {
-		switch cv := c.TargetUnion.(type) {
-		case *Compare_Value:
-			compare = append(compare, newLoggableValueCompare(c, cv).String())
-		default:
-			// nothing to redact
-			compare = append(compare, c.String())
-		}
-	}
-	var success []string
-	for _, s := range as.Request.Success {
-		success = append(success, newLoggableRequestOp(s).String())
-	}
-	var failure []string
-	for _, f := range as.Request.Failure {
-		failure = append(failure, newLoggableRequestOp(f).String())
-	}
-	return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
-		strings.Join(compare, " "),
-		strings.Join(success, " "),
-		strings.Join(failure, " "),
-	)
-}
-
-// requestOpStringer implements a custom proto String to replace value bytes fields with value
-// size fields in any nested txn and put operations.
-type requestOpStringer struct {
-	Op *RequestOp
-}
-
-func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
-	return &requestOpStringer{op}
-}
-
-func (as *requestOpStringer) String() string {
-	switch op := as.Op.Request.(type) {
-	case *RequestOp_RequestPut:
-		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
-	case *RequestOp_RequestTxn:
-		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
-	default:
-		// nothing to redact
-	}
-	return as.Op.String()
-}
-
-// loggableValueCompare implements a custom proto String for Compare.Value union member types to
-// replace the value bytes field with a value size field.
-// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
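// Illustrative aside with stand-in types (not the vendored ones): the loggable
// stringers above let applied requests be logged without leaking passwords or user
// values -- value bytes are reported only by their length. The same idea in a
// self-contained form:
package main

import "fmt"

// loggablePut mirrors the value_size approach used by loggablePutRequest below.
type loggablePut struct {
	Key       []byte
	ValueSize int
}

func (p loggablePut) String() string {
	return fmt.Sprintf("put:<key:%q value_size:%d>", p.Key, p.ValueSize)
}

func main() {
	secret := make([]byte, 1<<20) // contents must never reach the log
	fmt.Println(loggablePut{Key: []byte("config/tls-key"), ValueSize: len(secret)})
	// put:<key:"config/tls-key" value_size:1048576>
}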
-type loggableValueCompare struct { - Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"` - Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"` - Key []byte `protobuf:"bytes,3,opt,name=key,proto3"` - ValueSize int `protobuf:"bytes,7,opt,name=value_size,proto3"` - RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,proto3"` -} - -func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare { - return &loggableValueCompare{ - c.Result, - c.Target, - c.Key, - len(cv.Value), - c.RangeEnd, - } -} - -func (m *loggableValueCompare) Reset() { *m = loggableValueCompare{} } -func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) } -func (*loggableValueCompare) ProtoMessage() {} - -// loggablePutRequest implements a custom proto String to replace value bytes field with a value -// size field. -// To preserve proto encoding of the key bytes, a faked out proto type is used here. -type loggablePutRequest struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3"` - ValueSize int `protobuf:"varint,2,opt,name=value_size,proto3"` - Lease int64 `protobuf:"varint,3,opt,name=lease,proto3"` - PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,proto3"` - IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,proto3"` - IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"` -} - -func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest { - return &loggablePutRequest{ - request.Key, - len(request.Value), - request.Lease, - request.PrevKv, - request.IgnoreValue, - request.IgnoreLease, - } -} - -func (m *loggablePutRequest) Reset() { *m = loggablePutRequest{} } -func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) } -func (*loggablePutRequest) ProtoMessage() {} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go deleted file mode 100644 index 40147f935..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ /dev/null @@ -1,18665 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: rpc.proto - -package etcdserverpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - _ "github.com/gogo/protobuf/gogoproto" - - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" - - authpb "github.com/coreos/etcd/auth/authpb" - - context "golang.org/x/net/context" - - grpc "google.golang.org/grpc" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. 
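// Illustrative aside, not part of the vendored file: the blank assignments that
// follow exist only so the imported packages count as "used" even when a
// particular build of the generated code does not reference them directly, since
// Go refuses to compile a file with an unused import. The idiom in isolation:
package main

import "fmt"

var _ = fmt.Errorf // file-scope reference keeps the import; nothing runs

func main() {}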
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AlarmType int32 - -const ( - AlarmType_NONE AlarmType = 0 - AlarmType_NOSPACE AlarmType = 1 - AlarmType_CORRUPT AlarmType = 2 -) - -var AlarmType_name = map[int32]string{ - 0: "NONE", - 1: "NOSPACE", - 2: "CORRUPT", -} -var AlarmType_value = map[string]int32{ - "NONE": 0, - "NOSPACE": 1, - "CORRUPT": 2, -} - -func (x AlarmType) String() string { - return proto.EnumName(AlarmType_name, int32(x)) -} -func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } - -type RangeRequest_SortOrder int32 - -const ( - RangeRequest_NONE RangeRequest_SortOrder = 0 - RangeRequest_ASCEND RangeRequest_SortOrder = 1 - RangeRequest_DESCEND RangeRequest_SortOrder = 2 -) - -var RangeRequest_SortOrder_name = map[int32]string{ - 0: "NONE", - 1: "ASCEND", - 2: "DESCEND", -} -var RangeRequest_SortOrder_value = map[string]int32{ - "NONE": 0, - "ASCEND": 1, - "DESCEND": 2, -} - -func (x RangeRequest_SortOrder) String() string { - return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) -} -func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} } - -type RangeRequest_SortTarget int32 - -const ( - RangeRequest_KEY RangeRequest_SortTarget = 0 - RangeRequest_VERSION RangeRequest_SortTarget = 1 - RangeRequest_CREATE RangeRequest_SortTarget = 2 - RangeRequest_MOD RangeRequest_SortTarget = 3 - RangeRequest_VALUE RangeRequest_SortTarget = 4 -) - -var RangeRequest_SortTarget_name = map[int32]string{ - 0: "KEY", - 1: "VERSION", - 2: "CREATE", - 3: "MOD", - 4: "VALUE", -} -var RangeRequest_SortTarget_value = map[string]int32{ - "KEY": 0, - "VERSION": 1, - "CREATE": 2, - "MOD": 3, - "VALUE": 4, -} - -func (x RangeRequest_SortTarget) String() string { - return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) -} -func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} } - -type Compare_CompareResult int32 - -const ( - Compare_EQUAL Compare_CompareResult = 0 - Compare_GREATER Compare_CompareResult = 1 - Compare_LESS Compare_CompareResult = 2 - Compare_NOT_EQUAL Compare_CompareResult = 3 -) - -var Compare_CompareResult_name = map[int32]string{ - 0: "EQUAL", - 1: "GREATER", - 2: "LESS", - 3: "NOT_EQUAL", -} -var Compare_CompareResult_value = map[string]int32{ - "EQUAL": 0, - "GREATER": 1, - "LESS": 2, - "NOT_EQUAL": 3, -} - -func (x Compare_CompareResult) String() string { - return proto.EnumName(Compare_CompareResult_name, int32(x)) -} -func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} } - -type Compare_CompareTarget int32 - -const ( - Compare_VERSION Compare_CompareTarget = 0 - Compare_CREATE Compare_CompareTarget = 1 - Compare_MOD Compare_CompareTarget = 2 - Compare_VALUE Compare_CompareTarget = 3 - Compare_LEASE Compare_CompareTarget = 4 -) - -var Compare_CompareTarget_name = map[int32]string{ - 0: "VERSION", - 1: "CREATE", - 2: "MOD", - 3: "VALUE", - 4: "LEASE", -} -var Compare_CompareTarget_value = map[string]int32{ - "VERSION": 0, - "CREATE": 1, - "MOD": 2, - "VALUE": 3, - "LEASE": 4, -} - -func (x Compare_CompareTarget) String() string { - return proto.EnumName(Compare_CompareTarget_name, int32(x)) -} -func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } - -type WatchCreateRequest_FilterType int32 - -const ( - // filter out put event. - WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 - // filter out delete event. 
- WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 -) - -var WatchCreateRequest_FilterType_name = map[int32]string{ - 0: "NOPUT", - 1: "NODELETE", -} -var WatchCreateRequest_FilterType_value = map[string]int32{ - "NOPUT": 0, - "NODELETE": 1, -} - -func (x WatchCreateRequest_FilterType) String() string { - return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) -} -func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{21, 0} -} - -type AlarmRequest_AlarmAction int32 - -const ( - AlarmRequest_GET AlarmRequest_AlarmAction = 0 - AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1 - AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2 -) - -var AlarmRequest_AlarmAction_name = map[int32]string{ - 0: "GET", - 1: "ACTIVATE", - 2: "DEACTIVATE", -} -var AlarmRequest_AlarmAction_value = map[string]int32{ - "GET": 0, - "ACTIVATE": 1, - "DEACTIVATE": 2, -} - -func (x AlarmRequest_AlarmAction) String() string { - return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) -} -func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{48, 0} -} - -type ResponseHeader struct { - // cluster_id is the ID of the cluster which sent the response. - ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // member_id is the ID of the member which sent the response. - MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` - // revision is the key-value store revision when the request was applied. - Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` - // raft_term is the raft term when the request was applied. - RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` -} - -func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } -func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } -func (*ResponseHeader) ProtoMessage() {} -func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } - -func (m *ResponseHeader) GetClusterId() uint64 { - if m != nil { - return m.ClusterId - } - return 0 -} - -func (m *ResponseHeader) GetMemberId() uint64 { - if m != nil { - return m.MemberId - } - return 0 -} - -func (m *ResponseHeader) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *ResponseHeader) GetRaftTerm() uint64 { - if m != nil { - return m.RaftTerm - } - return 0 -} - -type RangeRequest struct { - // key is the first key for the range. If range_end is not given, the request only looks up key. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the upper bound on the requested range [key, range_end). - // If range_end is '\0', the range is all keys >= key. - // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), - // then the range request gets all keys prefixed with key. - // If both key and range_end are '\0', then the range request returns all keys. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // limit is a limit on the number of keys returned for the request. When limit is set to 0, - // it is treated as no limit. - Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` - // revision is the point-in-time of the key-value store to use for the range. 
- // If revision is less or equal to zero, the range is over the newest key-value store. - // If the revision has been compacted, ErrCompacted is returned as a response. - Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` - // sort_order is the order for returned sorted results. - SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"` - // sort_target is the key-value field to use for sorting. - SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"` - // serializable sets the range request to use serializable member-local reads. - // Range requests are linearizable by default; linearizable requests have higher - // latency and lower throughput than serializable requests but reflect the current - // consensus of the cluster. For better performance, in exchange for possible stale reads, - // a serializable range request is served locally without needing to reach consensus - // with other nodes in the cluster. - Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"` - // keys_only when set returns only the keys and not the values. - KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` - // count_only when set returns only the count of the keys in the range. - CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` - // min_mod_revision is the lower bound for returned key mod revisions; all keys with - // lesser mod revisions will be filtered away. - MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` - // max_mod_revision is the upper bound for returned key mod revisions; all keys with - // greater mod revisions will be filtered away. - MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` - // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. - MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` - // max_create_revision is the upper bound for returned key create revisions; all keys with - // greater create revisions will be filtered away. 
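// Illustrative aside, not part of the vendored file: the range_end rules described
// for RangeRequest above imply the usual way to ask for "every key with this
// prefix": send the prefix with its last incrementable byte bumped ("aa" -> "ab",
// "a\xff" -> "b"), or "\x00" if every byte is already 0xff. The helper name
// prefixRangeEnd is an assumption for the sketch.
package main

import "fmt"

func prefixRangeEnd(prefix []byte) []byte {
	end := append([]byte{}, prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// No byte can be incremented: fall back to "\x00", meaning all keys >= prefix.
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("aa")))    // "ab"
	fmt.Printf("%q\n", prefixRangeEnd([]byte("a\xff"))) // "b"
}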
- MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` -} - -func (m *RangeRequest) Reset() { *m = RangeRequest{} } -func (m *RangeRequest) String() string { return proto.CompactTextString(m) } -func (*RangeRequest) ProtoMessage() {} -func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } - -func (m *RangeRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *RangeRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *RangeRequest) GetLimit() int64 { - if m != nil { - return m.Limit - } - return 0 -} - -func (m *RangeRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { - if m != nil { - return m.SortOrder - } - return RangeRequest_NONE -} - -func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { - if m != nil { - return m.SortTarget - } - return RangeRequest_KEY -} - -func (m *RangeRequest) GetSerializable() bool { - if m != nil { - return m.Serializable - } - return false -} - -func (m *RangeRequest) GetKeysOnly() bool { - if m != nil { - return m.KeysOnly - } - return false -} - -func (m *RangeRequest) GetCountOnly() bool { - if m != nil { - return m.CountOnly - } - return false -} - -func (m *RangeRequest) GetMinModRevision() int64 { - if m != nil { - return m.MinModRevision - } - return 0 -} - -func (m *RangeRequest) GetMaxModRevision() int64 { - if m != nil { - return m.MaxModRevision - } - return 0 -} - -func (m *RangeRequest) GetMinCreateRevision() int64 { - if m != nil { - return m.MinCreateRevision - } - return 0 -} - -func (m *RangeRequest) GetMaxCreateRevision() int64 { - if m != nil { - return m.MaxCreateRevision - } - return 0 -} - -type RangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // kvs is the list of key-value pairs matched by the range request. - // kvs is empty when count is requested. - Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` - // more indicates if there are more keys to return in the requested range. - More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` - // count is set to the number of keys within the range when requested. - Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` -} - -func (m *RangeResponse) Reset() { *m = RangeResponse{} } -func (m *RangeResponse) String() string { return proto.CompactTextString(m) } -func (*RangeResponse) ProtoMessage() {} -func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } - -func (m *RangeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { - if m != nil { - return m.Kvs - } - return nil -} - -func (m *RangeResponse) GetMore() bool { - if m != nil { - return m.More - } - return false -} - -func (m *RangeResponse) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -type PutRequest struct { - // key is the key, in bytes, to put into the key-value store. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // value is the value, in bytes, to associate with the key in the key-value store. 
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // lease is the lease ID to associate with the key in the key-value store. A lease - // value of 0 indicates no lease. - Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` - // If prev_kv is set, etcd gets the previous key-value pair before changing it. - // The previous key-value pair will be returned in the put response. - PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - // If ignore_value is set, etcd updates the key using its current value. - // Returns an error if the key does not exist. - IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` - // If ignore_lease is set, etcd updates the key using its current lease. - // Returns an error if the key does not exist. - IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} -func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } - -func (m *PutRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutRequest) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *PutRequest) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -func (m *PutRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -func (m *PutRequest) GetIgnoreValue() bool { - if m != nil { - return m.IgnoreValue - } - return false -} - -func (m *PutRequest) GetIgnoreLease() bool { - if m != nil { - return m.IgnoreLease - } - return false -} - -type PutResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // if prev_kv is set in the request, the previous key-value pair will be returned. - PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} -func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } - -func (m *PutResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { - if m != nil { - return m.PrevKv - } - return nil -} - -type DeleteRangeRequest struct { - // key is the first key to delete in the range. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the key following the last key to delete for the range [key, range_end). - // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all the keys - // with the prefix (the given key). - // If range_end is '\0', the range is all keys greater than or equal to the key argument. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delete response. 
- PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` -} - -func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } -func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeRequest) ProtoMessage() {} -func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } - -func (m *DeleteRangeRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRangeRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *DeleteRangeRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -type DeleteRangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // deleted is the number of keys deleted by the delete range request. - Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` - // if prev_kv is set in the request, the previous key-value pairs will be returned. - PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` -} - -func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } -func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeResponse) ProtoMessage() {} -func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} } - -func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRangeResponse) GetDeleted() int64 { - if m != nil { - return m.Deleted - } - return 0 -} - -func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { - if m != nil { - return m.PrevKvs - } - return nil -} - -type RequestOp struct { - // request is a union of request types accepted by a transaction. 
- // - // Types that are valid to be assigned to Request: - // *RequestOp_RequestRange - // *RequestOp_RequestPut - // *RequestOp_RequestDeleteRange - // *RequestOp_RequestTxn - Request isRequestOp_Request `protobuf_oneof:"request"` -} - -func (m *RequestOp) Reset() { *m = RequestOp{} } -func (m *RequestOp) String() string { return proto.CompactTextString(m) } -func (*RequestOp) ProtoMessage() {} -func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } - -type isRequestOp_Request interface { - isRequestOp_Request() - MarshalTo([]byte) (int, error) - Size() int -} - -type RequestOp_RequestRange struct { - RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"` -} -type RequestOp_RequestPut struct { - RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"` -} -type RequestOp_RequestDeleteRange struct { - RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` -} -type RequestOp_RequestTxn struct { - RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"` -} - -func (*RequestOp_RequestRange) isRequestOp_Request() {} -func (*RequestOp_RequestPut) isRequestOp_Request() {} -func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} -func (*RequestOp_RequestTxn) isRequestOp_Request() {} - -func (m *RequestOp) GetRequest() isRequestOp_Request { - if m != nil { - return m.Request - } - return nil -} - -func (m *RequestOp) GetRequestRange() *RangeRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok { - return x.RequestRange - } - return nil -} - -func (m *RequestOp) GetRequestPut() *PutRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok { - return x.RequestPut - } - return nil -} - -func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok { - return x.RequestDeleteRange - } - return nil -} - -func (m *RequestOp) GetRequestTxn() *TxnRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok { - return x.RequestTxn - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{ - (*RequestOp_RequestRange)(nil), - (*RequestOp_RequestPut)(nil), - (*RequestOp_RequestDeleteRange)(nil), - (*RequestOp_RequestTxn)(nil), - } -} - -func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*RequestOp) - // request - switch x := m.Request.(type) { - case *RequestOp_RequestRange: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestRange); err != nil { - return err - } - case *RequestOp_RequestPut: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestPut); err != nil { - return err - } - case *RequestOp_RequestDeleteRange: - _ = b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { - return err - } - case *RequestOp_RequestTxn: - _ = b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestTxn); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("RequestOp.Request has unexpected type %T", x) - } - return nil -} - -func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*RequestOp) - switch tag { - case 1: // request.request_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RangeRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestRange{msg} - return true, err - case 2: // request.request_put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PutRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestPut{msg} - return true, err - case 3: // request.request_delete_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(DeleteRangeRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestDeleteRange{msg} - return true, err - case 4: // request.request_txn - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TxnRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestTxn{msg} - return true, err - default: - return false, nil - } -} - -func _RequestOp_OneofSizer(msg proto.Message) (n int) { - m := msg.(*RequestOp) - // request - switch x := m.Request.(type) { - case *RequestOp_RequestRange: - s := proto.Size(x.RequestRange) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *RequestOp_RequestPut: - s := proto.Size(x.RequestPut) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *RequestOp_RequestDeleteRange: - s := proto.Size(x.RequestDeleteRange) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *RequestOp_RequestTxn: - s := proto.Size(x.RequestTxn) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ResponseOp struct { - // response is a union of response types returned by a transaction. 
- // - // Types that are valid to be assigned to Response: - // *ResponseOp_ResponseRange - // *ResponseOp_ResponsePut - // *ResponseOp_ResponseDeleteRange - // *ResponseOp_ResponseTxn - Response isResponseOp_Response `protobuf_oneof:"response"` -} - -func (m *ResponseOp) Reset() { *m = ResponseOp{} } -func (m *ResponseOp) String() string { return proto.CompactTextString(m) } -func (*ResponseOp) ProtoMessage() {} -func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} } - -type isResponseOp_Response interface { - isResponseOp_Response() - MarshalTo([]byte) (int, error) - Size() int -} - -type ResponseOp_ResponseRange struct { - ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"` -} -type ResponseOp_ResponsePut struct { - ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"` -} -type ResponseOp_ResponseDeleteRange struct { - ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` -} -type ResponseOp_ResponseTxn struct { - ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"` -} - -func (*ResponseOp_ResponseRange) isResponseOp_Response() {} -func (*ResponseOp_ResponsePut) isResponseOp_Response() {} -func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} -func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} - -func (m *ResponseOp) GetResponse() isResponseOp_Response { - if m != nil { - return m.Response - } - return nil -} - -func (m *ResponseOp) GetResponseRange() *RangeResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok { - return x.ResponseRange - } - return nil -} - -func (m *ResponseOp) GetResponsePut() *PutResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok { - return x.ResponsePut - } - return nil -} - -func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok { - return x.ResponseDeleteRange - } - return nil -} - -func (m *ResponseOp) GetResponseTxn() *TxnResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok { - return x.ResponseTxn - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
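// Illustrative aside with simplified stand-in types: RequestOp and ResponseOp above
// model a protobuf oneof as an interface with an unexported marker method plus one
// wrapper struct per variant; callers read the union back with a type switch, much
// as the generated Get* accessors do with type assertions. A self-contained sketch:
package main

import "fmt"

type isRequest interface{ isRequest() }

type requestPut struct{ Key, Value string }
type requestRange struct{ Key string }

func (requestPut) isRequest()   {}
func (requestRange) isRequest() {}

type requestOp struct{ Request isRequest }

func describe(op requestOp) string {
	switch r := op.Request.(type) {
	case requestPut:
		return fmt.Sprintf("put %q=%q", r.Key, r.Value)
	case requestRange:
		return fmt.Sprintf("range starting at %q", r.Key)
	default:
		return "empty op"
	}
}

func main() {
	fmt.Println(describe(requestOp{Request: requestPut{Key: "k", Value: "v"}}))
	fmt.Println(describe(requestOp{Request: requestRange{Key: "a"}}))
}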
-func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{ - (*ResponseOp_ResponseRange)(nil), - (*ResponseOp_ResponsePut)(nil), - (*ResponseOp_ResponseDeleteRange)(nil), - (*ResponseOp_ResponseTxn)(nil), - } -} - -func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ResponseOp) - // response - switch x := m.Response.(type) { - case *ResponseOp_ResponseRange: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponseRange); err != nil { - return err - } - case *ResponseOp_ResponsePut: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponsePut); err != nil { - return err - } - case *ResponseOp_ResponseDeleteRange: - _ = b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { - return err - } - case *ResponseOp_ResponseTxn: - _ = b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponseTxn); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ResponseOp.Response has unexpected type %T", x) - } - return nil -} - -func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ResponseOp) - switch tag { - case 1: // response.response_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RangeResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponseRange{msg} - return true, err - case 2: // response.response_put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PutResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponsePut{msg} - return true, err - case 3: // response.response_delete_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(DeleteRangeResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponseDeleteRange{msg} - return true, err - case 4: // response.response_txn - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TxnResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponseTxn{msg} - return true, err - default: - return false, nil - } -} - -func _ResponseOp_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ResponseOp) - // response - switch x := m.Response.(type) { - case *ResponseOp_ResponseRange: - s := proto.Size(x.ResponseRange) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseOp_ResponsePut: - s := proto.Size(x.ResponsePut) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseOp_ResponseDeleteRange: - s := proto.Size(x.ResponseDeleteRange) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseOp_ResponseTxn: - s := proto.Size(x.ResponseTxn) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Compare struct { - // result is logical comparison operation for this comparison. 
- Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` - // target is the key-value field to inspect for the comparison. - Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` - // key is the subject key for the comparison operation. - Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - // Types that are valid to be assigned to TargetUnion: - // *Compare_Version - // *Compare_CreateRevision - // *Compare_ModRevision - // *Compare_Value - // *Compare_Lease - TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` - // range_end compares the given target to all keys in the range [key, range_end). - // See RangeRequest for more details on key ranges. - RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` -} - -func (m *Compare) Reset() { *m = Compare{} } -func (m *Compare) String() string { return proto.CompactTextString(m) } -func (*Compare) ProtoMessage() {} -func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} } - -type isCompare_TargetUnion interface { - isCompare_TargetUnion() - MarshalTo([]byte) (int, error) - Size() int -} - -type Compare_Version struct { - Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"` -} -type Compare_CreateRevision struct { - CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"` -} -type Compare_ModRevision struct { - ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"` -} -type Compare_Value struct { - Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"` -} -type Compare_Lease struct { - Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"` -} - -func (*Compare_Version) isCompare_TargetUnion() {} -func (*Compare_CreateRevision) isCompare_TargetUnion() {} -func (*Compare_ModRevision) isCompare_TargetUnion() {} -func (*Compare_Value) isCompare_TargetUnion() {} -func (*Compare_Lease) isCompare_TargetUnion() {} - -func (m *Compare) GetTargetUnion() isCompare_TargetUnion { - if m != nil { - return m.TargetUnion - } - return nil -} - -func (m *Compare) GetResult() Compare_CompareResult { - if m != nil { - return m.Result - } - return Compare_EQUAL -} - -func (m *Compare) GetTarget() Compare_CompareTarget { - if m != nil { - return m.Target - } - return Compare_VERSION -} - -func (m *Compare) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *Compare) GetVersion() int64 { - if x, ok := m.GetTargetUnion().(*Compare_Version); ok { - return x.Version - } - return 0 -} - -func (m *Compare) GetCreateRevision() int64 { - if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { - return x.CreateRevision - } - return 0 -} - -func (m *Compare) GetModRevision() int64 { - if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { - return x.ModRevision - } - return 0 -} - -func (m *Compare) GetValue() []byte { - if x, ok := m.GetTargetUnion().(*Compare_Value); ok { - return x.Value - } - return nil -} - -func (m *Compare) GetLease() int64 { - if x, ok := m.GetTargetUnion().(*Compare_Lease); ok { - return x.Lease - } - return 0 -} - -func (m *Compare) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
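// Illustrative aside, not part of the vendored file: Compare above is the guard of
// an etcd transaction (the MultiOp construct described further down). With the
// current etcd v3 client (go.etcd.io/etcd/client/v3) a guarded write looks roughly
// like the sketch below; the endpoint, key, and value are assumptions.
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Guard: the key has never been created (its create_revision is still 0).
	// Then: create it. Else: read whatever is already stored.
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision("/example/key"), "=", 0)).
		Then(clientv3.OpPut("/example/key", "initialized")).
		Else(clientv3.OpGet("/example/key")).
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("guard held:", resp.Succeeded)
}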
-func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{ - (*Compare_Version)(nil), - (*Compare_CreateRevision)(nil), - (*Compare_ModRevision)(nil), - (*Compare_Value)(nil), - (*Compare_Lease)(nil), - } -} - -func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Compare) - // target_union - switch x := m.TargetUnion.(type) { - case *Compare_Version: - _ = b.EncodeVarint(4<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.Version)) - case *Compare_CreateRevision: - _ = b.EncodeVarint(5<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.CreateRevision)) - case *Compare_ModRevision: - _ = b.EncodeVarint(6<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.ModRevision)) - case *Compare_Value: - _ = b.EncodeVarint(7<<3 | proto.WireBytes) - _ = b.EncodeRawBytes(x.Value) - case *Compare_Lease: - _ = b.EncodeVarint(8<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.Lease)) - case nil: - default: - return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x) - } - return nil -} - -func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Compare) - switch tag { - case 4: // target_union.version - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_Version{int64(x)} - return true, err - case 5: // target_union.create_revision - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_CreateRevision{int64(x)} - return true, err - case 6: // target_union.mod_revision - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_ModRevision{int64(x)} - return true, err - case 7: // target_union.value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.TargetUnion = &Compare_Value{x} - return true, err - case 8: // target_union.lease - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_Lease{int64(x)} - return true, err - default: - return false, nil - } -} - -func _Compare_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Compare) - // target_union - switch x := m.TargetUnion.(type) { - case *Compare_Version: - n += proto.SizeVarint(4<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Version)) - case *Compare_CreateRevision: - n += proto.SizeVarint(5<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.CreateRevision)) - case *Compare_ModRevision: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.ModRevision)) - case *Compare_Value: - n += proto.SizeVarint(7<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Value))) - n += len(x.Value) - case *Compare_Lease: - n += proto.SizeVarint(8<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Lease)) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// From google paxosdb paper: -// Our implementation hinges around a powerful primitive which we call MultiOp. 
All other database -// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically -// and consists of three components: -// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check -// for the absence or presence of a value, or compare with a given value. Two different tests in the guard -// may apply to the same or different entries in the database. All tests in the guard are applied and -// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise -// it executes f op (see item 3 below). -// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or -// lookup operation, and applies to a single database entry. Two different operations in the list may apply -// to the same or different entries in the database. These operations are executed -// if guard evaluates to -// true. -// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. -type TxnRequest struct { - // compare is a list of predicates representing a conjunction of terms. - // If the comparisons succeed, then the success requests will be processed in order, - // and the response will contain their respective responses in order. - // If the comparisons fail, then the failure requests will be processed in order, - // and the response will contain their respective responses in order. - Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"` - // success is a list of requests which will be applied when compare evaluates to true. - Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"` - // failure is a list of requests which will be applied when compare evaluates to false. - Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"` -} - -func (m *TxnRequest) Reset() { *m = TxnRequest{} } -func (m *TxnRequest) String() string { return proto.CompactTextString(m) } -func (*TxnRequest) ProtoMessage() {} -func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} } - -func (m *TxnRequest) GetCompare() []*Compare { - if m != nil { - return m.Compare - } - return nil -} - -func (m *TxnRequest) GetSuccess() []*RequestOp { - if m != nil { - return m.Success - } - return nil -} - -func (m *TxnRequest) GetFailure() []*RequestOp { - if m != nil { - return m.Failure - } - return nil -} - -type TxnResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // succeeded is set to true if the compare evaluated to true or false otherwise. - Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` - // responses is a list of responses corresponding to the results from applying - // success if succeeded is true or failure if succeeded is false. 
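The TxnRequest/TxnResponse messages above are the MultiOp primitive described in the comment: a guard (compare), a success branch, and a failure branch, applied atomically. A minimal sketch of driving them through the fluent If/Then/Else API, assuming the etcd v3 Go client (go.etcd.io/etcd/client/v3); the endpoint and key names are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// If() fills TxnRequest.compare, Then() fills success, Else() fills failure.
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision("/jobs/42"), "=", 0)).
		Then(clientv3.OpPut("/jobs/42", "queued")).
		Else(clientv3.OpGet("/jobs/42")).
		Commit()
	if err != nil {
		panic(err)
	}

	// TxnResponse.Succeeded reports which branch ran; Responses holds one
	// ResponseOp per operation of that branch, in order.
	if resp.Succeeded {
		fmt.Println("created /jobs/42")
	} else {
		kvs := resp.Responses[0].GetResponseRange().Kvs
		fmt.Printf("already present: %s\n", kvs[0].Value)
	}
}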
- Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"` -} - -func (m *TxnResponse) Reset() { *m = TxnResponse{} } -func (m *TxnResponse) String() string { return proto.CompactTextString(m) } -func (*TxnResponse) ProtoMessage() {} -func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} } - -func (m *TxnResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TxnResponse) GetSucceeded() bool { - if m != nil { - return m.Succeeded - } - return false -} - -func (m *TxnResponse) GetResponses() []*ResponseOp { - if m != nil { - return m.Responses - } - return nil -} - -// CompactionRequest compacts the key-value store up to a given revision. All superseded keys -// with a revision less than the compaction revision will be removed. -type CompactionRequest struct { - // revision is the key-value store revision for the compaction operation. - Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` - // physical is set so the RPC will wait until the compaction is physically - // applied to the local database such that compacted entries are totally - // removed from the backend database. - Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` -} - -func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } -func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } -func (*CompactionRequest) ProtoMessage() {} -func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } - -func (m *CompactionRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *CompactionRequest) GetPhysical() bool { - if m != nil { - return m.Physical - } - return false -} - -type CompactionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } -func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } -func (*CompactionResponse) ProtoMessage() {} -func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} } - -func (m *CompactionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type HashRequest struct { -} - -func (m *HashRequest) Reset() { *m = HashRequest{} } -func (m *HashRequest) String() string { return proto.CompactTextString(m) } -func (*HashRequest) ProtoMessage() {} -func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} } - -type HashKVRequest struct { - // revision is the key-value store revision for the hash operation. - Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` -} - -func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } -func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } -func (*HashKVRequest) ProtoMessage() {} -func (*HashKVRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} } - -func (m *HashKVRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -type HashKVResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // hash is the hash value computed from the responding member's MVCC keys up to a given revision. 
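CompactionRequest above prunes key history up to a revision, with the physical flag forcing the server to wait until the backend space is actually reclaimed. A minimal sketch of issuing it, assuming the etcd v3 Go client (go.etcd.io/etcd/client/v3); the probe key is a placeholder.

package example

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// compactToCurrent compacts the key-value history up to the store's current
// revision. WithCompactPhysical makes the call block until compacted entries
// are removed from the backend, mirroring CompactionRequest.physical.
func compactToCurrent(ctx context.Context, cli *clientv3.Client) error {
	// Any read returns the current store revision in the response header.
	resp, err := cli.Get(ctx, "compaction-probe", clientv3.WithCountOnly())
	if err != nil {
		return err
	}
	rev := resp.Header.Revision

	_, err = cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
	return err
}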
- Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` - // compact_revision is the compacted revision of key-value store when hash begins. - CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` -} - -func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } -func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } -func (*HashKVResponse) ProtoMessage() {} -func (*HashKVResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} } - -func (m *HashKVResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *HashKVResponse) GetHash() uint32 { - if m != nil { - return m.Hash - } - return 0 -} - -func (m *HashKVResponse) GetCompactRevision() int64 { - if m != nil { - return m.CompactRevision - } - return 0 -} - -type HashResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // hash is the hash value computed from the responding member's KV's backend. - Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` -} - -func (m *HashResponse) Reset() { *m = HashResponse{} } -func (m *HashResponse) String() string { return proto.CompactTextString(m) } -func (*HashResponse) ProtoMessage() {} -func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} } - -func (m *HashResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *HashResponse) GetHash() uint32 { - if m != nil { - return m.Hash - } - return 0 -} - -type SnapshotRequest struct { -} - -func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } -func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*SnapshotRequest) ProtoMessage() {} -func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} } - -type SnapshotResponse struct { - // header has the current key-value store information. The first header in the snapshot - // stream indicates the point in time of the snapshot. - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // remaining_bytes is the number of blob bytes to be sent after this message - RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` - // blob contains the next chunk of the snapshot in the snapshot stream. - Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` -} - -func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } -func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*SnapshotResponse) ProtoMessage() {} -func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } - -func (m *SnapshotResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *SnapshotResponse) GetRemainingBytes() uint64 { - if m != nil { - return m.RemainingBytes - } - return 0 -} - -func (m *SnapshotResponse) GetBlob() []byte { - if m != nil { - return m.Blob - } - return nil -} - -type WatchRequest struct { - // request_union is a request to either create a new watcher or cancel an existing watcher. 
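The SnapshotRequest/SnapshotResponse pair above streams the backend as blob chunks with remaining_bytes bookkeeping. A minimal sketch of consuming that stream, assuming the etcd v3 Go client (go.etcd.io/etcd/client/v3), which reassembles the chunks into a single reader; the output path is a placeholder.

package example

import (
	"context"
	"io"
	"os"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// saveSnapshot streams the backend snapshot to a local file.
func saveSnapshot(ctx context.Context, cli *clientv3.Client, path string) error {
	rc, err := cli.Snapshot(ctx)
	if err != nil {
		return err
	}
	defer rc.Close()

	f, err := os.Create(path) // e.g. "backup.db" (placeholder)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, rc)
	return err
}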
- // - // Types that are valid to be assigned to RequestUnion: - // *WatchRequest_CreateRequest - // *WatchRequest_CancelRequest - RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` -} - -func (m *WatchRequest) Reset() { *m = WatchRequest{} } -func (m *WatchRequest) String() string { return proto.CompactTextString(m) } -func (*WatchRequest) ProtoMessage() {} -func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } - -type isWatchRequest_RequestUnion interface { - isWatchRequest_RequestUnion() - MarshalTo([]byte) (int, error) - Size() int -} - -type WatchRequest_CreateRequest struct { - CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"` -} -type WatchRequest_CancelRequest struct { - CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"` -} - -func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} -func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} - -func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion { - if m != nil { - return m.RequestUnion - } - return nil -} - -func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { - if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok { - return x.CreateRequest - } - return nil -} - -func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { - if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok { - return x.CancelRequest - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{ - (*WatchRequest_CreateRequest)(nil), - (*WatchRequest_CancelRequest)(nil), - } -} - -func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*WatchRequest) - // request_union - switch x := m.RequestUnion.(type) { - case *WatchRequest_CreateRequest: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.CreateRequest); err != nil { - return err - } - case *WatchRequest_CancelRequest: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.CancelRequest); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x) - } - return nil -} - -func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*WatchRequest) - switch tag { - case 1: // request_union.create_request - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(WatchCreateRequest) - err := b.DecodeMessage(msg) - m.RequestUnion = &WatchRequest_CreateRequest{msg} - return true, err - case 2: // request_union.cancel_request - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(WatchCancelRequest) - err := b.DecodeMessage(msg) - m.RequestUnion = &WatchRequest_CancelRequest{msg} - return true, err - default: - return false, nil - } -} - -func _WatchRequest_OneofSizer(msg proto.Message) (n int) { - m := msg.(*WatchRequest) - // request_union - switch x := m.RequestUnion.(type) { - case *WatchRequest_CreateRequest: - s := proto.Size(x.CreateRequest) - n += 
proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *WatchRequest_CancelRequest: - s := proto.Size(x.CancelRequest) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type WatchCreateRequest struct { - // key is the key to register for watching. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the end of the range [key, range_end) to watch. If range_end is not given, - // only the key argument is watched. If range_end is equal to '\0', all keys greater than - // or equal to the key argument are watched. - // If the range_end is one bit larger than the given key, - // then all keys with the prefix (the given key) will be watched. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". - StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` - // progress_notify is set so that the etcd server will periodically send a WatchResponse with - // no events to the new watcher if there are no recent events. It is useful when clients - // wish to recover a disconnected watcher starting from a recent known revision. - // The etcd server may decide how often it will send notifications based on current load. - ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` - // filters filter the events at server side before it sends back to the watcher. - Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` - // If prev_kv is set, created watcher gets the previous KV before the event happens. - // If the previous KV is already compacted, nothing will be returned. - PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` -} - -func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } -func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCreateRequest) ProtoMessage() {} -func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} } - -func (m *WatchCreateRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *WatchCreateRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *WatchCreateRequest) GetStartRevision() int64 { - if m != nil { - return m.StartRevision - } - return 0 -} - -func (m *WatchCreateRequest) GetProgressNotify() bool { - if m != nil { - return m.ProgressNotify - } - return false -} - -func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { - if m != nil { - return m.Filters - } - return nil -} - -func (m *WatchCreateRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -type WatchCancelRequest struct { - // watch_id is the watcher id to cancel so that no more events are transmitted. 
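The WatchCreateRequest fields above (key plus range_end for a prefix, start_revision, prev_kv) and the WatchResponse bookkeeping (compact_revision, cancellation) surface as options and response fields in the client. A minimal sketch, assuming the etcd v3 Go client (go.etcd.io/etcd/client/v3); the prefix and starting revision are placeholders.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// watchJobs watches every key under the placeholder prefix "/jobs/", starting
// from a known revision and asking for the previous value of each key.
func watchJobs(ctx context.Context, cli *clientv3.Client, fromRev int64) error {
	wch := cli.Watch(ctx, "/jobs/",
		clientv3.WithPrefix(),     // key + range_end covering the prefix
		clientv3.WithRev(fromRev), // start_revision (0 means "now")
		clientv3.WithPrevKV(),     // include the previous key-value pair
	)
	for wresp := range wch {
		if wresp.CompactRevision != 0 {
			// The requested revision was compacted; a new watcher must be
			// created at or after CompactRevision.
			return fmt.Errorf("watch compacted at revision %d", wresp.CompactRevision)
		}
		if err := wresp.Err(); err != nil {
			return err
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	return ctx.Err()
}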
- WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` -} - -func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } -func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCancelRequest) ProtoMessage() {} -func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } - -func (m *WatchCancelRequest) GetWatchId() int64 { - if m != nil { - return m.WatchId - } - return 0 -} - -type WatchResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // watch_id is the ID of the watcher that corresponds to the response. - WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` - // created is set to true if the response is for a create watch request. - // The client should record the watch_id and expect to receive events for - // the created watcher from the same stream. - // All events sent to the created watcher will attach with the same watch_id. - Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` - // canceled is set to true if the response is for a cancel watch request. - // No further events will be sent to the canceled watcher. - Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"` - // compact_revision is set to the minimum index if a watcher tries to watch - // at a compacted index. - // - // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. - // - // The client should treat the watcher as canceled and should not try to create any - // watcher with the same start_revision again. - CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - // cancel_reason indicates the reason for canceling the watcher. - CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` -} - -func (m *WatchResponse) Reset() { *m = WatchResponse{} } -func (m *WatchResponse) String() string { return proto.CompactTextString(m) } -func (*WatchResponse) ProtoMessage() {} -func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} } - -func (m *WatchResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *WatchResponse) GetWatchId() int64 { - if m != nil { - return m.WatchId - } - return 0 -} - -func (m *WatchResponse) GetCreated() bool { - if m != nil { - return m.Created - } - return false -} - -func (m *WatchResponse) GetCanceled() bool { - if m != nil { - return m.Canceled - } - return false -} - -func (m *WatchResponse) GetCompactRevision() int64 { - if m != nil { - return m.CompactRevision - } - return 0 -} - -func (m *WatchResponse) GetCancelReason() string { - if m != nil { - return m.CancelReason - } - return "" -} - -func (m *WatchResponse) GetEvents() []*mvccpb.Event { - if m != nil { - return m.Events - } - return nil -} - -type LeaseGrantRequest struct { - // TTL is the advisory time-to-live in seconds. Expired lease will return -1. - TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` - // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. 
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } -func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantRequest) ProtoMessage() {} -func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } - -func (m *LeaseGrantRequest) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseGrantRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseGrantResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // ID is the lease ID for the granted lease. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the server chosen lease time-to-live in seconds. - TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` -} - -func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } -func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantResponse) ProtoMessage() {} -func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} } - -func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseGrantResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseGrantResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseGrantResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -type LeaseRevokeRequest struct { - // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } -func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeRequest) ProtoMessage() {} -func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } - -func (m *LeaseRevokeRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseRevokeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } -func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeResponse) ProtoMessage() {} -func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } - -func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type LeaseKeepAliveRequest struct { - // ID is the lease ID for the lease to keep alive. 
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } -func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveRequest) ProtoMessage() {} -func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } - -func (m *LeaseKeepAliveRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseKeepAliveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // ID is the lease ID from the keep alive request. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the new time-to-live for the lease. - TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` -} - -func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } -func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveResponse) ProtoMessage() {} -func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } - -func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseKeepAliveResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseKeepAliveResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -type LeaseTimeToLiveRequest struct { - // ID is the lease ID for the lease. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // keys is true to query all the keys attached to this lease. - Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` -} - -func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } -func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveRequest) ProtoMessage() {} -func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } - -func (m *LeaseTimeToLiveRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseTimeToLiveRequest) GetKeys() bool { - if m != nil { - return m.Keys - } - return false -} - -type LeaseTimeToLiveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // ID is the lease ID from the keep alive request. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. - TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` - // Keys is the list of keys attached to this lease. 
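The lease messages above (LeaseGrant, LeaseKeepAlive, LeaseTimeToLive with keys=true) form one lifecycle: grant a TTL, attach keys, refresh, inspect. A minimal sketch, assuming the etcd v3 Go client (go.etcd.io/etcd/client/v3); key names and the TTL are placeholders.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// leaseDemo grants a 10-second lease, attaches a key to it, keeps it alive,
// and inspects the remaining TTL together with the attached keys.
func leaseDemo(ctx context.Context, cli *clientv3.Client) error {
	grant, err := cli.Grant(ctx, 10) // TTL in seconds; the server picks the ID
	if err != nil {
		return err
	}

	// The key disappears automatically when the lease expires or is revoked.
	if _, err := cli.Put(ctx, "/locks/worker-1", "alive", clientv3.WithLease(grant.ID)); err != nil {
		return err
	}

	// KeepAlive refreshes the lease in the background; the channel delivers
	// one LeaseKeepAliveResponse per renewal.
	kaCh, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err
	}
	go func() {
		for range kaCh { // drain renewals until the context is cancelled
		}
	}()

	// TimeToLive with WithAttachedKeys corresponds to keys=true above.
	ttl, err := cli.TimeToLive(ctx, grant.ID, clientv3.WithAttachedKeys())
	if err != nil {
		return err
	}
	fmt.Printf("lease %x: %ds left, keys=%q\n", ttl.ID, ttl.TTL, ttl.Keys)
	return nil
}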
- Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` -} - -func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } -func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveResponse) ProtoMessage() {} -func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } - -func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseTimeToLiveResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { - if m != nil { - return m.GrantedTTL - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { - if m != nil { - return m.Keys - } - return nil -} - -type LeaseLeasesRequest struct { -} - -func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } -func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesRequest) ProtoMessage() {} -func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } - -type LeaseStatus struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } -func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } -func (*LeaseStatus) ProtoMessage() {} -func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } - -func (m *LeaseStatus) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseLeasesResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases" json:"leases,omitempty"` -} - -func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } -func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesResponse) ProtoMessage() {} -func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } - -func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus { - if m != nil { - return m.Leases - } - return nil -} - -type Member struct { - // ID is the member ID for this member. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // name is the human-readable name of the member. If the member is not started, the name will be an empty string. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // peerURLs is the list of URLs the member exposes to the cluster for communication. - PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"` - // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. 
- ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"` -} - -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto.CompactTextString(m) } -func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } - -func (m *Member) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Member) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Member) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -func (m *Member) GetClientURLs() []string { - if m != nil { - return m.ClientURLs - } - return nil -} - -type MemberAddRequest struct { - // peerURLs is the list of URLs the added member will use to communicate with the cluster. - PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` -} - -func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } -func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } -func (*MemberAddRequest) ProtoMessage() {} -func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } - -func (m *MemberAddRequest) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -type MemberAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // member is the member information for the added member. - Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` - // members is a list of all members after adding the new member. - Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` -} - -func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } -func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } -func (*MemberAddResponse) ProtoMessage() {} -func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } - -func (m *MemberAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberAddResponse) GetMember() *Member { - if m != nil { - return m.Member - } - return nil -} - -func (m *MemberAddResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberRemoveRequest struct { - // ID is the member ID of the member to remove. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } -func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveRequest) ProtoMessage() {} -func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } - -func (m *MemberRemoveRequest) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -type MemberRemoveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // members is a list of all members after removing the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` -} - -func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } -func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveResponse) ProtoMessage() {} -func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } - -func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberRemoveResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberUpdateRequest struct { - // ID is the member ID of the member to update. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // peerURLs is the new list of URLs the member will use to communicate with the cluster. - PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"` -} - -func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } -func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateRequest) ProtoMessage() {} -func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } - -func (m *MemberUpdateRequest) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *MemberUpdateRequest) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -type MemberUpdateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // members is a list of all members after updating the member. - Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` -} - -func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } -func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateResponse) ProtoMessage() {} -func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } - -func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberUpdateResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberListRequest struct { -} - -func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } -func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } -func (*MemberListRequest) ProtoMessage() {} -func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } - -type MemberListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // members is a list of all members associated with the cluster. 
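The membership messages above (Member, MemberAdd, MemberRemove, MemberList) are exposed through the cluster client. A minimal sketch, assuming the etcd v3 Go client (go.etcd.io/etcd/client/v3); the peer URL is a placeholder.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// reshapeCluster lists the current members, adds one by its peer URL, and
// removes another by ID, mirroring the MemberList/Add/Remove messages above.
func reshapeCluster(ctx context.Context, cli *clientv3.Client, removeID uint64) error {
	list, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range list.Members {
		fmt.Printf("member %x name=%q peers=%v clients=%v\n",
			m.ID, m.Name, m.PeerURLs, m.ClientURLs)
	}

	// peerURLs for the new member (placeholder address).
	if _, err := cli.MemberAdd(ctx, []string{"http://10.0.0.4:2380"}); err != nil {
		return err
	}

	_, err = cli.MemberRemove(ctx, removeID)
	return err
}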
- Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` -} - -func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } -func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } -func (*MemberListResponse) ProtoMessage() {} -func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } - -func (m *MemberListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberListResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type DefragmentRequest struct { -} - -func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } -func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } -func (*DefragmentRequest) ProtoMessage() {} -func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } - -type DefragmentResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } -func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } -func (*DefragmentResponse) ProtoMessage() {} -func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } - -func (m *DefragmentResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type MoveLeaderRequest struct { - // targetID is the node ID for the new leader. - TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` -} - -func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } -func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderRequest) ProtoMessage() {} -func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } - -func (m *MoveLeaderRequest) GetTargetID() uint64 { - if m != nil { - return m.TargetID - } - return 0 -} - -type MoveLeaderResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } -func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderResponse) ProtoMessage() {} -func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } - -func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AlarmRequest struct { - // action is the kind of alarm request to issue. The action - // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a - // raised alarm. - Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` - // memberID is the ID of the member associated with the alarm. If memberID is 0, the - // alarm request covers all members. - MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` - // alarm is the type of alarm to consider for this request. 
- Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` -} - -func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } -func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } -func (*AlarmRequest) ProtoMessage() {} -func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } - -func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { - if m != nil { - return m.Action - } - return AlarmRequest_GET -} - -func (m *AlarmRequest) GetMemberID() uint64 { - if m != nil { - return m.MemberID - } - return 0 -} - -func (m *AlarmRequest) GetAlarm() AlarmType { - if m != nil { - return m.Alarm - } - return AlarmType_NONE -} - -type AlarmMember struct { - // memberID is the ID of the member associated with the raised alarm. - MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` - // alarm is the type of alarm which has been raised. - Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` -} - -func (m *AlarmMember) Reset() { *m = AlarmMember{} } -func (m *AlarmMember) String() string { return proto.CompactTextString(m) } -func (*AlarmMember) ProtoMessage() {} -func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } - -func (m *AlarmMember) GetMemberID() uint64 { - if m != nil { - return m.MemberID - } - return 0 -} - -func (m *AlarmMember) GetAlarm() AlarmType { - if m != nil { - return m.Alarm - } - return AlarmType_NONE -} - -type AlarmResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // alarms is a list of alarms associated with the alarm request. - Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"` -} - -func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } -func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } -func (*AlarmResponse) ProtoMessage() {} -func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } - -func (m *AlarmResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AlarmResponse) GetAlarms() []*AlarmMember { - if m != nil { - return m.Alarms - } - return nil -} - -type StatusRequest struct { -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (m *StatusRequest) String() string { return proto.CompactTextString(m) } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } - -type StatusResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // version is the cluster protocol version used by the responding member. - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // dbSize is the size of the backend database, in bytes, of the responding member. - DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` - // leader is the member ID which the responding member believes is the current leader. - Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` - // raftIndex is the current raft index of the responding member. - RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` - // raftTerm is the current raft term of the responding member. 
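The maintenance messages above (DefragmentRequest, AlarmRequest with GET/DEACTIVATE actions, StatusResponse with version/dbSize/leader/raft fields) are addressed per member. A minimal sketch, assuming the etcd v3 Go client (go.etcd.io/etcd/client/v3); the endpoint argument is a placeholder such as "localhost:2379".

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// inspectMember queries one member's status, clears any raised alarms, and
// defragments its backend; Status and Defragment are addressed by endpoint.
func inspectMember(ctx context.Context, cli *clientv3.Client, endpoint string) error {
	st, err := cli.Status(ctx, endpoint)
	if err != nil {
		return err
	}
	fmt.Printf("version=%s dbSize=%d leader=%x raftIndex=%d raftTerm=%d\n",
		st.Version, st.DbSize, st.Leader, st.RaftIndex, st.RaftTerm)

	alarms, err := cli.AlarmList(ctx) // AlarmRequest with the GET action
	if err != nil {
		return err
	}
	for _, a := range alarms.Alarms {
		// DEACTIVATE the alarm for the member that raised it.
		if _, err := cli.AlarmDisarm(ctx, (*clientv3.AlarmMember)(a)); err != nil {
			return err
		}
	}

	_, err = cli.Defragment(ctx, endpoint)
	return err
}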
- RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (m *StatusResponse) String() string { return proto.CompactTextString(m) } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } - -func (m *StatusResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *StatusResponse) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *StatusResponse) GetDbSize() int64 { - if m != nil { - return m.DbSize - } - return 0 -} - -func (m *StatusResponse) GetLeader() uint64 { - if m != nil { - return m.Leader - } - return 0 -} - -func (m *StatusResponse) GetRaftIndex() uint64 { - if m != nil { - return m.RaftIndex - } - return 0 -} - -func (m *StatusResponse) GetRaftTerm() uint64 { - if m != nil { - return m.RaftTerm - } - return 0 -} - -type AuthEnableRequest struct { -} - -func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } -func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthEnableRequest) ProtoMessage() {} -func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } - -type AuthDisableRequest struct { -} - -func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } -func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthDisableRequest) ProtoMessage() {} -func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } - -type AuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } -func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } -func (*AuthenticateRequest) ProtoMessage() {} -func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } - -func (m *AuthenticateRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthenticateRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -type AuthUserAddRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } -func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddRequest) ProtoMessage() {} -func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } - -func (m *AuthUserAddRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserAddRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -type AuthUserGetRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } -func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetRequest) ProtoMessage() {} -func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } - -func (m *AuthUserGetRequest) 
GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type AuthUserDeleteRequest struct { - // name is the name of the user to delete. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } -func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteRequest) ProtoMessage() {} -func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } - -func (m *AuthUserDeleteRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type AuthUserChangePasswordRequest struct { - // name is the name of the user whose password is being changed. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // password is the new password for the user. - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } -func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserChangePasswordRequest) ProtoMessage() {} -func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{59} -} - -func (m *AuthUserChangePasswordRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserChangePasswordRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -type AuthUserGrantRoleRequest struct { - // user is the name of the user which should be granted a given role. - User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` - // role is the name of the role to grant to the user. - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } -func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleRequest) ProtoMessage() {} -func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } - -func (m *AuthUserGrantRoleRequest) GetUser() string { - if m != nil { - return m.User - } - return "" -} - -func (m *AuthUserGrantRoleRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthUserRevokeRoleRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } -func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleRequest) ProtoMessage() {} -func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } - -func (m *AuthUserRevokeRoleRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserRevokeRoleRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthRoleAddRequest struct { - // name is the name of the role to add to the authentication system. 
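The AuthUser* requests above cover user provisioning: add, grant a role, change the password, inspect. A minimal sketch, assuming the etcd v3 Go client (go.etcd.io/etcd/client/v3); user names, role names, and passwords are placeholders.

package example

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// provisionUser creates a user, attaches an existing role to it, and rotates
// its password.
func provisionUser(ctx context.Context, cli *clientv3.Client) error {
	if _, err := cli.UserAdd(ctx, "app-user", "initial-password"); err != nil {
		return err
	}
	if _, err := cli.UserGrantRole(ctx, "app-user", "app-role"); err != nil {
		return err
	}
	if _, err := cli.UserChangePassword(ctx, "app-user", "rotated-password"); err != nil {
		return err
	}
	// UserGet reports the roles currently granted to the user.
	resp, err := cli.UserGet(ctx, "app-user")
	if err != nil {
		return err
	}
	_ = resp.Roles // []string of granted roles
	return nil
}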
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } -func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddRequest) ProtoMessage() {} -func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } - -func (m *AuthRoleAddRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type AuthRoleGetRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } -func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetRequest) ProtoMessage() {} -func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } - -func (m *AuthRoleGetRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthUserListRequest struct { -} - -func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } -func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserListRequest) ProtoMessage() {} -func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } - -type AuthRoleListRequest struct { -} - -func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } -func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListRequest) ProtoMessage() {} -func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } - -type AuthRoleDeleteRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } -func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteRequest) ProtoMessage() {} -func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } - -func (m *AuthRoleDeleteRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthRoleGrantPermissionRequest struct { - // name is the name of the role which will be granted the permission. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // perm is the permission to grant to the role. 
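The role-management requests above (AuthRoleAdd, AuthRoleGrantPermission with a [key, range_end) range, AuthEnable) complete the authentication setup. A minimal sketch, assuming the etcd v3 Go client (go.etcd.io/etcd/client/v3); the role name and key prefix are placeholders, and enabling auth presumes a root user already exists.

package example

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// provisionRole creates a role, grants it read/write access to a key prefix,
// and then turns authentication on.
func provisionRole(ctx context.Context, cli *clientv3.Client) error {
	if _, err := cli.RoleAdd(ctx, "app-role"); err != nil {
		return err
	}
	// Grant READWRITE on every key under the placeholder prefix "/app/",
	// expressed as the usual [key, range_end) interval.
	if _, err := cli.RoleGrantPermission(ctx, "app-role",
		"/app/", clientv3.GetPrefixRangeEnd("/app/"),
		clientv3.PermissionType(clientv3.PermReadWrite)); err != nil {
		return err
	}
	// Once users and roles (including root) exist, enable auth cluster-wide.
	_, err := cli.AuthEnable(ctx)
	return err
}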
- Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"` -} - -func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } -func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} -func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{67} -} - -func (m *AuthRoleGrantPermissionRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { - if m != nil { - return m.Perm - } - return nil -} - -type AuthRoleRevokePermissionRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` -} - -func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } -func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} -func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{68} -} - -func (m *AuthRoleRevokePermissionRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -func (m *AuthRoleRevokePermissionRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { - if m != nil { - return m.RangeEnd - } - return "" -} - -type AuthEnableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } -func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthEnableResponse) ProtoMessage() {} -func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } - -func (m *AuthEnableResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthDisableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } -func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthDisableResponse) ProtoMessage() {} -func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } - -func (m *AuthDisableResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthenticateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // token is an authorized token that can be used in succeeding RPCs - Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` -} - -func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } -func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } -func (*AuthenticateResponse) ProtoMessage() {} -func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } - -func (m *AuthenticateResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthenticateResponse) GetToken() string { 
- if m != nil { - return m.Token - } - return "" -} - -type AuthUserAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } -func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddResponse) ProtoMessage() {} -func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } - -func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` -} - -func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } -func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetResponse) ProtoMessage() {} -func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } - -func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthUserGetResponse) GetRoles() []string { - if m != nil { - return m.Roles - } - return nil -} - -type AuthUserDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } -func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteResponse) ProtoMessage() {} -func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} } - -func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserChangePasswordResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } -func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserChangePasswordResponse) ProtoMessage() {} -func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{75} -} - -func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserGrantRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } -func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleResponse) ProtoMessage() {} -func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} } - -func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserRevokeRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } -func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleResponse) ProtoMessage() {} -func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return 
fileDescriptorRpc, []int{77} } - -func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } -func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddResponse) ProtoMessage() {} -func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{78} } - -func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"` -} - -func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } -func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetResponse) ProtoMessage() {} -func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{79} } - -func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { - if m != nil { - return m.Perm - } - return nil -} - -type AuthRoleListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` -} - -func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } -func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListResponse) ProtoMessage() {} -func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{80} } - -func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthRoleListResponse) GetRoles() []string { - if m != nil { - return m.Roles - } - return nil -} - -type AuthUserListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` -} - -func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } -func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserListResponse) ProtoMessage() {} -func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{81} } - -func (m *AuthUserListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthUserListResponse) GetUsers() []string { - if m != nil { - return m.Users - } - return nil -} - -type AuthRoleDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } -func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteResponse) ProtoMessage() {} -func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{82} } - -func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleGrantPermissionResponse struct { - Header 
*ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } -func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} -func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{83} -} - -func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleRevokePermissionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } -func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} -func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{84} -} - -func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func init() { - proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") - proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") - proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") - proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") - proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") - proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") - proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") - proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp") - proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp") - proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") - proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") - proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") - proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") - proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") - proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest") - proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest") - proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse") - proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse") - proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest") - proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse") - proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest") - proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest") - proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest") - proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse") - proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest") - proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") - proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") - proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") - proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") - proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") - 
proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") - proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse") - proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest") - proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus") - proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse") - proto.RegisterType((*Member)(nil), "etcdserverpb.Member") - proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") - proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") - proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest") - proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse") - proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest") - proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") - proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") - proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") - proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") - proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") - proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") - proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse") - proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") - proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") - proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") - proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse") - proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest") - proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest") - proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest") - proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest") - proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest") - proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest") - proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest") - proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest") - proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest") - proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest") - proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest") - proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest") - proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest") - proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest") - proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest") - proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest") - proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse") - proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse") - proto.RegisterType((*AuthenticateResponse)(nil), 
"etcdserverpb.AuthenticateResponse") - proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse") - proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse") - proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse") - proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse") - proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse") - proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse") - proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse") - proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse") - proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse") - proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse") - proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") - proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") - proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") - proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) - proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) - proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for KV service - -type KVClient interface { - // Range gets the keys in the range from the key-value store. - Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. 
- Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. - Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) -} - -type kVClient struct { - cc *grpc.ClientConn -} - -func NewKVClient(cc *grpc.ClientConn) KVClient { - return &kVClient{cc} -} - -func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { - out := new(RangeResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { - out := new(PutResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { - out := new(DeleteRangeResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { - out := new(TxnResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { - out := new(CompactionResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for KV service - -type KVServer interface { - // Range gets the keys in the range from the key-value store. - Range(context.Context, *RangeRequest) (*RangeResponse, error) - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - Put(context.Context, *PutRequest) (*PutResponse, error) - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error) - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - Txn(context.Context, *TxnRequest) (*TxnResponse, error) - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. 
- Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) -} - -func RegisterKVServer(s *grpc.Server, srv KVServer) { - s.RegisterService(&_KV_serviceDesc, srv) -} - -func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RangeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Range(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Range", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Range(ctx, req.(*RangeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PutRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Put(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Put", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Put(ctx, req.(*PutRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRangeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).DeleteRange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/DeleteRange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TxnRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Txn(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Txn", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Txn(ctx, req.(*TxnRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CompactionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Compact(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Compact", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Compact(ctx, req.(*CompactionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _KV_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.KV", - HandlerType: (*KVServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Range", - Handler: _KV_Range_Handler, - }, - { - MethodName: "Put", - Handler: _KV_Put_Handler, - }, - { - MethodName: "DeleteRange", - Handler: _KV_DeleteRange_Handler, - }, - { - MethodName: "Txn", - Handler: _KV_Txn_Handler, - }, - { - 
MethodName: "Compact", - Handler: _KV_Compact_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -// Client API for Watch service - -type WatchClient interface { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) -} - -type watchClient struct { - cc *grpc.ClientConn -} - -func NewWatchClient(cc *grpc.ClientConn) WatchClient { - return &watchClient{cc} -} - -func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...) - if err != nil { - return nil, err - } - x := &watchWatchClient{stream} - return x, nil -} - -type Watch_WatchClient interface { - Send(*WatchRequest) error - Recv() (*WatchResponse, error) - grpc.ClientStream -} - -type watchWatchClient struct { - grpc.ClientStream -} - -func (x *watchWatchClient) Send(m *WatchRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *watchWatchClient) Recv() (*WatchResponse, error) { - m := new(WatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for Watch service - -type WatchServer interface { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - Watch(Watch_WatchServer) error -} - -func RegisterWatchServer(s *grpc.Server, srv WatchServer) { - s.RegisterService(&_Watch_serviceDesc, srv) -} - -func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WatchServer).Watch(&watchWatchServer{stream}) -} - -type Watch_WatchServer interface { - Send(*WatchResponse) error - Recv() (*WatchRequest, error) - grpc.ServerStream -} - -type watchWatchServer struct { - grpc.ServerStream -} - -func (x *watchWatchServer) Send(m *WatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *watchWatchServer) Recv() (*WatchRequest, error) { - m := new(WatchRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Watch_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Watch", - HandlerType: (*WatchServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Watch", - Handler: _Watch_Watch_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// Client API for Lease service - -type LeaseClient interface { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. 
- LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. - LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) - // LeaseLeases lists all existing leases. - LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) -} - -type leaseClient struct { - cc *grpc.ClientConn -} - -func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { - return &leaseClient{cc} -} - -func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { - out := new(LeaseGrantResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { - out := new(LeaseRevokeResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...) - if err != nil { - return nil, err - } - x := &leaseLeaseKeepAliveClient{stream} - return x, nil -} - -type Lease_LeaseKeepAliveClient interface { - Send(*LeaseKeepAliveRequest) error - Recv() (*LeaseKeepAliveResponse, error) - grpc.ClientStream -} - -type leaseLeaseKeepAliveClient struct { - grpc.ClientStream -} - -func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { - m := new(LeaseKeepAliveResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { - out := new(LeaseTimeToLiveResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) { - out := new(LeaseLeasesResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Lease service - -type LeaseServer interface { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. 
Each expired key generates a delete event in the event history. - LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error) - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. - LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error) - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - LeaseKeepAlive(Lease_LeaseKeepAliveServer) error - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) - // LeaseLeases lists all existing leases. - LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error) -} - -func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { - s.RegisterService(&_Lease_serviceDesc, srv) -} - -func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseGrantRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseGrant(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseGrant", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseRevokeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseRevoke(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseRevoke", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream}) -} - -type Lease_LeaseKeepAliveServer interface { - Send(*LeaseKeepAliveResponse) error - Recv() (*LeaseKeepAliveRequest, error) - grpc.ServerStream -} - -type leaseLeaseKeepAliveServer struct { - grpc.ServerStream -} - -func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { - m := new(LeaseKeepAliveRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseTimeToLiveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseTimeToLive(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) - } - return interceptor(ctx, in, info, 
handler) -} - -func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseLeasesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseLeases(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseLeases", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Lease_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Lease", - HandlerType: (*LeaseServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LeaseGrant", - Handler: _Lease_LeaseGrant_Handler, - }, - { - MethodName: "LeaseRevoke", - Handler: _Lease_LeaseRevoke_Handler, - }, - { - MethodName: "LeaseTimeToLive", - Handler: _Lease_LeaseTimeToLive_Handler, - }, - { - MethodName: "LeaseLeases", - Handler: _Lease_LeaseLeases_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "LeaseKeepAlive", - Handler: _Lease_LeaseKeepAlive_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// Client API for Cluster service - -type ClusterClient interface { - // MemberAdd adds a member into the cluster. - MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) - // MemberRemove removes an existing member from the cluster. - MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) - // MemberUpdate updates the member configuration. - MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) - // MemberList lists all the members in the cluster. - MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) -} - -type clusterClient struct { - cc *grpc.ClientConn -} - -func NewClusterClient(cc *grpc.ClientConn) ClusterClient { - return &clusterClient{cc} -} - -func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { - out := new(MemberAddResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { - out := new(MemberRemoveResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { - out := new(MemberUpdateResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { - out := new(MemberListResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Cluster service - -type ClusterServer interface { - // MemberAdd adds a member into the cluster. - MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) - // MemberRemove removes an existing member from the cluster. - MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error) - // MemberUpdate updates the member configuration. - MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) - // MemberList lists all the members in the cluster. - MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) -} - -func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { - s.RegisterService(&_Cluster_serviceDesc, srv) -} - -func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberRemoveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberRemove(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberRemove", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberUpdateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberUpdate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberUpdate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Cluster_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Cluster", - HandlerType: (*ClusterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "MemberAdd", - 
Handler: _Cluster_MemberAdd_Handler, - }, - { - MethodName: "MemberRemove", - Handler: _Cluster_MemberRemove_Handler, - }, - { - MethodName: "MemberUpdate", - Handler: _Cluster_MemberUpdate_Handler, - }, - { - MethodName: "MemberList", - Handler: _Cluster_MemberList_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -// Client API for Maintenance service - -type MaintenanceClient interface { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) - // Status gets the status of the member. - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) - // Defragment defragments a member's backend database to recover storage space. - Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. - Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) - // HashKV computes the hash of all MVCC keys up to a given revision. - HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) - // MoveLeader requests current leader node to transfer its leadership to transferee. - MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) -} - -type maintenanceClient struct { - cc *grpc.ClientConn -} - -func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { - return &maintenanceClient{cc} -} - -func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { - out := new(AlarmResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { - out := new(DefragmentResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { - out := new(HashResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) { - out := new(HashKVResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...) - if err != nil { - return nil, err - } - x := &maintenanceSnapshotClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Maintenance_SnapshotClient interface { - Recv() (*SnapshotResponse, error) - grpc.ClientStream -} - -type maintenanceSnapshotClient struct { - grpc.ClientStream -} - -func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { - m := new(SnapshotResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { - out := new(MoveLeaderResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Maintenance service - -type MaintenanceServer interface { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) - // Status gets the status of the member. - Status(context.Context, *StatusRequest) (*StatusResponse, error) - // Defragment defragments a member's backend database to recover storage space. - Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. - Hash(context.Context, *HashRequest) (*HashResponse, error) - // HashKV computes the hash of all MVCC keys up to a given revision. - HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error - // MoveLeader requests current leader node to transfer its leadership to transferee. 
- MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) -} - -func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { - s.RegisterService(&_Maintenance_serviceDesc, srv) -} - -func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AlarmRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Alarm(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Alarm", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DefragmentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Defragment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Defragment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HashRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Hash(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Hash", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HashKVRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).HashKV(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/HashKV", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SnapshotRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return 
srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) -} - -type Maintenance_SnapshotServer interface { - Send(*SnapshotResponse) error - grpc.ServerStream -} - -type maintenanceSnapshotServer struct { - grpc.ServerStream -} - -func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MoveLeaderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).MoveLeader(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/MoveLeader", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Maintenance_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Maintenance", - HandlerType: (*MaintenanceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Alarm", - Handler: _Maintenance_Alarm_Handler, - }, - { - MethodName: "Status", - Handler: _Maintenance_Status_Handler, - }, - { - MethodName: "Defragment", - Handler: _Maintenance_Defragment_Handler, - }, - { - MethodName: "Hash", - Handler: _Maintenance_Hash_Handler, - }, - { - MethodName: "HashKV", - Handler: _Maintenance_HashKV_Handler, - }, - { - MethodName: "MoveLeader", - Handler: _Maintenance_MoveLeader_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Snapshot", - Handler: _Maintenance_Snapshot_Handler, - ServerStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// Client API for Auth service - -type AuthClient interface { - // AuthEnable enables authentication. - AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) - // AuthDisable disables authentication. - AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) - // Authenticate processes an authenticate request. - Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) - // UserAdd adds a new user. - UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) - // UserGet gets detailed user information. - UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) - // UserList gets a list of all users. - UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) - // UserDelete deletes a specified user. - UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) - // UserChangePassword changes the password of a specified user. - UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) - // UserGrant grants a role to a specified user. - UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) - // UserRevokeRole revokes a role of specified user. - UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. 
- RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) - // RoleGet gets detailed role information. - RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) - // RoleList gets lists of all roles. - RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) - // RoleDelete deletes a specified role. - RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) - // RoleRevokePermission revokes a key or range permission of a specified role. - RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) -} - -type authClient struct { - cc *grpc.ClientConn -} - -func NewAuthClient(cc *grpc.ClientConn) AuthClient { - return &authClient{cc} -} - -func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { - out := new(AuthEnableResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { - out := new(AuthDisableResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { - out := new(AuthenticateResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { - out := new(AuthUserAddResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { - out := new(AuthUserGetResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { - out := new(AuthUserListResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { - out := new(AuthUserDeleteResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { - out := new(AuthUserChangePasswordResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { - out := new(AuthUserGrantRoleResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { - out := new(AuthUserRevokeRoleResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { - out := new(AuthRoleAddResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { - out := new(AuthRoleGetResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { - out := new(AuthRoleListResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { - out := new(AuthRoleDeleteResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { - out := new(AuthRoleGrantPermissionResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { - out := new(AuthRoleRevokePermissionResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Auth service - -type AuthServer interface { - // AuthEnable enables authentication. - AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) - // AuthDisable disables authentication. - AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) - // Authenticate processes an authenticate request. 
- Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) - // UserAdd adds a new user. - UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) - // UserGet gets detailed user information. - UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) - // UserList gets a list of all users. - UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error) - // UserDelete deletes a specified user. - UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) - // UserChangePassword changes the password of a specified user. - UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) - // UserGrant grants a role to a specified user. - UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) - // UserRevokeRole revokes a role of specified user. - UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. - RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) - // RoleGet gets detailed role information. - RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) - // RoleList gets lists of all roles. - RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error) - // RoleDelete deletes a specified role. - RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) - // RoleRevokePermission revokes a key or range permission of a specified role. 
- RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) -} - -func RegisterAuthServer(s *grpc.Server, srv AuthServer) { - s.RegisterService(&_Auth_serviceDesc, srv) -} - -func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthEnableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).AuthEnable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/AuthEnable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthDisableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).AuthDisable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/AuthDisable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthenticateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).Authenticate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/Authenticate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserGetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserGet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserGet", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserListRequest) - if 
err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserChangePasswordRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserChangePassword(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserChangePassword", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserGrantRoleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserGrantRole(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserGrantRole", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserRevokeRoleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserRevokeRole(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserRevokeRole", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleGetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleGet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleGet", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleGrantPermissionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleGrantPermission(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleRevokePermissionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleRevokePermission(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Auth_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Auth", - HandlerType: (*AuthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "AuthEnable", - Handler: 
_Auth_AuthEnable_Handler, - }, - { - MethodName: "AuthDisable", - Handler: _Auth_AuthDisable_Handler, - }, - { - MethodName: "Authenticate", - Handler: _Auth_Authenticate_Handler, - }, - { - MethodName: "UserAdd", - Handler: _Auth_UserAdd_Handler, - }, - { - MethodName: "UserGet", - Handler: _Auth_UserGet_Handler, - }, - { - MethodName: "UserList", - Handler: _Auth_UserList_Handler, - }, - { - MethodName: "UserDelete", - Handler: _Auth_UserDelete_Handler, - }, - { - MethodName: "UserChangePassword", - Handler: _Auth_UserChangePassword_Handler, - }, - { - MethodName: "UserGrantRole", - Handler: _Auth_UserGrantRole_Handler, - }, - { - MethodName: "UserRevokeRole", - Handler: _Auth_UserRevokeRole_Handler, - }, - { - MethodName: "RoleAdd", - Handler: _Auth_RoleAdd_Handler, - }, - { - MethodName: "RoleGet", - Handler: _Auth_RoleGet_Handler, - }, - { - MethodName: "RoleList", - Handler: _Auth_RoleList_Handler, - }, - { - MethodName: "RoleDelete", - Handler: _Auth_RoleDelete_Handler, - }, - { - MethodName: "RoleGrantPermission", - Handler: _Auth_RoleGrantPermission_Handler, - }, - { - MethodName: "RoleRevokePermission", - Handler: _Auth_RoleRevokePermission_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ClusterId != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) - } - if m.MemberId != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) - } - if m.Revision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - } - if m.RaftTerm != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - } - return i, nil -} - -func (m *RangeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - if m.Limit != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - } - if m.Revision != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - } - if m.SortOrder != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) - } - if m.SortTarget != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) - } - if m.Serializable { - dAtA[i] = 0x38 - i++ - if m.Serializable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.KeysOnly { - dAtA[i] = 0x40 - i++ - if m.KeysOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.CountOnly { - dAtA[i] = 0x48 - i++ - if m.CountOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.MinModRevision != 0 { - dAtA[i] = 0x50 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) - } - if m.MaxModRevision != 0 { - dAtA[i] = 0x58 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) - } - if 
m.MinCreateRevision != 0 { - dAtA[i] = 0x60 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) - } - if m.MaxCreateRevision != 0 { - dAtA[i] = 0x68 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) - } - return i, nil -} - -func (m *RangeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n1, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if len(m.Kvs) > 0 { - for _, msg := range m.Kvs { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.More { - dAtA[i] = 0x18 - i++ - if m.More { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Count != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Count)) - } - return i, nil -} - -func (m *PutRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.Value) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - if m.Lease != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - } - if m.PrevKv { - dAtA[i] = 0x20 - i++ - if m.PrevKv { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.IgnoreValue { - dAtA[i] = 0x28 - i++ - if m.IgnoreValue { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.IgnoreLease { - dAtA[i] = 0x30 - i++ - if m.IgnoreLease { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *PutResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n2, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.PrevKv != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size())) - n3, err := m.PrevKv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - return i, nil -} - -func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - if m.PrevKv { - 
dAtA[i] = 0x18 - i++ - if m.PrevKv { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n4, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.Deleted != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) - } - if len(m.PrevKvs) > 0 { - for _, msg := range m.PrevKvs { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RequestOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Request != nil { - nn5, err := m.Request.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn5 - } - return i, nil -} - -func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.RequestRange != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size())) - n6, err := m.RequestRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} -func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.RequestPut != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size())) - n7, err := m.RequestPut.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} -func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.RequestDeleteRange != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size())) - n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} -func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.RequestTxn != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size())) - n9, err := m.RequestTxn.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - return i, nil -} -func (m *ResponseOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Response != nil { - nn10, err := m.Response.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn10 - } - return i, nil -} - -func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ResponseRange != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) - n11, err := m.ResponseRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - return i, nil -} -func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ResponsePut != 
nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) - n12, err := m.ResponsePut.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - return i, nil -} -func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ResponseDeleteRange != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) - n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - return i, nil -} -func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ResponseTxn != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size())) - n14, err := m.ResponseTxn.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - return i, nil -} -func (m *Compare) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Compare) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Result != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Result)) - } - if m.Target != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Target)) - } - if len(m.Key) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if m.TargetUnion != nil { - nn15, err := m.TargetUnion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn15 - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x4 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - return i, nil -} - -func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { - i := 0 - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Version)) - return i, nil -} -func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { - i := 0 - dAtA[i] = 0x28 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) - return i, nil -} -func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { - i := 0 - dAtA[i] = 0x30 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) - return i, nil -} -func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Value != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - return i, nil -} -func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) { - i := 0 - dAtA[i] = 0x40 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - return i, nil -} -func (m *TxnRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Compare) > 0 { - for _, msg := range m.Compare { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Success) > 0 { - for _, msg := range m.Success { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Failure) > 0 { - for _, msg := range m.Failure { - dAtA[i] = 0x1a - i++ - i = 
encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *TxnResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n16, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.Succeeded { - dAtA[i] = 0x10 - i++ - if m.Succeeded { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Responses) > 0 { - for _, msg := range m.Responses { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Revision != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - } - if m.Physical { - dAtA[i] = 0x10 - i++ - if m.Physical { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n17, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - return i, nil -} - -func (m *HashRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Revision != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - } - return i, nil -} - -func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n18, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Hash != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - } - if m.CompactRevision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, 
i, uint64(m.CompactRevision)) - } - return i, nil -} - -func (m *HashResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n19, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.Hash != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - } - return i, nil -} - -func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n20, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - } - if m.RemainingBytes != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) - } - if len(m.Blob) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) - i += copy(dAtA[i:], m.Blob) - } - return i, nil -} - -func (m *WatchRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.RequestUnion != nil { - nn21, err := m.RequestUnion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn21 - } - return i, nil -} - -func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.CreateRequest != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) - n22, err := m.CreateRequest.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - return i, nil -} -func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.CancelRequest != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size())) - n23, err := m.CancelRequest.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - return i, nil -} -func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - if m.StartRevision != 0 { - dAtA[i] = 0x18 - i++ - i = 
encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) - } - if m.ProgressNotify { - dAtA[i] = 0x20 - i++ - if m.ProgressNotify { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Filters) > 0 { - dAtA25 := make([]byte, len(m.Filters)*10) - var j24 int - for _, num := range m.Filters { - for num >= 1<<7 { - dAtA25[j24] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j24++ - } - dAtA25[j24] = uint8(num) - j24++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintRpc(dAtA, i, uint64(j24)) - i += copy(dAtA[i:], dAtA25[:j24]) - } - if m.PrevKv { - dAtA[i] = 0x30 - i++ - if m.PrevKv { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.WatchId != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - } - return i, nil -} - -func (m *WatchResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n26, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - if m.WatchId != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - } - if m.Created { - dAtA[i] = 0x18 - i++ - if m.Created { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Canceled { - dAtA[i] = 0x20 - i++ - if m.Canceled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.CompactRevision != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) - } - if len(m.CancelReason) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) - i += copy(dAtA[i:], m.CancelReason) - } - if len(m.Events) > 0 { - for _, msg := range m.Events { - dAtA[i] = 0x5a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TTL != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - } - if m.ID != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n27, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - } - if m.ID != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, 
uint64(m.ID)) - } - if m.TTL != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - } - if len(m.Error) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - } - return i, nil -} - -func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n28, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - } - return i, nil -} - -func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n29, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - } - if m.ID != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - if m.TTL != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - } - return i, nil -} - -func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - if m.Keys { - dAtA[i] = 0x10 - i++ - if m.Keys { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n30, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 - } - if m.ID != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - 
if m.TTL != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - } - if m.GrantedTTL != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) - } - if len(m.Keys) > 0 { - for _, b := range m.Keys { - dAtA[i] = 0x2a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(b))) - i += copy(dAtA[i:], b) - } - } - return i, nil -} - -func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n31, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } - if len(m.Leases) > 0 { - for _, msg := range m.Leases { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Member) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Member) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - if len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.ClientURLs) > 0 { - for _, s := range m.ClientURLs { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n32, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - } - if m.Member != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) - n33, err := m.Member.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - return i, nil -} - -func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n34, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n35, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MemberListRequest) Marshal() 
(dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n36, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - } - if len(m.Members) > 0 { - for _, msg := range m.Members { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n37, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - } - return i, nil -} - -func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TargetID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) - } - return i, nil -} - -func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n38, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n38 - } - return i, nil -} - -func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Action != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Action)) - } - if m.MemberID != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - } - if m.Alarm != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - } - return i, nil -} - -func (m 
*AlarmMember) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MemberID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - } - if m.Alarm != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - } - return i, nil -} - -func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n39, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - } - if len(m.Alarms) > 0 { - for _, msg := range m.Alarms { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StatusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *StatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n40, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - } - if len(m.Version) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - } - if m.DbSize != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) - } - if m.Leader != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) - } - if m.RaftIndex != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) - } - if m.RaftTerm != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - } - return i, nil -} - -func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} 
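// NOTE: illustrative sketch only -- encodeVarint and marshalNamePassword are
// hypothetical stand-ins written for this review, not part of the etcd code
// removed in this hunk. They mirror the pattern every generated MarshalTo
// method in this file relies on: write a one-byte field tag, then a base-128
// varint length, then the raw bytes (e.g. tags 0xa and 0x12 for the
// name/password fields of AuthenticateRequest).
package main

import "fmt"

// encodeVarint writes v at offset i as a base-128 varint (7 bits per byte,
// high bit set on continuation bytes) and returns the new offset -- the same
// contract the calls to encodeVarintRpc in the surrounding code depend on.
func encodeVarint(dAtA []byte, i int, v uint64) int {
	for v >= 1<<7 {
		dAtA[i] = uint8(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	dAtA[i] = uint8(v)
	return i + 1
}

// marshalNamePassword lays out two length-delimited string fields:
// field 1 (tag 0xa) for the name and field 2 (tag 0x12) for the password.
func marshalNamePassword(name, password string) []byte {
	// one tag byte plus an up-to-10-byte varint length per field
	dAtA := make([]byte, 2*(1+10)+len(name)+len(password))
	i := 0
	if len(name) > 0 {
		dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
		i++
		i = encodeVarint(dAtA, i, uint64(len(name)))
		i += copy(dAtA[i:], name)
	}
	if len(password) > 0 {
		dAtA[i] = 0x12 // field 2, wire type 2 (length-delimited)
		i++
		i = encodeVarint(dAtA, i, uint64(len(password)))
		i += copy(dAtA[i:], password)
	}
	return dAtA[:i]
}

func main() {
	// prints: 0a 04 72 6f 6f 74 12 06 73 65 63 72 65 74
	fmt.Printf("% x\n", marshalNamePassword("root", "secret"))
}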
- -func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - return i, nil -} - -func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - return i, nil -} - -func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - return i, nil -} - -func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - return i, nil -} - -func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - return i, nil -} - -func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.User) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - } - if len(m.Role) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - } - return i, nil -} - -func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - 
_ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Role) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - } - return i, nil -} - -func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - return i, nil -} - -func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Role) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - } - return i, nil -} - -func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Role) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - } - return i, nil -} - -func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if m.Perm != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size())) - n41, err := m.Perm.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - } - return i, nil -} - -func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Role) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], 
m.Role) - } - if len(m.Key) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - return i, nil -} - -func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n42, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 - } - return i, nil -} - -func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n43, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - } - return i, nil -} - -func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n44, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - } - if len(m.Token) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - } - return i, nil -} - -func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n45, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n45 - } - return i, nil -} - -func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n46, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n46 - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err 
- } - return dAtA[:n], nil -} - -func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n47, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n47 - } - return i, nil -} - -func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n48, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - } - return i, nil -} - -func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n49, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n49 - } - return i, nil -} - -func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n50, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n50 - } - return i, nil -} - -func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n51, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n51 - } - return i, nil -} - -func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n52, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n52 - } - if len(m.Perm) > 0 { - for _, msg := range m.Perm { - dAtA[i] = 0x12 - i++ - i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n53, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n53 - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n54, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n54 - } - if len(m.Users) > 0 { - for _, s := range m.Users { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n55, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n55 - } - return i, nil -} - -func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n56, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n56 - } - return i, nil -} - -func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Header != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n57, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n57 - } - return i, nil -} - -func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *ResponseHeader) Size() (n int) { - var l int - _ = l - if m.ClusterId != 0 { - n += 1 + sovRpc(uint64(m.ClusterId)) - } - if m.MemberId != 0 { - n += 1 + sovRpc(uint64(m.MemberId)) - } - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - if m.RaftTerm != 0 { - n += 1 + sovRpc(uint64(m.RaftTerm)) - } - return n -} - -func (m *RangeRequest) 
Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Limit != 0 { - n += 1 + sovRpc(uint64(m.Limit)) - } - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - if m.SortOrder != 0 { - n += 1 + sovRpc(uint64(m.SortOrder)) - } - if m.SortTarget != 0 { - n += 1 + sovRpc(uint64(m.SortTarget)) - } - if m.Serializable { - n += 2 - } - if m.KeysOnly { - n += 2 - } - if m.CountOnly { - n += 2 - } - if m.MinModRevision != 0 { - n += 1 + sovRpc(uint64(m.MinModRevision)) - } - if m.MaxModRevision != 0 { - n += 1 + sovRpc(uint64(m.MaxModRevision)) - } - if m.MinCreateRevision != 0 { - n += 1 + sovRpc(uint64(m.MinCreateRevision)) - } - if m.MaxCreateRevision != 0 { - n += 1 + sovRpc(uint64(m.MaxCreateRevision)) - } - return n -} - -func (m *RangeResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Kvs) > 0 { - for _, e := range m.Kvs { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.More { - n += 2 - } - if m.Count != 0 { - n += 1 + sovRpc(uint64(m.Count)) - } - return n -} - -func (m *PutRequest) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovRpc(uint64(m.Lease)) - } - if m.PrevKv { - n += 2 - } - if m.IgnoreValue { - n += 2 - } - if m.IgnoreLease { - n += 2 - } - return n -} - -func (m *PutResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.PrevKv != nil { - l = m.PrevKv.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *DeleteRangeRequest) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.PrevKv { - n += 2 - } - return n -} - -func (m *DeleteRangeResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Deleted != 0 { - n += 1 + sovRpc(uint64(m.Deleted)) - } - if len(m.PrevKvs) > 0 { - for _, e := range m.PrevKvs { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *RequestOp) Size() (n int) { - var l int - _ = l - if m.Request != nil { - n += m.Request.Size() - } - return n -} - -func (m *RequestOp_RequestRange) Size() (n int) { - var l int - _ = l - if m.RequestRange != nil { - l = m.RequestRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *RequestOp_RequestPut) Size() (n int) { - var l int - _ = l - if m.RequestPut != nil { - l = m.RequestPut.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *RequestOp_RequestDeleteRange) Size() (n int) { - var l int - _ = l - if m.RequestDeleteRange != nil { - l = m.RequestDeleteRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *RequestOp_RequestTxn) Size() (n int) { - var l int - _ = l - if m.RequestTxn != nil { - l = m.RequestTxn.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp) Size() (n int) { - var l int - _ = l - if m.Response != nil { - n += m.Response.Size() - } - return n -} - -func (m *ResponseOp_ResponseRange) Size() (n int) { - var l int - _ = l - if m.ResponseRange != nil { - l = m.ResponseRange.Size() - n += 1 + l + 
sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp_ResponsePut) Size() (n int) { - var l int - _ = l - if m.ResponsePut != nil { - l = m.ResponsePut.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { - var l int - _ = l - if m.ResponseDeleteRange != nil { - l = m.ResponseDeleteRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp_ResponseTxn) Size() (n int) { - var l int - _ = l - if m.ResponseTxn != nil { - l = m.ResponseTxn.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *Compare) Size() (n int) { - var l int - _ = l - if m.Result != 0 { - n += 1 + sovRpc(uint64(m.Result)) - } - if m.Target != 0 { - n += 1 + sovRpc(uint64(m.Target)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.TargetUnion != nil { - n += m.TargetUnion.Size() - } - l = len(m.RangeEnd) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *Compare_Version) Size() (n int) { - var l int - _ = l - n += 1 + sovRpc(uint64(m.Version)) - return n -} -func (m *Compare_CreateRevision) Size() (n int) { - var l int - _ = l - n += 1 + sovRpc(uint64(m.CreateRevision)) - return n -} -func (m *Compare_ModRevision) Size() (n int) { - var l int - _ = l - n += 1 + sovRpc(uint64(m.ModRevision)) - return n -} -func (m *Compare_Value) Size() (n int) { - var l int - _ = l - if m.Value != nil { - l = len(m.Value) - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *Compare_Lease) Size() (n int) { - var l int - _ = l - n += 1 + sovRpc(uint64(m.Lease)) - return n -} -func (m *TxnRequest) Size() (n int) { - var l int - _ = l - if len(m.Compare) > 0 { - for _, e := range m.Compare { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.Success) > 0 { - for _, e := range m.Success { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.Failure) > 0 { - for _, e := range m.Failure { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *TxnResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Succeeded { - n += 2 - } - if len(m.Responses) > 0 { - for _, e := range m.Responses { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *CompactionRequest) Size() (n int) { - var l int - _ = l - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - if m.Physical { - n += 2 - } - return n -} - -func (m *CompactionResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *HashRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *HashKVRequest) Size() (n int) { - var l int - _ = l - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - return n -} - -func (m *HashKVResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Hash != 0 { - n += 1 + sovRpc(uint64(m.Hash)) - } - if m.CompactRevision != 0 { - n += 1 + sovRpc(uint64(m.CompactRevision)) - } - return n -} - -func (m *HashResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Hash != 0 { - n += 1 + sovRpc(uint64(m.Hash)) - } - return n -} - -func (m *SnapshotRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *SnapshotResponse) Size() (n int) { - var l int - 
_ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.RemainingBytes != 0 { - n += 1 + sovRpc(uint64(m.RemainingBytes)) - } - l = len(m.Blob) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *WatchRequest) Size() (n int) { - var l int - _ = l - if m.RequestUnion != nil { - n += m.RequestUnion.Size() - } - return n -} - -func (m *WatchRequest_CreateRequest) Size() (n int) { - var l int - _ = l - if m.CreateRequest != nil { - l = m.CreateRequest.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *WatchRequest_CancelRequest) Size() (n int) { - var l int - _ = l - if m.CancelRequest != nil { - l = m.CancelRequest.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *WatchCreateRequest) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.StartRevision != 0 { - n += 1 + sovRpc(uint64(m.StartRevision)) - } - if m.ProgressNotify { - n += 2 - } - if len(m.Filters) > 0 { - l = 0 - for _, e := range m.Filters { - l += sovRpc(uint64(e)) - } - n += 1 + sovRpc(uint64(l)) + l - } - if m.PrevKv { - n += 2 - } - return n -} - -func (m *WatchCancelRequest) Size() (n int) { - var l int - _ = l - if m.WatchId != 0 { - n += 1 + sovRpc(uint64(m.WatchId)) - } - return n -} - -func (m *WatchResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.WatchId != 0 { - n += 1 + sovRpc(uint64(m.WatchId)) - } - if m.Created { - n += 2 - } - if m.Canceled { - n += 2 - } - if m.CompactRevision != 0 { - n += 1 + sovRpc(uint64(m.CompactRevision)) - } - l = len(m.CancelReason) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *LeaseGrantRequest) Size() (n int) { - var l int - _ = l - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *LeaseGrantResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *LeaseRevokeRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *LeaseRevokeResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *LeaseKeepAliveRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *LeaseKeepAliveResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - return n -} - -func (m *LeaseTimeToLiveRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.Keys { - n += 2 - } - return n -} - -func (m *LeaseTimeToLiveResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.ID != 0 { - n += 1 + 
sovRpc(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - if m.GrantedTTL != 0 { - n += 1 + sovRpc(uint64(m.GrantedTTL)) - } - if len(m.Keys) > 0 { - for _, b := range m.Keys { - l = len(b) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *LeaseLeasesRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *LeaseStatus) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *LeaseLeasesResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *Member) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.ClientURLs) > 0 { - for _, s := range m.ClientURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberAddRequest) Size() (n int) { - var l int - _ = l - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberAddResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Member != nil { - l = m.Member.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberRemoveRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - return n -} - -func (m *MemberRemoveResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberUpdateRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberUpdateResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberListRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *MemberListResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *DefragmentRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *DefragmentResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *MoveLeaderRequest) Size() (n int) { - var l int - _ = l - if m.TargetID != 0 { - n += 1 + sovRpc(uint64(m.TargetID)) - } - return n -} - -func (m *MoveLeaderResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = 
m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AlarmRequest) Size() (n int) { - var l int - _ = l - if m.Action != 0 { - n += 1 + sovRpc(uint64(m.Action)) - } - if m.MemberID != 0 { - n += 1 + sovRpc(uint64(m.MemberID)) - } - if m.Alarm != 0 { - n += 1 + sovRpc(uint64(m.Alarm)) - } - return n -} - -func (m *AlarmMember) Size() (n int) { - var l int - _ = l - if m.MemberID != 0 { - n += 1 + sovRpc(uint64(m.MemberID)) - } - if m.Alarm != 0 { - n += 1 + sovRpc(uint64(m.Alarm)) - } - return n -} - -func (m *AlarmResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Alarms) > 0 { - for _, e := range m.Alarms { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *StatusRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *StatusResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.DbSize != 0 { - n += 1 + sovRpc(uint64(m.DbSize)) - } - if m.Leader != 0 { - n += 1 + sovRpc(uint64(m.Leader)) - } - if m.RaftIndex != 0 { - n += 1 + sovRpc(uint64(m.RaftIndex)) - } - if m.RaftTerm != 0 { - n += 1 + sovRpc(uint64(m.RaftTerm)) - } - return n -} - -func (m *AuthEnableRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthDisableRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthenticateRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserAddRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGetRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserDeleteRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserChangePasswordRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGrantRoleRequest) Size() (n int) { - var l int - _ = l - l = len(m.User) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserRevokeRoleRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleAddRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGetRequest) Size() (n int) { - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserListRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthRoleListRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthRoleDeleteRequest) Size() (n int) { - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + 
sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGrantPermissionRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Perm != nil { - l = m.Perm.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleRevokePermissionRequest) Size() (n int) { - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthEnableResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthDisableResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthenticateResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Token) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserAddResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGetResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthUserDeleteResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserChangePasswordResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGrantRoleResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserRevokeRoleResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleAddResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGetResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Perm) > 0 { - for _, e := range m.Perm { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthRoleListResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthUserListResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Users) > 0 { - for _, s := range m.Users { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthRoleDeleteResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGrantPermissionResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = 
m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleRevokePermissionResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func sovRpc(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRpc(x uint64) (n int) { - return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResponseHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) - } - m.ClusterId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterId |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) - } - m.MemberId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberId |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) - } - m.RaftTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftTerm |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeRequest: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType) - } - m.SortOrder = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType) - } - m.SortTarget = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Serializable = bool(v != 0) - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.KeysOnly = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CountOnly = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) - } - m.MinModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinModRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) - } - m.MaxModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxModRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) - } - m.MinCreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinCreateRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) - } - m.MaxCreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxCreateRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) - if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.More = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PutRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
[Elided: deleted vendored code — the generated gogo/protobuf Unmarshal methods from etcd's etcdserverpb rpc.pb.go, continuing from PutRequest above and covering PutResponse, DeleteRangeRequest, DeleteRangeResponse, RequestOp, ResponseOp, Compare, TxnRequest, TxnResponse, CompactionRequest, CompactionResponse, HashRequest, HashKVRequest, HashKVResponse, HashResponse, SnapshotRequest, SnapshotResponse, WatchRequest, WatchCreateRequest, WatchCancelRequest, WatchResponse, LeaseGrantRequest, LeaseGrantResponse, LeaseRevokeRequest, LeaseRevokeResponse, LeaseKeepAliveRequest, LeaseKeepAliveResponse, LeaseTimeToLiveRequest, and LeaseTimeToLiveResponse (the deletion continues below); each method is mechanical varint and length-delimited field decoding.]
%d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) - } - m.GrantedTTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GrantedTTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) - copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, &LeaseStatus{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Member) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Member: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Member == nil { - m.Member = &Member{} - } - if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*MemberRemoveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var 
wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType) - } - m.TargetID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TargetID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) - } - m.MemberID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - m.Alarm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Alarm |= (AlarmType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmMember) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) - } - m.MemberID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - m.Alarm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Alarm |= (AlarmType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Alarms = append(m.Alarms, &AlarmMember{}) - if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType) - } - m.DbSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DbSize |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - m.Leader = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Leader |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) - } - m.RaftIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftIndex |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) - } - m.RaftTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftTerm |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - 
if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if 
[Deleted vendored code, collapsed. The remainder of this hunk removes the rest of the generated gogo/protobuf unmarshaling code for the etcd RPC API (etcdserverpb rpc.pb.go): the Unmarshal methods for the Auth* request and response messages (AuthUserGrantRoleRequest, AuthUserRevokeRoleRequest, AuthRoleAddRequest, AuthRoleGetRequest, AuthUserListRequest, AuthRoleListRequest, AuthRoleDeleteRequest, AuthRoleGrantPermissionRequest, AuthRoleRevokePermissionRequest, and AuthEnableResponse through AuthRoleRevokePermissionResponse), the skipRpc helper, the ErrInvalidLengthRpc and ErrIntOverflowRpc error values, the init() that registers "rpc.proto", and the gzipped fileDescriptorRpc byte array, which is cut off mid-array in the source.]
0xdd, 0x1b, 0xcd, 0x94, 0x64, 0x01, 0x57, 0x7f, 0x4a, 0x48, 0xdf, 0xa0, 0x8c, 0xc2, 0xf1, - 0xe8, 0x1f, 0x34, 0x98, 0x0e, 0x81, 0x9d, 0x28, 0x4e, 0x43, 0x04, 0x4a, 0xad, 0xdd, 0xbf, 0x80, - 0x4b, 0x18, 0xf2, 0x80, 0x19, 0xb2, 0x80, 0x6f, 0x44, 0x5c, 0xe1, 0x76, 0x7a, 0xc4, 0xb5, 0x84, - 0x31, 0xde, 0x32, 0x70, 0x60, 0x32, 0x76, 0x19, 0x02, 0x40, 0x67, 0xec, 0x32, 0x04, 0x51, 0xcd, - 0x11, 0xcb, 0xc0, 0xd1, 0x48, 0xba, 0xc5, 0x7f, 0x97, 0x86, 0xdc, 0x2a, 0xff, 0x05, 0x22, 0x72, - 0x20, 0xef, 0x21, 0x80, 0x68, 0x3e, 0x0e, 0x8d, 0xf1, 0x6f, 0x0b, 0xb5, 0xdb, 0x89, 0xfd, 0x42, - 0xfb, 0x7d, 0xa6, 0xfd, 0x36, 0xae, 0x49, 0xed, 0xe2, 0x87, 0x8e, 0x75, 0x7e, 0xed, 0xaf, 0x1b, - 0xed, 0x36, 0x9d, 0xf8, 0xdf, 0x42, 0x51, 0x85, 0xe9, 0xd0, 0x9d, 0x58, 0x14, 0x48, 0x45, 0xfa, - 0x6a, 0x78, 0x14, 0x8b, 0xd0, 0xbe, 0xc8, 0xb4, 0x63, 0x7c, 0x2b, 0x41, 0xbb, 0xcd, 0xd8, 0x03, - 0x06, 0x70, 0x98, 0x2d, 0xde, 0x80, 0x00, 0x8a, 0x17, 0x6f, 0x40, 0x10, 0xa5, 0xbb, 0xd0, 0x80, - 0x01, 0x63, 0xa7, 0x06, 0xbc, 0x06, 0xf0, 0x41, 0x35, 0x14, 0xeb, 0x57, 0xe5, 0xea, 0x14, 0x0e, - 0xf9, 0x28, 0x1e, 0x17, 0xdd, 0x73, 0x21, 0xd5, 0xdd, 0x8e, 0x43, 0x43, 0x7f, 0xf9, 0x9b, 0x2c, - 0x14, 0x5e, 0x1a, 0x1d, 0xd3, 0x25, 0xa6, 0x61, 0xb6, 0x08, 0x3a, 0x82, 0x0c, 0x2b, 0x8d, 0xe1, - 0x2c, 0xa7, 0x62, 0x4d, 0xe1, 0x2c, 0x17, 0x00, 0x62, 0xf0, 0x3d, 0xa6, 0x79, 0x1e, 0xcf, 0x49, - 0xcd, 0x3d, 0x5f, 0x7c, 0x9d, 0x61, 0x28, 0x74, 0xc2, 0x7f, 0x09, 0x59, 0x01, 0xcf, 0x87, 0x84, - 0x05, 0xb0, 0x95, 0xda, 0xcd, 0xf8, 0xce, 0xa4, 0xed, 0xa5, 0xaa, 0x72, 0x18, 0x2f, 0xd5, 0xf5, - 0x06, 0xc0, 0x07, 0x08, 0xc3, 0xce, 0x8d, 0xe0, 0x89, 0xb5, 0x85, 0x64, 0x06, 0xa1, 0xf7, 0x11, - 0xd3, 0x7b, 0x17, 0xcf, 0xc7, 0xe9, 0x6d, 0x7b, 0xfc, 0x54, 0xf7, 0x21, 0x4c, 0xae, 0x1b, 0xce, - 0x09, 0x0a, 0x15, 0x3b, 0xe5, 0x47, 0x03, 0xb5, 0x5a, 0x5c, 0x97, 0xd0, 0x74, 0x97, 0x69, 0xba, - 0x85, 0xab, 0x71, 0x9a, 0x4e, 0x0c, 0x87, 0x56, 0x0f, 0x74, 0x02, 0x59, 0xfe, 0x3b, 0x82, 0xb0, - 0x2f, 0x03, 0xbf, 0x45, 0x08, 0xfb, 0x32, 0xf8, 0xd3, 0x83, 0xcb, 0x69, 0x72, 0x61, 0x4a, 0x3e, - 0xde, 0xa3, 0x5b, 0xa1, 0xa5, 0x09, 0x3e, 0xf4, 0xd7, 0xe6, 0x93, 0xba, 0x85, 0xbe, 0x87, 0x4c, - 0xdf, 0x1d, 0x7c, 0x33, 0x76, 0xed, 0x04, 0xf7, 0x33, 0xed, 0xf1, 0xdb, 0x1a, 0x2d, 0x13, 0xe0, - 0x83, 0xac, 0x91, 0xe8, 0x08, 0xe3, 0xb5, 0x91, 0xe8, 0x88, 0xe0, 0xb3, 0x78, 0x99, 0x29, 0x7f, - 0x8a, 0x1f, 0xc6, 0x29, 0x77, 0x6d, 0xc3, 0x74, 0x8e, 0x88, 0xfd, 0x16, 0x07, 0xd3, 0x9c, 0x93, - 0x4e, 0x9f, 0x46, 0xca, 0xef, 0xa7, 0x61, 0x92, 0x9e, 0x47, 0x69, 0x79, 0xf6, 0xaf, 0xf1, 0x61, - 0x6b, 0x22, 0xe0, 0x59, 0xd8, 0x9a, 0x28, 0x02, 0x10, 0x2d, 0xcf, 0xec, 0xb7, 0xe6, 0x84, 0x31, - 0x51, 0xaf, 0x3b, 0x50, 0x50, 0xee, 0xfa, 0x28, 0x46, 0x60, 0x10, 0x99, 0x0b, 0xd7, 0x85, 0x18, - 0xa0, 0x00, 0xdf, 0x66, 0x3a, 0xe7, 0xf0, 0x6c, 0x40, 0x67, 0x9b, 0x73, 0x51, 0xa5, 0x7f, 0x0d, - 0x45, 0x15, 0x13, 0x40, 0x31, 0x32, 0x43, 0xc8, 0x5f, 0x38, 0x25, 0xc6, 0x41, 0x0a, 0xd1, 0xec, - 0xe0, 0xfd, 0xae, 0x5e, 0xb2, 0x52, 0xe5, 0x7d, 0xc8, 0x09, 0xa0, 0x20, 0x6e, 0xb6, 0x41, 0xa8, - 0x30, 0x6e, 0xb6, 0x21, 0x94, 0x21, 0x7a, 0xcc, 0x63, 0x5a, 0xe9, 0x7d, 0x48, 0x96, 0x20, 0xa1, - 0xf1, 0x05, 0x71, 0x93, 0x34, 0xfa, 0xd8, 0x57, 0x92, 0x46, 0xe5, 0x2e, 0x3a, 0x4a, 0xe3, 0x31, - 0x71, 0x45, 0x2c, 0xc9, 0x7b, 0x1e, 0x4a, 0x10, 0xa8, 0xa6, 0x7c, 0x3c, 0x8a, 0x25, 0xe9, 0x54, - 0xee, 0x2b, 0x15, 0xf9, 0x1e, 0x7d, 0x0e, 0xe0, 0x43, 0x1a, 0xe1, 0xd3, 0x56, 0x2c, 0x2e, 0x1a, - 0x3e, 0x6d, 0xc5, 0xa3, 0x22, 0xd1, 0xfc, 0xe1, 0xeb, 0xe6, 0x17, 0x03, 0xaa, 0xfd, 0x5f, 0x34, - 0x40, 0x51, 0x04, 0x04, 0x3d, 
0x89, 0xd7, 0x10, 0x8b, 0xb8, 0xd6, 0x9e, 0x5e, 0x8e, 0x39, 0xa9, - 0x44, 0xf8, 0x66, 0xb5, 0xd8, 0x88, 0xfe, 0x6b, 0x6a, 0xd8, 0x97, 0x1a, 0x94, 0x02, 0x10, 0x0a, - 0x7a, 0x90, 0xb0, 0xc6, 0x21, 0xd0, 0xb6, 0xf6, 0xf0, 0x42, 0xbe, 0xa4, 0x93, 0x98, 0xb2, 0x23, - 0xe4, 0x41, 0xfc, 0x1f, 0x35, 0x28, 0x07, 0x61, 0x17, 0x94, 0x20, 0x3f, 0x02, 0xfc, 0xd6, 0x16, - 0x2f, 0x66, 0xbc, 0x78, 0xa9, 0xfc, 0xb3, 0x79, 0x1f, 0x72, 0x02, 0xac, 0x89, 0x0b, 0x88, 0x20, - 0x6c, 0x1c, 0x17, 0x10, 0x21, 0xa4, 0x27, 0x21, 0x20, 0x6c, 0xab, 0x4b, 0x94, 0x10, 0x14, 0x88, - 0x4e, 0x92, 0xc6, 0xd1, 0x21, 0x18, 0x82, 0x83, 0x46, 0x69, 0xf4, 0x43, 0x50, 0xc2, 0x39, 0x28, - 0x41, 0xe0, 0x05, 0x21, 0x18, 0x46, 0x83, 0x12, 0x42, 0x90, 0x29, 0x55, 0x42, 0xd0, 0x07, 0x5f, - 0xe2, 0x42, 0x30, 0x82, 0x88, 0xc7, 0x85, 0x60, 0x14, 0xbf, 0x49, 0x58, 0x57, 0xa6, 0x3b, 0x10, - 0x82, 0x33, 0x31, 0x58, 0x0d, 0x7a, 0x9a, 0xe0, 0xd0, 0x58, 0xb0, 0xbd, 0xf6, 0xd6, 0x25, 0xb9, - 0x47, 0xee, 0x7d, 0xbe, 0x14, 0x72, 0xef, 0x7f, 0xad, 0xc1, 0x6c, 0x1c, 0xd6, 0x83, 0x12, 0x74, - 0x25, 0x00, 0xf5, 0xb5, 0xa5, 0xcb, 0xb2, 0x5f, 0xec, 0x35, 0x2f, 0x1a, 0x9e, 0x57, 0xfe, 0xfb, - 0xbb, 0x79, 0xed, 0xff, 0xbe, 0x9b, 0xd7, 0x7e, 0xf1, 0xdd, 0xbc, 0xf6, 0xaf, 0xbf, 0x9c, 0x9f, - 0x38, 0xcc, 0xb2, 0xff, 0xe1, 0xf5, 0xee, 0x1f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x74, 0x55, 0x61, - 0xe6, 0x68, 0x36, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto deleted file mode 100644 index e80e6e7d0..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ /dev/null @@ -1,1053 +0,0 @@ -syntax = "proto3"; -package etcdserverpb; - -import "gogoproto/gogo.proto"; -import "etcd/mvcc/mvccpb/kv.proto"; -import "etcd/auth/authpb/auth.proto"; - -// for grpc-gateway -import "google/api/annotations.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service KV { - // Range gets the keys in the range from the key-value store. - rpc Range(RangeRequest) returns (RangeResponse) { - option (google.api.http) = { - post: "/v3beta/kv/range" - body: "*" - }; - } - - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - rpc Put(PutRequest) returns (PutResponse) { - option (google.api.http) = { - post: "/v3beta/kv/put" - body: "*" - }; - } - - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { - option (google.api.http) = { - post: "/v3beta/kv/deleterange" - body: "*" - }; - } - - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - rpc Txn(TxnRequest) returns (TxnResponse) { - option (google.api.http) = { - post: "/v3beta/kv/txn" - body: "*" - }; - } - - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. 
- rpc Compact(CompactionRequest) returns (CompactionResponse) { - option (google.api.http) = { - post: "/v3beta/kv/compaction" - body: "*" - }; - } -} - -service Watch { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - rpc Watch(stream WatchRequest) returns (stream WatchResponse) { - option (google.api.http) = { - post: "/v3beta/watch" - body: "*" - }; - } -} - -service Lease { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. - rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { - option (google.api.http) = { - post: "/v3beta/lease/grant" - body: "*" - }; - } - - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. - rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { - option (google.api.http) = { - post: "/v3beta/kv/lease/revoke" - body: "*" - }; - } - - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { - option (google.api.http) = { - post: "/v3beta/lease/keepalive" - body: "*" - }; - } - - // LeaseTimeToLive retrieves lease information. - rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { - option (google.api.http) = { - post: "/v3beta/kv/lease/timetolive" - body: "*" - }; - } - - // LeaseLeases lists all existing leases. - rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { - option (google.api.http) = { - post: "/v3beta/kv/lease/leases" - body: "*" - }; - } -} - -service Cluster { - // MemberAdd adds a member into the cluster. - rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { - option (google.api.http) = { - post: "/v3beta/cluster/member/add" - body: "*" - }; - } - - // MemberRemove removes an existing member from the cluster. - rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { - option (google.api.http) = { - post: "/v3beta/cluster/member/remove" - body: "*" - }; - } - - // MemberUpdate updates the member configuration. - rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { - option (google.api.http) = { - post: "/v3beta/cluster/member/update" - body: "*" - }; - } - - // MemberList lists all the members in the cluster. - rpc MemberList(MemberListRequest) returns (MemberListResponse) { - option (google.api.http) = { - post: "/v3beta/cluster/member/list" - body: "*" - }; - } -} - -service Maintenance { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - rpc Alarm(AlarmRequest) returns (AlarmResponse) { - option (google.api.http) = { - post: "/v3beta/maintenance/alarm" - body: "*" - }; - } - - // Status gets the status of the member. 
- rpc Status(StatusRequest) returns (StatusResponse) { - option (google.api.http) = { - post: "/v3beta/maintenance/status" - body: "*" - }; - } - - // Defragment defragments a member's backend database to recover storage space. - rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { - option (google.api.http) = { - post: "/v3beta/maintenance/defragment" - body: "*" - }; - } - - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. - rpc Hash(HashRequest) returns (HashResponse) { - option (google.api.http) = { - post: "/v3beta/maintenance/hash" - body: "*" - }; - } - - // HashKV computes the hash of all MVCC keys up to a given revision. - rpc HashKV(HashKVRequest) returns (HashKVResponse) { - option (google.api.http) = { - post: "/v3beta/maintenance/hash" - body: "*" - }; - } - - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { - option (google.api.http) = { - post: "/v3beta/maintenance/snapshot" - body: "*" - }; - } - - // MoveLeader requests current leader node to transfer its leadership to transferee. - rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { - option (google.api.http) = { - post: "/v3beta/maintenance/transfer-leadership" - body: "*" - }; - } -} - -service Auth { - // AuthEnable enables authentication. - rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { - option (google.api.http) = { - post: "/v3beta/auth/enable" - body: "*" - }; - } - - // AuthDisable disables authentication. - rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { - option (google.api.http) = { - post: "/v3beta/auth/disable" - body: "*" - }; - } - - // Authenticate processes an authenticate request. - rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { - option (google.api.http) = { - post: "/v3beta/auth/authenticate" - body: "*" - }; - } - - // UserAdd adds a new user. - rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { - option (google.api.http) = { - post: "/v3beta/auth/user/add" - body: "*" - }; - } - - // UserGet gets detailed user information. - rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { - option (google.api.http) = { - post: "/v3beta/auth/user/get" - body: "*" - }; - } - - // UserList gets a list of all users. - rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { - option (google.api.http) = { - post: "/v3beta/auth/user/list" - body: "*" - }; - } - - // UserDelete deletes a specified user. - rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { - option (google.api.http) = { - post: "/v3beta/auth/user/delete" - body: "*" - }; - } - - // UserChangePassword changes the password of a specified user. - rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { - option (google.api.http) = { - post: "/v3beta/auth/user/changepw" - body: "*" - }; - } - - // UserGrant grants a role to a specified user. - rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { - option (google.api.http) = { - post: "/v3beta/auth/user/grant" - body: "*" - }; - } - - // UserRevokeRole revokes a role of specified user. - rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { - option (google.api.http) = { - post: "/v3beta/auth/user/revoke" - body: "*" - }; - } - - // RoleAdd adds a new role. 
- rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { - option (google.api.http) = { - post: "/v3beta/auth/role/add" - body: "*" - }; - } - - // RoleGet gets detailed role information. - rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { - option (google.api.http) = { - post: "/v3beta/auth/role/get" - body: "*" - }; - } - - // RoleList gets lists of all roles. - rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { - option (google.api.http) = { - post: "/v3beta/auth/role/list" - body: "*" - }; - } - - // RoleDelete deletes a specified role. - rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { - option (google.api.http) = { - post: "/v3beta/auth/role/delete" - body: "*" - }; - } - - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { - option (google.api.http) = { - post: "/v3beta/auth/role/grant" - body: "*" - }; - } - - // RoleRevokePermission revokes a key or range permission of a specified role. - rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { - option (google.api.http) = { - post: "/v3beta/auth/role/revoke" - body: "*" - }; - } -} - -message ResponseHeader { - // cluster_id is the ID of the cluster which sent the response. - uint64 cluster_id = 1; - // member_id is the ID of the member which sent the response. - uint64 member_id = 2; - // revision is the key-value store revision when the request was applied. - int64 revision = 3; - // raft_term is the raft term when the request was applied. - uint64 raft_term = 4; -} - -message RangeRequest { - enum SortOrder { - NONE = 0; // default, no sorting - ASCEND = 1; // lowest target value first - DESCEND = 2; // highest target value first - } - enum SortTarget { - KEY = 0; - VERSION = 1; - CREATE = 2; - MOD = 3; - VALUE = 4; - } - - // key is the first key for the range. If range_end is not given, the request only looks up key. - bytes key = 1; - // range_end is the upper bound on the requested range [key, range_end). - // If range_end is '\0', the range is all keys >= key. - // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), - // then the range request gets all keys prefixed with key. - // If both key and range_end are '\0', then the range request returns all keys. - bytes range_end = 2; - // limit is a limit on the number of keys returned for the request. When limit is set to 0, - // it is treated as no limit. - int64 limit = 3; - // revision is the point-in-time of the key-value store to use for the range. - // If revision is less or equal to zero, the range is over the newest key-value store. - // If the revision has been compacted, ErrCompacted is returned as a response. - int64 revision = 4; - - // sort_order is the order for returned sorted results. - SortOrder sort_order = 5; - - // sort_target is the key-value field to use for sorting. - SortTarget sort_target = 6; - - // serializable sets the range request to use serializable member-local reads. - // Range requests are linearizable by default; linearizable requests have higher - // latency and lower throughput than serializable requests but reflect the current - // consensus of the cluster. For better performance, in exchange for possible stale reads, - // a serializable range request is served locally without needing to reach consensus - // with other nodes in the cluster. 
- bool serializable = 7; - - // keys_only when set returns only the keys and not the values. - bool keys_only = 8; - - // count_only when set returns only the count of the keys in the range. - bool count_only = 9; - - // min_mod_revision is the lower bound for returned key mod revisions; all keys with - // lesser mod revisions will be filtered away. - int64 min_mod_revision = 10; - - // max_mod_revision is the upper bound for returned key mod revisions; all keys with - // greater mod revisions will be filtered away. - int64 max_mod_revision = 11; - - // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. - int64 min_create_revision = 12; - - // max_create_revision is the upper bound for returned key create revisions; all keys with - // greater create revisions will be filtered away. - int64 max_create_revision = 13; -} - -message RangeResponse { - ResponseHeader header = 1; - // kvs is the list of key-value pairs matched by the range request. - // kvs is empty when count is requested. - repeated mvccpb.KeyValue kvs = 2; - // more indicates if there are more keys to return in the requested range. - bool more = 3; - // count is set to the number of keys within the range when requested. - int64 count = 4; -} - -message PutRequest { - // key is the key, in bytes, to put into the key-value store. - bytes key = 1; - // value is the value, in bytes, to associate with the key in the key-value store. - bytes value = 2; - // lease is the lease ID to associate with the key in the key-value store. A lease - // value of 0 indicates no lease. - int64 lease = 3; - - // If prev_kv is set, etcd gets the previous key-value pair before changing it. - // The previous key-value pair will be returned in the put response. - bool prev_kv = 4; - - // If ignore_value is set, etcd updates the key using its current value. - // Returns an error if the key does not exist. - bool ignore_value = 5; - - // If ignore_lease is set, etcd updates the key using its current lease. - // Returns an error if the key does not exist. - bool ignore_lease = 6; -} - -message PutResponse { - ResponseHeader header = 1; - // if prev_kv is set in the request, the previous key-value pair will be returned. - mvccpb.KeyValue prev_kv = 2; -} - -message DeleteRangeRequest { - // key is the first key to delete in the range. - bytes key = 1; - // range_end is the key following the last key to delete for the range [key, range_end). - // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all the keys - // with the prefix (the given key). - // If range_end is '\0', the range is all keys greater than or equal to the key argument. - bytes range_end = 2; - - // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delete response. - bool prev_kv = 3; -} - -message DeleteRangeResponse { - ResponseHeader header = 1; - // deleted is the number of keys deleted by the delete range request. - int64 deleted = 2; - // if prev_kv is set in the request, the previous key-value pairs will be returned. - repeated mvccpb.KeyValue prev_kvs = 3; -} - -message RequestOp { - // request is a union of request types accepted by a transaction. 
- oneof request { - RangeRequest request_range = 1; - PutRequest request_put = 2; - DeleteRangeRequest request_delete_range = 3; - TxnRequest request_txn = 4; - } -} - -message ResponseOp { - // response is a union of response types returned by a transaction. - oneof response { - RangeResponse response_range = 1; - PutResponse response_put = 2; - DeleteRangeResponse response_delete_range = 3; - TxnResponse response_txn = 4; - } -} - -message Compare { - enum CompareResult { - EQUAL = 0; - GREATER = 1; - LESS = 2; - NOT_EQUAL = 3; - } - enum CompareTarget { - VERSION = 0; - CREATE = 1; - MOD = 2; - VALUE= 3; - LEASE = 4; - } - // result is logical comparison operation for this comparison. - CompareResult result = 1; - // target is the key-value field to inspect for the comparison. - CompareTarget target = 2; - // key is the subject key for the comparison operation. - bytes key = 3; - oneof target_union { - // version is the version of the given key - int64 version = 4; - // create_revision is the creation revision of the given key - int64 create_revision = 5; - // mod_revision is the last modified revision of the given key. - int64 mod_revision = 6; - // value is the value of the given key, in bytes. - bytes value = 7; - // lease is the lease id of the given key. - int64 lease = 8; - // leave room for more target_union field tags, jump to 64 - } - - // range_end compares the given target to all keys in the range [key, range_end). - // See RangeRequest for more details on key ranges. - bytes range_end = 64; - // TODO: fill out with most of the rest of RangeRequest fields when needed. -} - -// From google paxosdb paper: -// Our implementation hinges around a powerful primitive which we call MultiOp. All other database -// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically -// and consists of three components: -// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check -// for the absence or presence of a value, or compare with a given value. Two different tests in the guard -// may apply to the same or different entries in the database. All tests in the guard are applied and -// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise -// it executes f op (see item 3 below). -// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or -// lookup operation, and applies to a single database entry. Two different operations in the list may apply -// to the same or different entries in the database. These operations are executed -// if guard evaluates to -// true. -// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. -message TxnRequest { - // compare is a list of predicates representing a conjunction of terms. - // If the comparisons succeed, then the success requests will be processed in order, - // and the response will contain their respective responses in order. - // If the comparisons fail, then the failure requests will be processed in order, - // and the response will contain their respective responses in order. - repeated Compare compare = 1; - // success is a list of requests which will be applied when compare evaluates to true. - repeated RequestOp success = 2; - // failure is a list of requests which will be applied when compare evaluates to false. 
- repeated RequestOp failure = 3; -} - -message TxnResponse { - ResponseHeader header = 1; - // succeeded is set to true if the compare evaluated to true or false otherwise. - bool succeeded = 2; - // responses is a list of responses corresponding to the results from applying - // success if succeeded is true or failure if succeeded is false. - repeated ResponseOp responses = 3; -} - -// CompactionRequest compacts the key-value store up to a given revision. All superseded keys -// with a revision less than the compaction revision will be removed. -message CompactionRequest { - // revision is the key-value store revision for the compaction operation. - int64 revision = 1; - // physical is set so the RPC will wait until the compaction is physically - // applied to the local database such that compacted entries are totally - // removed from the backend database. - bool physical = 2; -} - -message CompactionResponse { - ResponseHeader header = 1; -} - -message HashRequest { -} - -message HashKVRequest { - // revision is the key-value store revision for the hash operation. - int64 revision = 1; -} - -message HashKVResponse { - ResponseHeader header = 1; - // hash is the hash value computed from the responding member's MVCC keys up to a given revision. - uint32 hash = 2; - // compact_revision is the compacted revision of key-value store when hash begins. - int64 compact_revision = 3; -} - -message HashResponse { - ResponseHeader header = 1; - // hash is the hash value computed from the responding member's KV's backend. - uint32 hash = 2; -} - -message SnapshotRequest { -} - -message SnapshotResponse { - // header has the current key-value store information. The first header in the snapshot - // stream indicates the point in time of the snapshot. - ResponseHeader header = 1; - - // remaining_bytes is the number of blob bytes to be sent after this message - uint64 remaining_bytes = 2; - - // blob contains the next chunk of the snapshot in the snapshot stream. - bytes blob = 3; -} - -message WatchRequest { - // request_union is a request to either create a new watcher or cancel an existing watcher. - oneof request_union { - WatchCreateRequest create_request = 1; - WatchCancelRequest cancel_request = 2; - } -} - -message WatchCreateRequest { - // key is the key to register for watching. - bytes key = 1; - // range_end is the end of the range [key, range_end) to watch. If range_end is not given, - // only the key argument is watched. If range_end is equal to '\0', all keys greater than - // or equal to the key argument are watched. - // If the range_end is one bit larger than the given key, - // then all keys with the prefix (the given key) will be watched. - bytes range_end = 2; - // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". - int64 start_revision = 3; - // progress_notify is set so that the etcd server will periodically send a WatchResponse with - // no events to the new watcher if there are no recent events. It is useful when clients - // wish to recover a disconnected watcher starting from a recent known revision. - // The etcd server may decide how often it will send notifications based on current load. - bool progress_notify = 4; - - enum FilterType { - // filter out put event. - NOPUT = 0; - // filter out delete event. - NODELETE = 1; - } - // filters filter the events at server side before it sends back to the watcher. - repeated FilterType filters = 5; - - // If prev_kv is set, created watcher gets the previous KV before the event happens. 
- // If the previous KV is already compacted, nothing will be returned. - bool prev_kv = 6; -} - -message WatchCancelRequest { - // watch_id is the watcher id to cancel so that no more events are transmitted. - int64 watch_id = 1; -} - -message WatchResponse { - ResponseHeader header = 1; - // watch_id is the ID of the watcher that corresponds to the response. - int64 watch_id = 2; - // created is set to true if the response is for a create watch request. - // The client should record the watch_id and expect to receive events for - // the created watcher from the same stream. - // All events sent to the created watcher will attach with the same watch_id. - bool created = 3; - // canceled is set to true if the response is for a cancel watch request. - // No further events will be sent to the canceled watcher. - bool canceled = 4; - // compact_revision is set to the minimum index if a watcher tries to watch - // at a compacted index. - // - // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. - // - // The client should treat the watcher as canceled and should not try to create any - // watcher with the same start_revision again. - int64 compact_revision = 5; - - // cancel_reason indicates the reason for canceling the watcher. - string cancel_reason = 6; - - repeated mvccpb.Event events = 11; -} - -message LeaseGrantRequest { - // TTL is the advisory time-to-live in seconds. Expired lease will return -1. - int64 TTL = 1; - // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. - int64 ID = 2; -} - -message LeaseGrantResponse { - ResponseHeader header = 1; - // ID is the lease ID for the granted lease. - int64 ID = 2; - // TTL is the server chosen lease time-to-live in seconds. - int64 TTL = 3; - string error = 4; -} - -message LeaseRevokeRequest { - // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. - int64 ID = 1; -} - -message LeaseRevokeResponse { - ResponseHeader header = 1; -} - -message LeaseKeepAliveRequest { - // ID is the lease ID for the lease to keep alive. - int64 ID = 1; -} - -message LeaseKeepAliveResponse { - ResponseHeader header = 1; - // ID is the lease ID from the keep alive request. - int64 ID = 2; - // TTL is the new time-to-live for the lease. - int64 TTL = 3; -} - -message LeaseTimeToLiveRequest { - // ID is the lease ID for the lease. - int64 ID = 1; - // keys is true to query all the keys attached to this lease. - bool keys = 2; -} - -message LeaseTimeToLiveResponse { - ResponseHeader header = 1; - // ID is the lease ID from the keep alive request. - int64 ID = 2; - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. - int64 TTL = 3; - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - int64 grantedTTL = 4; - // Keys is the list of keys attached to this lease. - repeated bytes keys = 5; -} - -message LeaseLeasesRequest { -} - -message LeaseStatus { - int64 ID = 1; - // TODO: int64 TTL = 2; -} - -message LeaseLeasesResponse { - ResponseHeader header = 1; - repeated LeaseStatus leases = 2; -} - -message Member { - // ID is the member ID for this member. - uint64 ID = 1; - // name is the human-readable name of the member. If the member is not started, the name will be an empty string. - string name = 2; - // peerURLs is the list of URLs the member exposes to the cluster for communication. 
- repeated string peerURLs = 3; - // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. - repeated string clientURLs = 4; -} - -message MemberAddRequest { - // peerURLs is the list of URLs the added member will use to communicate with the cluster. - repeated string peerURLs = 1; -} - -message MemberAddResponse { - ResponseHeader header = 1; - // member is the member information for the added member. - Member member = 2; - // members is a list of all members after adding the new member. - repeated Member members = 3; -} - -message MemberRemoveRequest { - // ID is the member ID of the member to remove. - uint64 ID = 1; -} - -message MemberRemoveResponse { - ResponseHeader header = 1; - // members is a list of all members after removing the member. - repeated Member members = 2; -} - -message MemberUpdateRequest { - // ID is the member ID of the member to update. - uint64 ID = 1; - // peerURLs is the new list of URLs the member will use to communicate with the cluster. - repeated string peerURLs = 2; -} - -message MemberUpdateResponse{ - ResponseHeader header = 1; - // members is a list of all members after updating the member. - repeated Member members = 2; -} - -message MemberListRequest { -} - -message MemberListResponse { - ResponseHeader header = 1; - // members is a list of all members associated with the cluster. - repeated Member members = 2; -} - -message DefragmentRequest { -} - -message DefragmentResponse { - ResponseHeader header = 1; -} - -message MoveLeaderRequest { - // targetID is the node ID for the new leader. - uint64 targetID = 1; -} - -message MoveLeaderResponse { - ResponseHeader header = 1; -} - -enum AlarmType { - NONE = 0; // default, used to query if any alarm is active - NOSPACE = 1; // space quota is exhausted - CORRUPT = 2; // kv store corruption detected -} - -message AlarmRequest { - enum AlarmAction { - GET = 0; - ACTIVATE = 1; - DEACTIVATE = 2; - } - // action is the kind of alarm request to issue. The action - // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a - // raised alarm. - AlarmAction action = 1; - // memberID is the ID of the member associated with the alarm. If memberID is 0, the - // alarm request covers all members. - uint64 memberID = 2; - // alarm is the type of alarm to consider for this request. - AlarmType alarm = 3; -} - -message AlarmMember { - // memberID is the ID of the member associated with the raised alarm. - uint64 memberID = 1; - // alarm is the type of alarm which has been raised. - AlarmType alarm = 2; -} - -message AlarmResponse { - ResponseHeader header = 1; - // alarms is a list of alarms associated with the alarm request. - repeated AlarmMember alarms = 2; -} - -message StatusRequest { -} - -message StatusResponse { - ResponseHeader header = 1; - // version is the cluster protocol version used by the responding member. - string version = 2; - // dbSize is the size of the backend database, in bytes, of the responding member. - int64 dbSize = 3; - // leader is the member ID which the responding member believes is the current leader. - uint64 leader = 4; - // raftIndex is the current raft index of the responding member. - uint64 raftIndex = 5; - // raftTerm is the current raft term of the responding member. 
- uint64 raftTerm = 6; -} - -message AuthEnableRequest { -} - -message AuthDisableRequest { -} - -message AuthenticateRequest { - string name = 1; - string password = 2; -} - -message AuthUserAddRequest { - string name = 1; - string password = 2; -} - -message AuthUserGetRequest { - string name = 1; -} - -message AuthUserDeleteRequest { - // name is the name of the user to delete. - string name = 1; -} - -message AuthUserChangePasswordRequest { - // name is the name of the user whose password is being changed. - string name = 1; - // password is the new password for the user. - string password = 2; -} - -message AuthUserGrantRoleRequest { - // user is the name of the user which should be granted a given role. - string user = 1; - // role is the name of the role to grant to the user. - string role = 2; -} - -message AuthUserRevokeRoleRequest { - string name = 1; - string role = 2; -} - -message AuthRoleAddRequest { - // name is the name of the role to add to the authentication system. - string name = 1; -} - -message AuthRoleGetRequest { - string role = 1; -} - -message AuthUserListRequest { -} - -message AuthRoleListRequest { -} - -message AuthRoleDeleteRequest { - string role = 1; -} - -message AuthRoleGrantPermissionRequest { - // name is the name of the role which will be granted the permission. - string name = 1; - // perm is the permission to grant to the role. - authpb.Permission perm = 2; -} - -message AuthRoleRevokePermissionRequest { - string role = 1; - string key = 2; - string range_end = 3; -} - -message AuthEnableResponse { - ResponseHeader header = 1; -} - -message AuthDisableResponse { - ResponseHeader header = 1; -} - -message AuthenticateResponse { - ResponseHeader header = 1; - // token is an authorized token that can be used in succeeding RPCs - string token = 2; -} - -message AuthUserAddResponse { - ResponseHeader header = 1; -} - -message AuthUserGetResponse { - ResponseHeader header = 1; - - repeated string roles = 2; -} - -message AuthUserDeleteResponse { - ResponseHeader header = 1; -} - -message AuthUserChangePasswordResponse { - ResponseHeader header = 1; -} - -message AuthUserGrantRoleResponse { - ResponseHeader header = 1; -} - -message AuthUserRevokeRoleResponse { - ResponseHeader header = 1; -} - -message AuthRoleAddResponse { - ResponseHeader header = 1; -} - -message AuthRoleGetResponse { - ResponseHeader header = 1; - - repeated authpb.Permission perm = 2; -} - -message AuthRoleListResponse { - ResponseHeader header = 1; - - repeated string roles = 2; -} - -message AuthUserListResponse { - ResponseHeader header = 1; - - repeated string users = 2; -} - -message AuthRoleDeleteResponse { - ResponseHeader header = 1; -} - -message AuthRoleGrantPermissionResponse { - ResponseHeader header = 1; -} - -message AuthRoleRevokePermissionResponse { - ResponseHeader header = 1; -} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go deleted file mode 100644 index 23fe337a5..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ /dev/null @@ -1,718 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: kv.proto - -/* - Package mvccpb is a generated protocol buffer package. 
- - It is generated from these files: - kv.proto - - It has these top-level messages: - KeyValue - Event -*/ -package mvccpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - _ "github.com/gogo/protobuf/gogoproto" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Event_EventType int32 - -const ( - PUT Event_EventType = 0 - DELETE Event_EventType = 1 -) - -var Event_EventType_name = map[int32]string{ - 0: "PUT", - 1: "DELETE", -} -var Event_EventType_value = map[string]int32{ - "PUT": 0, - "DELETE": 1, -} - -func (x Event_EventType) String() string { - return proto.EnumName(Event_EventType_name, int32(x)) -} -func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} } - -type KeyValue struct { - // key is the key in bytes. An empty key is not allowed. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // create_revision is the revision of last creation on this key. - CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` - // mod_revision is the revision of last modification on this key. - ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` - // version is the version of the key. A deletion resets - // the version to zero and any modification of the key - // increases its version. - Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - // value is the value held by the key, in bytes. - Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` - // lease is the ID of the lease that attached to key. - // When the attached lease expires, the key will be deleted. - // If lease is 0, then no lease is attached to the key. - Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } - -type Event struct { - // type is the kind of event. If type is a PUT, it indicates - // new data has been stored to the key. If type is a DELETE, - // it indicates the key was deleted. - Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` - // kv holds the KeyValue for the event. - // A PUT event contains current kv pair. - // A PUT event with kv.Version=1 indicates the creation of a key. - // A DELETE/EXPIRE event contains the deleted key with - // its modification revision set to the revision of deletion. - Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` - // prev_kv holds the key-value pair before the event happens. 
- PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } - -func init() { - proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") - proto.RegisterType((*Event)(nil), "mvccpb.Event") - proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) -} -func (m *KeyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if m.CreateRevision != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) - } - if m.ModRevision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) - } - if m.Version != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Version)) - } - if len(m.Value) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - if m.Lease != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Lease)) - } - return i, nil -} - -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Type)) - } - if m.Kv != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size())) - n1, err := m.Kv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PrevKv != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size())) - n2, err := m.PrevKv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func encodeVarintKv(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *KeyValue) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - if m.CreateRevision != 0 { - n += 1 + sovKv(uint64(m.CreateRevision)) - } - if m.ModRevision != 0 { - n += 1 + sovKv(uint64(m.ModRevision)) - } - if m.Version != 0 { - n += 1 + sovKv(uint64(m.Version)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovKv(uint64(m.Lease)) - } - return n -} - -func (m *Event) Size() (n int) { - var l int - _ = l - if m.Type != 0 { - n += 1 + sovKv(uint64(m.Type)) - } - if m.Kv != nil { - l = m.Kv.Size() - n += 1 + l + sovKv(uint64(l)) - } - if m.PrevKv != nil { - l = m.PrevKv.Size() - n += 1 + l + sovKv(uint64(l)) - } - return n -} - -func sovKv(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozKv(x uint64) (n int) { - return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *KeyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for 
iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) - } - m.CreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreateRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) - } - m.ModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ModRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= (Event_EventType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kv == nil { - m.Kv = &KeyValue{} - } - if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrevKv == nil { - m.PrevKv = &KeyValue{} - } - if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipKv(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthKv - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipKv(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } - -var fileDescriptorKv = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, - 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, - 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, - 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, - 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, - 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, - 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, - 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, - 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, - 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, - 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, - 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, - 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, - 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, - 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, - 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, - 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, - 0xd6, 0xbe, 0x94, 0xec, 
0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, - 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto deleted file mode 100644 index 23c911b7d..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; -package mvccpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -message KeyValue { - // key is the key in bytes. An empty key is not allowed. - bytes key = 1; - // create_revision is the revision of last creation on this key. - int64 create_revision = 2; - // mod_revision is the revision of last modification on this key. - int64 mod_revision = 3; - // version is the version of the key. A deletion resets - // the version to zero and any modification of the key - // increases its version. - int64 version = 4; - // value is the value held by the key, in bytes. - bytes value = 5; - // lease is the ID of the lease that attached to key. - // When the attached lease expires, the key will be deleted. - // If lease is 0, then no lease is attached to the key. - int64 lease = 6; -} - -message Event { - enum EventType { - PUT = 0; - DELETE = 1; - } - // type is the kind of event. If type is a PUT, it indicates - // new data has been stored to the key. If type is a DELETE, - // it indicates the key was deleted. - EventType type = 1; - // kv holds the KeyValue for the event. - // A PUT event contains current kv pair. - // A PUT event with kv.Version=1 indicates the creation of a key. - // A DELETE/EXPIRE event contains the deleted key with - // its modification revision set to the revision of deletion. - KeyValue kv = 2; - - // prev_kv holds the key-value pair before the event happens. - KeyValue prev_kv = 3; -} diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go deleted file mode 100644 index b5916bb54..000000000 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
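The kv.proto file removed above defines the KeyValue and Event messages that etcd's MVCC layer hands back to clients. For readers auditing this removal, here is a minimal sketch of how those message types are typically consumed through the maintained go.etcd.io packages; the endpoint address, the "/example/" key prefix, and the timeouts are illustrative assumptions, not values taken from this repository.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.etcd.io/etcd/api/v3/mvccpb"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Illustrative endpoint only; any reachable etcd v3 cluster would do.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Each watch event carries the fields declared in the removed kv.proto:
	// Type (PUT or DELETE), Kv, and optionally PrevKv.
	for resp := range cli.Watch(ctx, "/example/", clientv3.WithPrefix(), clientv3.WithPrevKV()) {
		for _, ev := range resp.Events {
			switch ev.Type {
			case mvccpb.PUT:
				// Kv.Version == 1 marks the creation of the key.
				fmt.Printf("PUT %s=%s (version %d)\n", ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
			case mvccpb.DELETE:
				if ev.PrevKv != nil {
					fmt.Printf("DELETE %s (previous value %s)\n", ev.Kv.Key, ev.PrevKv.Value)
				}
			}
		}
	}
}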
- -package tlsutil - -import "crypto/tls" - -// cipher suites implemented by Go -// https://github.com/golang/go/blob/dev.boringcrypto.go1.10/src/crypto/tls/cipher_suites.go -var cipherSuites = map[string]uint16{ - "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, - "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, - "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, - "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, -} - -// GetCipherSuite returns the corresponding cipher suite, -// and boolean value if it is supported. -func GetCipherSuite(s string) (uint16, bool) { - v, ok := cipherSuites[s] - return v, ok -} diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go deleted file mode 100644 index 3b6aa670b..000000000 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tlsutil provides utility functions for handling TLS. -package tlsutil diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go deleted file mode 100644 index 79b1f632e..000000000 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tlsutil - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "io/ioutil" -) - -// NewCertPool creates x509 certPool with provided CA files. -func NewCertPool(CAFiles []string) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, CAFile := range CAFiles { - pemByte, err := ioutil.ReadFile(CAFile) - if err != nil { - return nil, err - } - - for { - var block *pem.Block - block, pemByte = pem.Decode(pemByte) - if block == nil { - break - } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certPool.AddCert(cert) - } - } - - return certPool, nil -} - -// NewCert generates TLS cert by using the given cert,key and parse function. -func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { - cert, err := ioutil.ReadFile(certfile) - if err != nil { - return nil, err - } - - key, err := ioutil.ReadFile(keyfile) - if err != nil { - return nil, err - } - - if parseFunc == nil { - parseFunc = tls.X509KeyPair - } - - tlsCert, err := parseFunc(cert, key) - if err != nil { - return nil, err - } - return &tlsCert, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/github.com/coreos/etcd/pkg/transport/doc.go deleted file mode 100644 index 37658ce59..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport implements various HTTP transport utilities based on Go -// net package. -package transport diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go deleted file mode 100644 index 4ff8e7f00..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "crypto/tls" - "fmt" - "net" - "time" -) - -type keepAliveConn interface { - SetKeepAlive(bool) error - SetKeepAlivePeriod(d time.Duration) error -} - -// NewKeepAliveListener returns a listener that listens on the given address. -// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. -// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. -// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html -func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { - if scheme == "https" { - if tlscfg == nil { - return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") - } - return newTLSKeepaliveListener(l, tlscfg), nil - } - - return &keepaliveListener{ - Listener: l, - }, nil -} - -type keepaliveListener struct{ net.Listener } - -func (kln *keepaliveListener) Accept() (net.Conn, error) { - c, err := kln.Listener.Accept() - if err != nil { - return nil, err - } - kac := c.(keepAliveConn) - // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl - // default on linux: 30 + 8 * 30 - // default on osx: 30 + 8 * 75 - kac.SetKeepAlive(true) - kac.SetKeepAlivePeriod(30 * time.Second) - return c, nil -} - -// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. -type tlsKeepaliveListener struct { - net.Listener - config *tls.Config -} - -// Accept waits for and returns the next incoming TLS connection. -// The returned connection c is a *tls.Conn. -func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() - if err != nil { - return - } - kac := c.(keepAliveConn) - // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl - // default on linux: 30 + 8 * 30 - // default on osx: 30 + 8 * 75 - kac.SetKeepAlive(true) - kac.SetKeepAlivePeriod(30 * time.Second) - c = tls.Server(c, l.config) - return c, nil -} - -// NewListener creates a Listener which accepts connections from an inner -// Listener and wraps each connection with Server. -// The configuration config must be non-nil and must have -// at least one certificate. -func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener { - l := &tlsKeepaliveListener{} - l.Listener = inner - l.config = config - return l -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go deleted file mode 100644 index 930c54206..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2013 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport provides network utility functions, complementing the more -// common ones in the net package. 
-package transport - -import ( - "errors" - "net" - "sync" - "time" -) - -var ( - ErrNotTCP = errors.New("only tcp connections have keepalive") -) - -// LimitListener returns a Listener that accepts at most n simultaneous -// connections from the provided Listener. -func LimitListener(l net.Listener, n int) net.Listener { - return &limitListener{l, make(chan struct{}, n)} -} - -type limitListener struct { - net.Listener - sem chan struct{} -} - -func (l *limitListener) acquire() { l.sem <- struct{}{} } -func (l *limitListener) release() { <-l.sem } - -func (l *limitListener) Accept() (net.Conn, error) { - l.acquire() - c, err := l.Listener.Accept() - if err != nil { - l.release() - return nil, err - } - return &limitListenerConn{Conn: c, release: l.release}, nil -} - -type limitListenerConn struct { - net.Conn - releaseOnce sync.Once - release func() -} - -func (l *limitListenerConn) Close() error { - err := l.Conn.Close() - l.releaseOnce.Do(l.release) - return err -} - -func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { - tcpc, ok := l.Conn.(*net.TCPConn) - if !ok { - return ErrNotTCP - } - return tcpc.SetKeepAlive(doKeepAlive) -} - -func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { - tcpc, ok := l.Conn.(*net.TCPConn) - if !ok { - return ErrNotTCP - } - return tcpc.SetKeepAlivePeriod(d) -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go deleted file mode 100644 index 48655063f..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net" - "os" - "path/filepath" - "strings" - "time" - - "github.com/coreos/etcd/pkg/tlsutil" -) - -func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { - if l, err = newListener(addr, scheme); err != nil { - return nil, err - } - return wrapTLS(addr, scheme, tlsinfo, l) -} - -func newListener(addr string, scheme string) (net.Listener, error) { - if scheme == "unix" || scheme == "unixs" { - // unix sockets via unix://laddr - return NewUnixListener(addr) - } - return net.Listen("tcp", addr) -} - -func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { - if scheme != "https" && scheme != "unixs" { - return l, nil - } - return newTLSListener(l, tlsinfo, checkSAN) -} - -type TLSInfo struct { - CertFile string - KeyFile string - CAFile string // TODO: deprecate this in v4 - TrustedCAFile string - ClientCertAuth bool - CRLFile string - InsecureSkipVerify bool - - // ServerName ensures the cert matches the given host in case of discovery / virtual hosting - ServerName string - - // HandshakeFailure is optionally called when a connection fails to handshake. 
The - // connection will be closed immediately afterwards. - HandshakeFailure func(*tls.Conn, error) - - // CipherSuites is a list of supported cipher suites. - // If empty, Go auto-populates it by default. - // Note that cipher suites are prioritized in the given order. - CipherSuites []uint16 - - selfCert bool - - // parseFunc exists to simplify testing. Typically, parseFunc - // should be left nil. In that case, tls.X509KeyPair will be used. - parseFunc func([]byte, []byte) (tls.Certificate, error) - - // AllowedCN is a CN which must be provided by a client. - AllowedCN string -} - -func (info TLSInfo) String() string { - return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) -} - -func (info TLSInfo) Empty() bool { - return info.CertFile == "" && info.KeyFile == "" -} - -func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { - if err = os.MkdirAll(dirpath, 0700); err != nil { - return - } - - certPath := filepath.Join(dirpath, "cert.pem") - keyPath := filepath.Join(dirpath, "key.pem") - _, errcert := os.Stat(certPath) - _, errkey := os.Stat(keyPath) - if errcert == nil && errkey == nil { - info.CertFile = certPath - info.KeyFile = keyPath - info.selfCert = true - return - } - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return - } - - tmpl := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{Organization: []string{"etcd"}}, - NotBefore: time.Now(), - NotAfter: time.Now().Add(365 * (24 * time.Hour)), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - for _, host := range hosts { - h, _, _ := net.SplitHostPort(host) - if ip := net.ParseIP(h); ip != nil { - tmpl.IPAddresses = append(tmpl.IPAddresses, ip) - } else { - tmpl.DNSNames = append(tmpl.DNSNames, h) - } - } - - priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) - if err != nil { - return - } - - certOut, err := os.Create(certPath) - if err != nil { - return - } - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - - b, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return - } - keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return - } - pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) - keyOut.Close() - - return SelfCert(dirpath, hosts) -} - -func (info TLSInfo) baseConfig() (*tls.Config, error) { - if info.KeyFile == "" || info.CertFile == "" { - return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) - } - - _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - if err != nil { - return nil, err - } - - cfg := &tls.Config{ - MinVersion: tls.VersionTLS12, - ServerName: info.ServerName, - } - - if len(info.CipherSuites) > 0 { - cfg.CipherSuites = info.CipherSuites - } - - if info.AllowedCN != "" { - cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - for _, chains := range verifiedChains { - if len(chains) != 0 { - if info.AllowedCN == 
chains[0].Subject.CommonName { - return nil - } - } - } - return errors.New("CommonName authentication failed") - } - } - - // this only reloads certs when there's a client request - // TODO: support server-side refresh (e.g. inotify, SIGHUP), caching - cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - } - cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - } - return cfg, nil -} - -// cafiles returns a list of CA file paths. -func (info TLSInfo) cafiles() []string { - cs := make([]string, 0) - if info.CAFile != "" { - cs = append(cs, info.CAFile) - } - if info.TrustedCAFile != "" { - cs = append(cs, info.TrustedCAFile) - } - return cs -} - -// ServerConfig generates a tls.Config object for use by an HTTP server. -func (info TLSInfo) ServerConfig() (*tls.Config, error) { - cfg, err := info.baseConfig() - if err != nil { - return nil, err - } - - cfg.ClientAuth = tls.NoClientCert - if info.CAFile != "" || info.ClientCertAuth { - cfg.ClientAuth = tls.RequireAndVerifyClientCert - } - - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cp, err := tlsutil.NewCertPool(CAFiles) - if err != nil { - return nil, err - } - cfg.ClientCAs = cp - } - - // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server - cfg.NextProtos = []string{"h2"} - - return cfg, nil -} - -// ClientConfig generates a tls.Config object for use by an HTTP client. -func (info TLSInfo) ClientConfig() (*tls.Config, error) { - var cfg *tls.Config - var err error - - if !info.Empty() { - cfg, err = info.baseConfig() - if err != nil { - return nil, err - } - } else { - cfg = &tls.Config{ServerName: info.ServerName} - } - cfg.InsecureSkipVerify = info.InsecureSkipVerify - - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles) - if err != nil { - return nil, err - } - } - - if info.selfCert { - cfg.InsecureSkipVerify = true - } - return cfg, nil -} - -// IsClosedConnError returns true if the error is from closing listener, cmux. -// copied from golang.org/x/net/http2/http2.go -func IsClosedConnError(err error) bool { - // 'use of closed network connection' (Go <=1.8) - // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) - // 'mux: listener closed' (cmux.ErrListenerClosed) - return err != nil && strings.Contains(err.Error(), "closed") -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go deleted file mode 100644 index 6f1600945..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
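The listener.go file removed above centers on the TLSInfo struct and the NewListener helper, which wraps a plain TCP listener with a server-side tls.Config built from it. As a reference for what this vendored API looked like in use, here is a minimal sketch; the certificate paths and the listen address are placeholders, and the import path is the vendored package as it existed before this removal.

package main

import (
	"log"
	"net/http"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	// Placeholder file paths; setting TrustedCAFile together with
	// ClientCertAuth makes ServerConfig require and verify client certs.
	tlsInfo := transport.TLSInfo{
		CertFile:       "/etc/etcd/server.crt",
		KeyFile:        "/etc/etcd/server.key",
		TrustedCAFile:  "/etc/etcd/ca.crt",
		ClientCertAuth: true,
	}

	// The "https" scheme makes NewListener wrap the TCP listener with the
	// TLS settings derived from tlsInfo.
	ln, err := transport.NewListener("127.0.0.1:2379", "https", &tlsInfo)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	log.Fatal(http.Serve(ln, http.NotFoundHandler()))
}

The same TLSInfo value also feeds ClientConfig on the dialing side, which is how the transport constructors later in this file listing obtain their tls.Config.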
- -package transport - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "strings" - "sync" -) - -// tlsListener overrides a TLS listener so it will reject client -// certificates with insufficient SAN credentials or CRL revoked -// certificates. -type tlsListener struct { - net.Listener - connc chan net.Conn - donec chan struct{} - err error - handshakeFailure func(*tls.Conn, error) - check tlsCheckFunc -} - -type tlsCheckFunc func(context.Context, *tls.Conn) error - -// NewTLSListener handshakes TLS connections and performs optional CRL checking. -func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { - check := func(context.Context, *tls.Conn) error { return nil } - return newTLSListener(l, tlsinfo, check) -} - -func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) { - if tlsinfo == nil || tlsinfo.Empty() { - l.Close() - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) - } - tlscfg, err := tlsinfo.ServerConfig() - if err != nil { - return nil, err - } - - hf := tlsinfo.HandshakeFailure - if hf == nil { - hf = func(*tls.Conn, error) {} - } - - if len(tlsinfo.CRLFile) > 0 { - prevCheck := check - check = func(ctx context.Context, tlsConn *tls.Conn) error { - if err := prevCheck(ctx, tlsConn); err != nil { - return err - } - st := tlsConn.ConnectionState() - if certs := st.PeerCertificates; len(certs) > 0 { - return checkCRL(tlsinfo.CRLFile, certs) - } - return nil - } - } - - tlsl := &tlsListener{ - Listener: tls.NewListener(l, tlscfg), - connc: make(chan net.Conn), - donec: make(chan struct{}), - handshakeFailure: hf, - check: check, - } - go tlsl.acceptLoop() - return tlsl, nil -} - -func (l *tlsListener) Accept() (net.Conn, error) { - select { - case conn := <-l.connc: - return conn, nil - case <-l.donec: - return nil, l.err - } -} - -func checkSAN(ctx context.Context, tlsConn *tls.Conn) error { - st := tlsConn.ConnectionState() - if certs := st.PeerCertificates; len(certs) > 0 { - addr := tlsConn.RemoteAddr().String() - return checkCertSAN(ctx, certs[0], addr) - } - return nil -} - -// acceptLoop launches each TLS handshake in a separate goroutine -// to prevent a hanging TLS connection from blocking other connections. 
-func (l *tlsListener) acceptLoop() { - var wg sync.WaitGroup - var pendingMu sync.Mutex - - pending := make(map[net.Conn]struct{}) - ctx, cancel := context.WithCancel(context.Background()) - defer func() { - cancel() - pendingMu.Lock() - for c := range pending { - c.Close() - } - pendingMu.Unlock() - wg.Wait() - close(l.donec) - }() - - for { - conn, err := l.Listener.Accept() - if err != nil { - l.err = err - return - } - - pendingMu.Lock() - pending[conn] = struct{}{} - pendingMu.Unlock() - - wg.Add(1) - go func() { - defer func() { - if conn != nil { - conn.Close() - } - wg.Done() - }() - - tlsConn := conn.(*tls.Conn) - herr := tlsConn.Handshake() - pendingMu.Lock() - delete(pending, conn) - pendingMu.Unlock() - - if herr != nil { - l.handshakeFailure(tlsConn, herr) - return - } - if err := l.check(ctx, tlsConn); err != nil { - l.handshakeFailure(tlsConn, err) - return - } - - select { - case l.connc <- tlsConn: - conn = nil - case <-ctx.Done(): - } - }() - } -} - -func checkCRL(crlPath string, cert []*x509.Certificate) error { - // TODO: cache - crlBytes, err := ioutil.ReadFile(crlPath) - if err != nil { - return err - } - certList, err := x509.ParseCRL(crlBytes) - if err != nil { - return err - } - revokedSerials := make(map[string]struct{}) - for _, rc := range certList.TBSCertList.RevokedCertificates { - revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} - } - for _, c := range cert { - serial := string(c.SerialNumber.Bytes()) - if _, ok := revokedSerials[serial]; ok { - return fmt.Errorf("transport: certificate serial %x revoked", serial) - } - } - return nil -} - -func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { - if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { - return nil - } - h, _, herr := net.SplitHostPort(remoteAddr) - if herr != nil { - return herr - } - if len(cert.IPAddresses) > 0 { - cerr := cert.VerifyHostname(h) - if cerr == nil { - return nil - } - if len(cert.DNSNames) == 0 { - return cerr - } - } - if len(cert.DNSNames) > 0 { - ok, err := isHostInDNS(ctx, h, cert.DNSNames) - if ok { - return nil - } - errStr := "" - if err != nil { - errStr = " (" + err.Error() + ")" - } - return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) - } - return nil -} - -func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { - // reverse lookup - wildcards, names := []string{}, []string{} - for _, dns := range dnsNames { - if strings.HasPrefix(dns, "*.") { - wildcards = append(wildcards, dns[1:]) - } else { - names = append(names, dns) - } - } - lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) - for _, name := range lnames { - // strip trailing '.' from PTR record - if name[len(name)-1] == '.' 
{ - name = name[:len(name)-1] - } - for _, wc := range wildcards { - if strings.HasSuffix(name, wc) { - return true, nil - } - } - for _, n := range names { - if n == name { - return true, nil - } - } - } - err = lerr - - // forward lookup - for _, dns := range names { - addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) - if lerr != nil { - err = lerr - continue - } - for _, addr := range addrs { - if addr == host { - return true, nil - } - } - } - return false, err -} - -func (l *tlsListener) Close() error { - err := l.Listener.Close() - <-l.donec - return err -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go deleted file mode 100644 index 7e8c02030..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "time" -) - -type timeoutConn struct { - net.Conn - wtimeoutd time.Duration - rdtimeoutd time.Duration -} - -func (c timeoutConn) Write(b []byte) (n int, err error) { - if c.wtimeoutd > 0 { - if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil { - return 0, err - } - } - return c.Conn.Write(b) -} - -func (c timeoutConn) Read(b []byte) (n int, err error) { - if c.rdtimeoutd > 0 { - if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil { - return 0, err - } - } - return c.Conn.Read(b) -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go deleted file mode 100644 index 6ae39ecfc..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "net" - "time" -) - -type rwTimeoutDialer struct { - wtimeoutd time.Duration - rdtimeoutd time.Duration - net.Dialer -} - -func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) { - conn, err := d.Dialer.Dial(network, address) - tconn := &timeoutConn{ - rdtimeoutd: d.rdtimeoutd, - wtimeoutd: d.wtimeoutd, - Conn: conn, - } - return tconn, err -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go deleted file mode 100644 index b35e04955..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "time" -) - -// NewTimeoutListener returns a listener that listens on the given address. -// If read/write on the accepted connection blocks longer than its time limit, -// it will return timeout error. -func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { - ln, err := newListener(addr, scheme) - if err != nil { - return nil, err - } - ln = &rwTimeoutListener{ - Listener: ln, - rdtimeoutd: rdtimeoutd, - wtimeoutd: wtimeoutd, - } - if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { - return nil, err - } - return ln, nil -} - -type rwTimeoutListener struct { - net.Listener - wtimeoutd time.Duration - rdtimeoutd time.Duration -} - -func (rwln *rwTimeoutListener) Accept() (net.Conn, error) { - c, err := rwln.Listener.Accept() - if err != nil { - return nil, err - } - return timeoutConn{ - Conn: c, - wtimeoutd: rwln.wtimeoutd, - rdtimeoutd: rwln.rdtimeoutd, - }, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go deleted file mode 100644 index ea16b4c0f..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "net/http" - "time" -) - -// NewTimeoutTransport returns a transport created using the given TLS info. -// If read/write on the created connection blocks longer than its time limit, -// it will return timeout error. -// If read/write timeout is set, transport will not be able to reuse connection. 
-func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { - tr, err := NewTransport(info, dialtimeoutd) - if err != nil { - return nil, err - } - - if rdtimeoutd != 0 || wtimeoutd != 0 { - // the timed out connection will timeout soon after it is idle. - // it should not be put back to http transport as an idle connection for future usage. - tr.MaxIdleConnsPerHost = -1 - } else { - // allow more idle connections between peers to avoid unnecessary port allocation. - tr.MaxIdleConnsPerHost = 1024 - } - - tr.Dial = (&rwTimeoutDialer{ - Dialer: net.Dialer{ - Timeout: dialtimeoutd, - KeepAlive: 30 * time.Second, - }, - rdtimeoutd: rdtimeoutd, - wtimeoutd: wtimeoutd, - }).Dial - return tr, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/github.com/coreos/etcd/pkg/transport/tls.go deleted file mode 100644 index 62fe0d385..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/tls.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "strings" - "time" -) - -// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those -// endpoints that could be validated as secure. -func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { - t, err := NewTransport(tlsInfo, 5*time.Second) - if err != nil { - return nil, err - } - var errs []string - var endpoints []string - for _, ep := range eps { - if !strings.HasPrefix(ep, "https://") { - errs = append(errs, fmt.Sprintf("%q is insecure", ep)) - continue - } - conn, cerr := t.Dial("tcp", ep[len("https://"):]) - if cerr != nil { - errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) - continue - } - conn.Close() - endpoints = append(endpoints, ep) - } - if len(errs) != 0 { - err = fmt.Errorf("%s", strings.Join(errs, ",")) - } - return endpoints, err -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/github.com/coreos/etcd/pkg/transport/transport.go deleted file mode 100644 index 4a7fe69d2..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/transport.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
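The timeout_listener.go and timeout_transport.go files removed above layer read and write deadlines on top of the same TLSInfo machinery. The sketch below shows the shape of those two constructors; the address, the zero-value TLSInfo (plain TCP), and the durations are assumptions chosen only to keep the example self-contained.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	// A zero-value TLSInfo keeps the example on plain TCP.
	var tlsInfo transport.TLSInfo

	// Client side: connections created by this transport give up when a
	// single read or write blocks for longer than two seconds.
	tr, err := transport.NewTimeoutTransport(tlsInfo, 5*time.Second, 2*time.Second, 2*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: tr}
	_ = client // construction only; any request made with it inherits the deadlines

	// Server side: every accepted connection enforces the same limits.
	ln, err := transport.NewTimeoutListener("127.0.0.1:2380", "http", &tlsInfo, 2*time.Second, 2*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
}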
- -package transport - -import ( - "net" - "net/http" - "strings" - "time" -) - -type unixTransport struct{ *http.Transport } - -func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { - cfg, err := info.ClientConfig() - if err != nil { - return nil, err - } - - t := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: dialtimeoutd, - // value taken from http.DefaultTransport - KeepAlive: 30 * time.Second, - }).Dial, - // value taken from http.DefaultTransport - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - } - - dialer := (&net.Dialer{ - Timeout: dialtimeoutd, - KeepAlive: 30 * time.Second, - }) - dial := func(net, addr string) (net.Conn, error) { - return dialer.Dial("unix", addr) - } - - tu := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - } - ut := &unixTransport{tu} - - t.RegisterProtocol("unix", ut) - t.RegisterProtocol("unixs", ut) - - return t, nil -} - -func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { - url := *req.URL - req.URL = &url - req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) - return urt.Transport.RoundTrip(req) -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go deleted file mode 100644 index 123e2036f..000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "os" -) - -type unixListener struct{ net.Listener } - -func NewUnixListener(addr string) (net.Listener, error) { - if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { - return nil, err - } - l, err := net.Listen("unix", addr) - if err != nil { - return nil, err - } - return &unixListener{l}, nil -} - -func (ul *unixListener) Close() error { - if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { - return err - } - return ul.Listener.Close() -} diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go deleted file mode 100644 index de8ef0bd7..000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package types declares various data types and implements type-checking -// functions. -package types diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go deleted file mode 100644 index 1b042d9ce..000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/id.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "strconv" -) - -// ID represents a generic identifier which is canonically -// stored as a uint64 but is typically represented as a -// base-16 string for input/output -type ID uint64 - -func (i ID) String() string { - return strconv.FormatUint(uint64(i), 16) -} - -// IDFromString attempts to create an ID from a base-16 string. -func IDFromString(s string) (ID, error) { - i, err := strconv.ParseUint(s, 16, 64) - return ID(i), err -} - -// IDSlice implements the sort interface -type IDSlice []ID - -func (p IDSlice) Len() int { return len(p) } -func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } -func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go deleted file mode 100644 index c111b0c0c..000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/set.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "sync" -) - -type Set interface { - Add(string) - Remove(string) - Contains(string) bool - Equals(Set) bool - Length() int - Values() []string - Copy() Set - Sub(Set) Set -} - -func NewUnsafeSet(values ...string) *unsafeSet { - set := &unsafeSet{make(map[string]struct{})} - for _, v := range values { - set.Add(v) - } - return set -} - -func NewThreadsafeSet(values ...string) *tsafeSet { - us := NewUnsafeSet(values...) 
- return &tsafeSet{us, sync.RWMutex{}} -} - -type unsafeSet struct { - d map[string]struct{} -} - -// Add adds a new value to the set (no-op if the value is already present) -func (us *unsafeSet) Add(value string) { - us.d[value] = struct{}{} -} - -// Remove removes the given value from the set -func (us *unsafeSet) Remove(value string) { - delete(us.d, value) -} - -// Contains returns whether the set contains the given value -func (us *unsafeSet) Contains(value string) (exists bool) { - _, exists = us.d[value] - return exists -} - -// ContainsAll returns whether the set contains all given values -func (us *unsafeSet) ContainsAll(values []string) bool { - for _, s := range values { - if !us.Contains(s) { - return false - } - } - return true -} - -// Equals returns whether the contents of two sets are identical -func (us *unsafeSet) Equals(other Set) bool { - v1 := sort.StringSlice(us.Values()) - v2 := sort.StringSlice(other.Values()) - v1.Sort() - v2.Sort() - return reflect.DeepEqual(v1, v2) -} - -// Length returns the number of elements in the set -func (us *unsafeSet) Length() int { - return len(us.d) -} - -// Values returns the values of the Set in an unspecified order. -func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0) - for val := range us.d { - values = append(values, val) - } - return values -} - -// Copy creates a new Set containing the values of the first -func (us *unsafeSet) Copy() Set { - cp := NewUnsafeSet() - for val := range us.d { - cp.Add(val) - } - - return cp -} - -// Sub removes all elements in other from the set -func (us *unsafeSet) Sub(other Set) Set { - oValues := other.Values() - result := us.Copy().(*unsafeSet) - - for _, val := range oValues { - if _, ok := result.d[val]; !ok { - continue - } - delete(result.d, val) - } - - return result -} - -type tsafeSet struct { - us *unsafeSet - m sync.RWMutex -} - -func (ts *tsafeSet) Add(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Add(value) -} - -func (ts *tsafeSet) Remove(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Remove(value) -} - -func (ts *tsafeSet) Contains(value string) (exists bool) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Contains(value) -} - -func (ts *tsafeSet) Equals(other Set) bool { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Equals(other) -} - -func (ts *tsafeSet) Length() int { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Length() -} - -func (ts *tsafeSet) Values() (values []string) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Values() -} - -func (ts *tsafeSet) Copy() Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Copy().(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} - -func (ts *tsafeSet) Sub(other Set) Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Sub(other).(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go deleted file mode 100644 index 0dd9ca798..000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/slice.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -// Uint64Slice implements sort interface -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go deleted file mode 100644 index 9e5d03ff6..000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/urls.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "errors" - "fmt" - "net" - "net/url" - "sort" - "strings" -) - -type URLs []url.URL - -func NewURLs(strs []string) (URLs, error) { - all := make([]url.URL, len(strs)) - if len(all) == 0 { - return nil, errors.New("no valid URLs given") - } - for i, in := range strs { - in = strings.TrimSpace(in) - u, err := url.Parse(in) - if err != nil { - return nil, err - } - if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { - return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) - } - if _, _, err := net.SplitHostPort(u.Host); err != nil { - return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) - } - if u.Path != "" { - return nil, fmt.Errorf("URL must not contain a path: %s", in) - } - all[i] = *u - } - us := URLs(all) - us.Sort() - - return us, nil -} - -func MustNewURLs(strs []string) URLs { - urls, err := NewURLs(strs) - if err != nil { - panic(err) - } - return urls -} - -func (us URLs) String() string { - return strings.Join(us.StringSlice(), ",") -} - -func (us *URLs) Sort() { - sort.Sort(us) -} -func (us URLs) Len() int { return len(us) } -func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } -func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } - -func (us URLs) StringSlice() []string { - out := make([]string, len(us)) - for i := range us { - out[i] = us[i].String() - } - - return out -} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go deleted file mode 100644 index 47690cc38..000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "sort" - "strings" -) - -// URLsMap is a map from a name to its URLs. -type URLsMap map[string]URLs - -// NewURLsMap returns a URLsMap instantiated from the given string, -// which consists of discovery-formatted names-to-URLs, like: -// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 -func NewURLsMap(s string) (URLsMap, error) { - m := parse(s) - - cl := URLsMap{} - for name, urls := range m { - us, err := NewURLs(urls) - if err != nil { - return nil, err - } - cl[name] = us - } - return cl, nil -} - -// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The -// string values in the map can be multiple values separated by the sep string. -func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { - var err error - um := URLsMap{} - for k, v := range m { - um[k], err = NewURLs(strings.Split(v, sep)) - if err != nil { - return nil, err - } - } - return um, nil -} - -// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. -func (c URLsMap) String() string { - var pairs []string - for name, urls := range c { - for _, url := range urls { - pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) - } - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -// URLs returns a list of all URLs. -// The returned list is sorted in ascending lexicographical order. -func (c URLsMap) URLs() []string { - var urls []string - for _, us := range c { - for _, u := range us { - urls = append(urls, u.String()) - } - } - sort.Strings(urls) - return urls -} - -// Len returns the size of URLsMap. -func (c URLsMap) Len() int { - return len(c) -} - -// parse parses the given string and returns a map listing the values specified for each key. -func parse(s string) map[string][]string { - m := make(map[string][]string) - for s != "" { - key := s - if i := strings.IndexAny(key, ","); i >= 0 { - key, s = key[:i], key[i+1:] - } else { - s = "" - } - if key == "" { - continue - } - value := "" - if i := strings.Index(key, "="); i >= 0 { - key, value = key[:i], key[i+1:] - } - m[key] = append(m[key], value) - } - return m -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96f2..000000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 792994785..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. 
It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. - flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d68..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. 
- UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce945..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. 
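handleMethods above is what lets spew invoke a value's error or Stringer implementation, using the unsafe bypass from bypass.go for unexported fields and falling back to the stub in bypasssafe.go under the safe build tag. A small illustrative sketch of the observable behavior, assuming go-spew is consumed as a normal module dependency; the temperature and reading types are invented for the example.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// temperature implements fmt.Stringer with a value receiver.
type temperature float64

func (t temperature) String() string { return fmt.Sprintf("%.1f degC", float64(t)) }

type reading struct {
	sensor string
	value  temperature // unexported field: the Stringer is still invoked on default (non-safe) builds
}

func main() {
	r := reading{sensor: "lab-1", value: 21.5}
	fmt.Print(spew.Sdump(r))
	// Roughly (exact layout depends on configuration):
	// (main.reading) {
	//  sensor: (string) (len=5) "lab-1",
	//  value: (main.temperature) 21.5 degC
	// }
}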
-func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. 
- i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. 
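The valuesSorter machinery above is what backs the SortKeys option: primitive map keys are compared directly, and anything else falls back to Stringer/error output or spewed strings as surrogate sort keys. A short sketch of the effect; the map contents are arbitrary.

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	m := map[string]int{"charlie": 3, "alpha": 1, "bravo": 2}

	// With SortKeys the map is printed in key order, giving deterministic,
	// diffable output instead of Go's randomized map iteration order.
	cfg := spew.ConfigState{Indent: "  ", SortKeys: true}
	cfg.Dump(m)
}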
-func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f31..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. 
- // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. 
It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. 
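The ConfigState fields documented above (DisablePointerAddresses, DisableCapacities, SortKeys) exist largely to make dumps stable enough to diff in tests, and the wrapper methods apply that configuration to each argument. A hedged sketch combining them with Sdump; the server type is invented for illustration.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type server struct {
	Name  string
	Peers map[string]string
}

func main() {
	// Configuration aimed at diffable test output: no pointer addresses,
	// no capacities, deterministic map key order.
	cfg := spew.ConfigState{
		Indent:                  "  ",
		DisablePointerAddresses: true,
		DisableCapacities:       true,
		SortKeys:                true,
	}

	got := &server{
		Name:  "node-0",
		Peers: map[string]string{"node-1": "http://2.2.2.2:2380"},
	}
	fmt.Print(cfg.Sdump(got))
}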
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. 
-// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6f1..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) 
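The Quick Start above lists Dump, Fdump, and Sdump; one feature worth calling out from the overview is that byte arrays and slices are rendered hexdump -C style (offsets, hex bytes, ASCII). A minimal sketch with an arbitrary payload, assuming the non-vendored module is available.

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	payload := []byte("spew hexdump sample 0123456789")

	spew.Dump(payload)             // to stdout; byte slices come out hexdump -C style
	spew.Fdump(os.Stderr, payload) // same output, to any io.Writer
	s := spew.Sdump(payload)       // same output, as a string
	_ = s
}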
- -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. 
- - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89fc1..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
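The Custom Formatter sections above describe the verbs spew intercepts (%v, %+v, %#v, %#+v) and show sample output for pointer chains. A compact sketch of the same idea via spew.Printf; the foo and bar types are illustrative only.

package main

import "github.com/davecgh/go-spew/spew"

type bar struct {
	flag uint8
}

type foo struct {
	b *bar
}

func main() {
	f := foo{b: &bar{flag: 2}}

	// Only %v, %+v, %#v and %#+v are intercepted; other verbs (%x, %q, ...)
	// and width/precision are handed off to the standard fmt package.
	spew.Printf("%v\n", f)
	spew.Printf("%+v\n", f)  // adds pointer addresses
	spew.Printf("%#+v\n", f) // adds types and pointer addresses
}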
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. 
- if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. 
- if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. 
See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7d7..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. -func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. - if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. 
We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. 
- if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. -*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e3388..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. 
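Editor's note: as a minimal, hedged sketch of the vendored go-spew formatter being removed above (struct name and values are invented for illustration), the convenience wrappers simply hand each argument to NewFormatter before deferring to the standard fmt package:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type endpoint struct {
	Name string
	Port int
}

func main() {
	e := &endpoint{Name: "api", Port: 8080}

	// The custom formatter only reacts to the v verb family:
	// %v is the most compact form, %+v adds pointer addresses,
	// %#v adds types, %#+v adds both.
	spew.Printf("compact: %v\n", e)
	spew.Printf("typed:   %#v\n", e)

	// NewFormatter can also be handed straight to the standard fmt package.
	fmt.Printf("direct:  %+v\n", spew.NewFormatter(e))
}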
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/getkin/kin-openapi/LICENSE b/vendor/github.com/getkin/kin-openapi/LICENSE deleted file mode 100644 index 992b9831e..000000000 --- a/vendor/github.com/getkin/kin-openapi/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017-2018 the project authors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/doc.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/doc.go deleted file mode 100644 index e59ec2c34..000000000 --- a/vendor/github.com/getkin/kin-openapi/jsoninfo/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package jsoninfo provides information and functions for marshalling/unmarshalling JSON. -package jsoninfo diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/field_info.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/field_info.go deleted file mode 100644 index d949a79d3..000000000 --- a/vendor/github.com/getkin/kin-openapi/jsoninfo/field_info.go +++ /dev/null @@ -1,122 +0,0 @@ -package jsoninfo - -import ( - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// FieldInfo contains information about JSON serialization of a field. 
-type FieldInfo struct { - MultipleFields bool // Whether multiple Go fields share this JSON name - HasJSONTag bool - TypeIsMarshaller bool - TypeIsUnmarshaller bool - JSONOmitEmpty bool - JSONString bool - Index []int - Type reflect.Type - JSONName string -} - -func AppendFields(fields []FieldInfo, parentIndex []int, t reflect.Type) []FieldInfo { - // For each field - numField := t.NumField() -iteration: - for i := 0; i < numField; i++ { - f := t.Field(i) - index := make([]int, 0, len(parentIndex)+1) - index = append(index, parentIndex...) - index = append(index, i) - - // See whether this is an embedded field - if f.Anonymous { - if f.Tag.Get("json") == "-" { - continue - } - fields = AppendFields(fields, index, f.Type) - continue iteration - } - - // Ignore certain types - switch f.Type.Kind() { - case reflect.Func, reflect.Chan: - continue iteration - } - - // Is it a private (lowercase) field? - firstRune, _ := utf8.DecodeRuneInString(f.Name) - if unicode.IsLower(firstRune) { - continue iteration - } - - // Declare a field - field := FieldInfo{ - Index: index, - Type: f.Type, - JSONName: f.Name, - } - - // Read "json" tag - jsonTag := f.Tag.Get("json") - - // Read our custom "multijson" tag that - // allows multiple fields with the same name. - if v := f.Tag.Get("multijson"); len(v) > 0 { - field.MultipleFields = true - jsonTag = v - } - - // Handle "-" - if jsonTag == "-" { - continue - } - - // Parse the tag - if len(jsonTag) > 0 { - field.HasJSONTag = true - for i, part := range strings.Split(jsonTag, ",") { - if i == 0 { - if len(part) > 0 { - field.JSONName = part - } - } else { - switch part { - case "omitempty": - field.JSONOmitEmpty = true - case "string": - field.JSONString = true - } - } - } - } - - if _, ok := field.Type.MethodByName("MarshalJSON"); ok { - field.TypeIsMarshaller = true - } - if _, ok := field.Type.MethodByName("UnmarshalJSON"); ok { - field.TypeIsUnmarshaller = true - } - - // Field is done - fields = append(fields, field) - } - - return fields -} - -type sortableFieldInfos []FieldInfo - -func (list sortableFieldInfos) Len() int { - return len(list) -} - -func (list sortableFieldInfos) Less(i, j int) bool { - return list[i].JSONName < list[j].JSONName -} - -func (list sortableFieldInfos) Swap(i, j int) { - a, b := list[i], list[j] - list[i], list[j] = b, a -} diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal.go deleted file mode 100644 index 93de99a56..000000000 --- a/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal.go +++ /dev/null @@ -1,162 +0,0 @@ -package jsoninfo - -import ( - "encoding/json" - "fmt" - "reflect" -) - -// MarshalStrictStruct function: -// * Marshals struct fields, ignoring MarshalJSON() and fields without 'json' tag. -// * Correctly handles StrictStruct semantics. -func MarshalStrictStruct(value StrictStruct) ([]byte, error) { - encoder := NewObjectEncoder() - if err := value.EncodeWith(encoder, value); err != nil { - return nil, err - } - return encoder.Bytes() -} - -type ObjectEncoder struct { - result map[string]json.RawMessage -} - -func NewObjectEncoder() *ObjectEncoder { - return &ObjectEncoder{ - result: make(map[string]json.RawMessage, 8), - } -} - -// Bytes returns the result of encoding. -func (encoder *ObjectEncoder) Bytes() ([]byte, error) { - return json.Marshal(encoder.result) -} - -// EncodeExtension adds a key/value to the current JSON object. 
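Editor's note: a short sketch of how the field scanner in the vendored copy above reports struct tags (the pet type is hypothetical); unexported fields and fields tagged `json:"-"` are skipped, and the custom multijson tag marks fields that share a JSON name:

package main

import (
	"fmt"
	"reflect"

	"github.com/getkin/kin-openapi/jsoninfo"
)

type pet struct {
	Name   string `json:"name,omitempty"`
	Age    int    `json:"age"`
	hidden string // unexported, ignored by AppendFields
}

func main() {
	fields := jsoninfo.AppendFields(nil, nil, reflect.TypeOf(pet{}))
	for _, f := range fields {
		fmt.Printf("%s tag=%v omitempty=%v\n", f.JSONName, f.HasJSONTag, f.JSONOmitEmpty)
	}
	// name tag=true omitempty=true
	// age tag=true omitempty=false
}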
-func (encoder *ObjectEncoder) EncodeExtension(key string, value interface{}) error { - data, err := json.Marshal(value) - if err != nil { - return err - } - encoder.result[key] = data - return nil -} - -// EncodeExtensionMap adds all properties to the result. -func (encoder *ObjectEncoder) EncodeExtensionMap(value map[string]json.RawMessage) error { - if value != nil { - result := encoder.result - for k, v := range value { - result[k] = v - } - } - return nil -} - -func (encoder *ObjectEncoder) EncodeStructFieldsAndExtensions(value interface{}) error { - reflection := reflect.ValueOf(value) - - // Follow "encoding/json" semantics - if reflection.Kind() != reflect.Ptr { - // Panic because this is a clear programming error - panic(fmt.Errorf("Value %s is not a pointer", reflection.Type().String())) - } - if reflection.IsNil() { - // Panic because this is a clear programming error - panic(fmt.Errorf("Value %s is nil", reflection.Type().String())) - } - - // Take the element - reflection = reflection.Elem() - - // Obtain typeInfo - typeInfo := GetTypeInfo(reflection.Type()) - - // Declare result - result := encoder.result - - // Supported fields -iteration: - for _, field := range typeInfo.Fields { - // Fields without JSON tag are ignored - if !field.HasJSONTag { - continue - } - - // Marshal - fieldValue := reflection.FieldByIndex(field.Index) - if v, ok := fieldValue.Interface().(json.Marshaler); ok { - if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() { - if field.JSONOmitEmpty { - continue iteration - } - result[field.JSONName] = []byte("null") - continue - } - fieldData, err := v.MarshalJSON() - if err != nil { - return err - } - result[field.JSONName] = fieldData - continue - } - switch fieldValue.Kind() { - case reflect.Ptr, reflect.Interface: - if fieldValue.IsNil() { - if field.JSONOmitEmpty { - continue iteration - } - result[field.JSONName] = []byte("null") - continue - } - case reflect.Struct: - case reflect.Map: - if field.JSONOmitEmpty && (fieldValue.IsNil() || fieldValue.Len() == 0) { - continue iteration - } - case reflect.Slice: - if field.JSONOmitEmpty && fieldValue.Len() == 0 { - continue iteration - } - case reflect.Bool: - x := fieldValue.Bool() - if field.JSONOmitEmpty && !x { - continue iteration - } - s := "false" - if x { - s = "true" - } - result[field.JSONName] = []byte(s) - continue iteration - case reflect.Int64, reflect.Int, reflect.Int32: - if field.JSONOmitEmpty && fieldValue.Int() == 0 { - continue iteration - } - case reflect.Uint64, reflect.Uint, reflect.Uint32: - if field.JSONOmitEmpty && fieldValue.Uint() == 0 { - continue iteration - } - case reflect.Float64: - if field.JSONOmitEmpty && fieldValue.Float() == 0.0 { - continue iteration - } - case reflect.String: - if field.JSONOmitEmpty && len(fieldValue.String()) == 0 { - continue iteration - } - default: - panic(fmt.Errorf("Field '%s' has unsupported type %s", field.JSONName, field.Type.String())) - } - - // No special treament is needed - // Use plain old "encoding/json".Marshal - fieldData, err := json.Marshal(fieldValue.Addr().Interface()) - if err != nil { - return err - } - result[field.JSONName] = fieldData - } - - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal_ref.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal_ref.go deleted file mode 100644 index 9738bf08f..000000000 --- a/vendor/github.com/getkin/kin-openapi/jsoninfo/marshal_ref.go +++ /dev/null @@ -1,30 +0,0 @@ -package jsoninfo - -import ( - "encoding/json" -) - -func MarshalRef(value 
string, otherwise interface{}) ([]byte, error) { - if len(value) > 0 { - return json.Marshal(&refProps{ - Ref: value, - }) - } - return json.Marshal(otherwise) -} - -func UnmarshalRef(data []byte, destRef *string, destOtherwise interface{}) error { - refProps := &refProps{} - if err := json.Unmarshal(data, refProps); err == nil { - ref := refProps.Ref - if len(ref) > 0 { - *destRef = ref - return nil - } - } - return json.Unmarshal(data, destOtherwise) -} - -type refProps struct { - Ref string `json:"$ref,omitempty"` -} diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/strict_struct.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/strict_struct.go deleted file mode 100644 index 6b4d83977..000000000 --- a/vendor/github.com/getkin/kin-openapi/jsoninfo/strict_struct.go +++ /dev/null @@ -1,6 +0,0 @@ -package jsoninfo - -type StrictStruct interface { - EncodeWith(encoder *ObjectEncoder, value interface{}) error - DecodeWith(decoder *ObjectDecoder, value interface{}) error -} diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/type_info.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/type_info.go deleted file mode 100644 index 5d7a4eda4..000000000 --- a/vendor/github.com/getkin/kin-openapi/jsoninfo/type_info.go +++ /dev/null @@ -1,68 +0,0 @@ -package jsoninfo - -import ( - "reflect" - "sort" - "sync" -) - -var ( - typeInfos = map[reflect.Type]*TypeInfo{} - typeInfosMutex sync.RWMutex -) - -// TypeInfo contains information about JSON serialization of a type -type TypeInfo struct { - Type reflect.Type - Fields []FieldInfo -} - -func GetTypeInfoForValue(value interface{}) *TypeInfo { - return GetTypeInfo(reflect.TypeOf(value)) -} - -// GetTypeInfo returns TypeInfo for the given type. -func GetTypeInfo(t reflect.Type) *TypeInfo { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - typeInfosMutex.RLock() - typeInfo, exists := typeInfos[t] - typeInfosMutex.RUnlock() - if exists { - return typeInfo - } - if t.Kind() != reflect.Struct { - typeInfo = &TypeInfo{ - Type: t, - } - } else { - // Allocate - typeInfo = &TypeInfo{ - Type: t, - Fields: make([]FieldInfo, 0, 16), - } - - // Add fields - typeInfo.Fields = AppendFields(nil, nil, t) - - // Sort fields - sort.Sort(sortableFieldInfos(typeInfo.Fields)) - } - - // Publish - typeInfosMutex.Lock() - typeInfos[t] = typeInfo - typeInfosMutex.Unlock() - return typeInfo -} - -// FieldNames returns all field names -func (typeInfo *TypeInfo) FieldNames() []string { - fields := typeInfo.Fields - names := make([]string, len(fields)) - for i, field := range fields { - names[i] = field.JSONName - } - return names -} diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/unmarshal.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/unmarshal.go deleted file mode 100644 index 329718758..000000000 --- a/vendor/github.com/getkin/kin-openapi/jsoninfo/unmarshal.go +++ /dev/null @@ -1,121 +0,0 @@ -package jsoninfo - -import ( - "encoding/json" - "fmt" - "reflect" -) - -// UnmarshalStrictStruct function: -// * Unmarshals struct fields, ignoring UnmarshalJSON(...) and fields without 'json' tag. 
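Editor's note: a hedged sketch of the $ref helpers deleted above; the reference string is only an example value, not one used by this project:

package main

import (
	"fmt"

	"github.com/getkin/kin-openapi/jsoninfo"
)

func main() {
	// UnmarshalRef prefers the $ref property and only falls back to the
	// full object when no reference is present.
	var ref string
	var rest map[string]interface{}
	data := []byte(`{"$ref": "#/components/schemas/Example"}`)
	if err := jsoninfo.UnmarshalRef(data, &ref, &rest); err != nil {
		panic(err)
	}
	fmt.Println(ref) // #/components/schemas/Example

	// MarshalRef does the inverse: a non-empty ref wins over the value.
	out, err := jsoninfo.MarshalRef(ref, rest)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"$ref":"#/components/schemas/Example"}
}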
-// * Correctly handles StrictStruct -func UnmarshalStrictStruct(data []byte, value StrictStruct) error { - decoder, err := NewObjectDecoder(data) - if err != nil { - return err - } - return value.DecodeWith(decoder, value) -} - -type ObjectDecoder struct { - Data []byte - remainingFields map[string]json.RawMessage -} - -func NewObjectDecoder(data []byte) (*ObjectDecoder, error) { - var remainingFields map[string]json.RawMessage - if err := json.Unmarshal(data, &remainingFields); err != nil { - return nil, fmt.Errorf("Failed to unmarshal extension properties: %v\nInput: %s", err, data) - } - return &ObjectDecoder{ - Data: data, - remainingFields: remainingFields, - }, nil -} - -// DecodeExtensionMap returns all properties that were not decoded previously. -func (decoder *ObjectDecoder) DecodeExtensionMap() map[string]json.RawMessage { - return decoder.remainingFields -} - -func (decoder *ObjectDecoder) DecodeStructFieldsAndExtensions(value interface{}) error { - reflection := reflect.ValueOf(value) - if reflection.Kind() != reflect.Ptr { - panic(fmt.Errorf("Value %T is not a pointer", value)) - } - if reflection.IsNil() { - panic(fmt.Errorf("Value %T is nil", value)) - } - reflection = reflection.Elem() - for (reflection.Kind() == reflect.Interface || reflection.Kind() == reflect.Ptr) && !reflection.IsNil() { - reflection = reflection.Elem() - } - reflectionType := reflection.Type() - if reflectionType.Kind() != reflect.Struct { - panic(fmt.Errorf("Value %T is not a struct", value)) - } - typeInfo := GetTypeInfo(reflectionType) - - // Supported fields - fields := typeInfo.Fields - remainingFields := decoder.remainingFields - for fieldIndex, field := range fields { - // Fields without JSON tag are ignored - if !field.HasJSONTag { - continue - } - - // Get data - fieldData, exists := remainingFields[field.JSONName] - if !exists { - continue - } - - // Unmarshal - if field.TypeIsUnmarshaller { - fieldType := field.Type - isPtr := false - if fieldType.Kind() == reflect.Ptr { - fieldType = fieldType.Elem() - isPtr = true - } - fieldValue := reflect.New(fieldType) - if err := fieldValue.Interface().(json.Unmarshaler).UnmarshalJSON(fieldData); err != nil { - if field.MultipleFields { - i := fieldIndex + 1 - if i < len(fields) && fields[i].JSONName == field.JSONName { - continue - } - } - return fmt.Errorf("Error while unmarshalling property '%s' (%s): %v", - field.JSONName, fieldValue.Type().String(), err) - } - if !isPtr { - fieldValue = fieldValue.Elem() - } - reflection.FieldByIndex(field.Index).Set(fieldValue) - - // Remove the field from remaining fields - delete(remainingFields, field.JSONName) - } else { - fieldPtr := reflection.FieldByIndex(field.Index) - if fieldPtr.Kind() != reflect.Ptr || fieldPtr.IsNil() { - fieldPtr = fieldPtr.Addr() - } - if err := json.Unmarshal(fieldData, fieldPtr.Interface()); err != nil { - if field.MultipleFields { - i := fieldIndex + 1 - if i < len(fields) && fields[i].JSONName == field.JSONName { - continue - } - } - return fmt.Errorf("Error while unmarshalling property '%s' (%s): %v", - field.JSONName, fieldPtr.Type().String(), err) - } - - // Remove the field from remaining fields - delete(remainingFields, field.JSONName) - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/jsoninfo/unsupported_properties_error.go b/vendor/github.com/getkin/kin-openapi/jsoninfo/unsupported_properties_error.go deleted file mode 100644 index 258efef28..000000000 --- a/vendor/github.com/getkin/kin-openapi/jsoninfo/unsupported_properties_error.go +++ 
/dev/null @@ -1,45 +0,0 @@ -package jsoninfo - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// UnsupportedPropertiesError is a helper for extensions that want to refuse -// unsupported JSON object properties. -// -// It produces a helpful error message. -type UnsupportedPropertiesError struct { - Value interface{} - UnsupportedProperties map[string]json.RawMessage -} - -func NewUnsupportedPropertiesError(v interface{}, m map[string]json.RawMessage) error { - return &UnsupportedPropertiesError{ - Value: v, - UnsupportedProperties: m, - } -} - -func (err *UnsupportedPropertiesError) Error() string { - m := err.UnsupportedProperties - typeInfo := GetTypeInfoForValue(err.Value) - if m == nil || typeInfo == nil { - return "Invalid UnsupportedPropertiesError" - } - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - supported := typeInfo.FieldNames() - if len(supported) == 0 { - return fmt.Sprintf("Type '%T' doesn't take any properties. Unsupported properties: '%s'\n", - err.Value, strings.Join(keys, "', '")) - } - return fmt.Sprintf("Unsupported properties: '%s'\nSupported properties are: '%s'", - strings.Join(keys, "', '"), - strings.Join(supported, "', '")) -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/callback.go b/vendor/github.com/getkin/kin-openapi/openapi3/callback.go deleted file mode 100644 index 60196ba16..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/callback.go +++ /dev/null @@ -1,15 +0,0 @@ -package openapi3 - -import "context" - -// Callback is specified by OpenAPI/Swagger standard version 3.0. -type Callback map[string]*PathItem - -func (value Callback) Validate(c context.Context) error { - for _, v := range value { - if err := v.Validate(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/components.go b/vendor/github.com/getkin/kin-openapi/openapi3/components.go deleted file mode 100644 index 93a26cfae..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/components.go +++ /dev/null @@ -1,105 +0,0 @@ -package openapi3 - -import ( - "context" - "fmt" - "regexp" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -// Components is specified by OpenAPI/Swagger standard version 3.0. 
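Editor's note: purely illustrative use of the error helper removed above, as a strict decoder might report unknown properties (the property name is invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/getkin/kin-openapi/jsoninfo"
	"github.com/getkin/kin-openapi/openapi3"
)

func main() {
	unknown := map[string]json.RawMessage{
		"x-typo-field": json.RawMessage(`true`),
	}
	// The error message lists the offending keys next to the supported ones.
	err := jsoninfo.NewUnsupportedPropertiesError(&openapi3.Contact{}, unknown)
	fmt.Println(err)
}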
-type Components struct { - ExtensionProps - Schemas map[string]*SchemaRef `json:"schemas,omitempty"` - Parameters map[string]*ParameterRef `json:"parameters,omitempty"` - Headers map[string]*HeaderRef `json:"headers,omitempty"` - RequestBodies map[string]*RequestBodyRef `json:"requestBodies,omitempty"` - Responses map[string]*ResponseRef `json:"responses,omitempty"` - SecuritySchemes map[string]*SecuritySchemeRef `json:"securitySchemes,omitempty"` - Examples map[string]*ExampleRef `json:"examples,omitempty"` - Tags Tags `json:"tags,omitempty"` - Links map[string]*LinkRef `json:"links,omitempty"` - Callbacks map[string]*CallbackRef `json:"callbacks,omitempty"` -} - -func NewComponents() Components { - return Components{} -} - -func (components *Components) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(components) -} - -func (components *Components) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, components) -} - -func (components *Components) Validate(c context.Context) (err error) { - for k, v := range components.Schemas { - if err = ValidateIdentifier(k); err != nil { - return - } - if err = v.Validate(c); err != nil { - return - } - } - - for k, v := range components.Parameters { - if err = ValidateIdentifier(k); err != nil { - return - } - if err = v.Validate(c); err != nil { - return - } - } - - for k, v := range components.RequestBodies { - if err = ValidateIdentifier(k); err != nil { - return - } - if err = v.Validate(c); err != nil { - return - } - } - - for k, v := range components.Responses { - if err = ValidateIdentifier(k); err != nil { - return - } - if err = v.Validate(c); err != nil { - return - } - } - - for k, v := range components.Headers { - if err = ValidateIdentifier(k); err != nil { - return - } - if err = v.Validate(c); err != nil { - return - } - } - - for k, v := range components.SecuritySchemes { - if err = ValidateIdentifier(k); err != nil { - return - } - if err = v.Validate(c); err != nil { - return - } - } - - return -} - -const identifierPattern = `^[a-zA-Z0-9.\-_]+$` - -var identifierRegExp = regexp.MustCompile(identifierPattern) - -func ValidateIdentifier(value string) error { - if identifierRegExp.MatchString(value) { - return nil - } - return fmt.Errorf("Identifier '%s' is not supported by OpenAPI version 3 standard (regexp: '%s')", value, identifierPattern) -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/content.go b/vendor/github.com/getkin/kin-openapi/openapi3/content.go deleted file mode 100644 index 941c032ee..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/content.go +++ /dev/null @@ -1,71 +0,0 @@ -package openapi3 - -import ( - "context" - "strings" -) - -// Content is specified by OpenAPI/Swagger 3.0 standard. -type Content map[string]*MediaType - -func NewContent() Content { - return make(map[string]*MediaType, 4) -} - -func NewContentWithJSONSchema(schema *Schema) Content { - return Content{ - "application/json": NewMediaType().WithSchema(schema), - } -} -func NewContentWithJSONSchemaRef(schema *SchemaRef) Content { - return Content{ - "application/json": NewMediaType().WithSchemaRef(schema), - } -} - -func (content Content) Get(mime string) *MediaType { - // Start by making the most specific match possible - // by using the mime type in full. - if v := content[mime]; v != nil { - return v - } - // If an exact match is not found then we strip all - // metadata from the mime type and only use the x/y - // portion. 
- i := strings.IndexByte(mime, ';') - if i < 0 { - // If there is no metadata then preserve the full mime type - // string for later wildcard searches. - i = len(mime) - } - mime = mime[:i] - if v := content[mime]; v != nil { - return v - } - // If the x/y pattern has no specific match then we - // try the x/* pattern. - i = strings.IndexByte(mime, '/') - if i < 0 { - // In the case that the given mime type is not valid because it is - // missing the subtype we return nil so that this does not accidentally - // resolve with the wildcard. - return nil - } - mime = mime[:i] + "/*" - if v := content[mime]; v != nil { - return v - } - // Finally, the most generic match of */* is returned - // as a catch-all. - return content["*/*"] -} - -func (content Content) Validate(c context.Context) error { - for _, v := range content { - // Validate MediaType - if err := v.Validate(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/discriminator.go b/vendor/github.com/getkin/kin-openapi/openapi3/discriminator.go deleted file mode 100644 index 03e4d5be4..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/discriminator.go +++ /dev/null @@ -1,26 +0,0 @@ -package openapi3 - -import ( - "context" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -// Discriminator is specified by OpenAPI/Swagger standard version 3.0. -type Discriminator struct { - ExtensionProps - PropertyName string `json:"propertyName"` - Mapping map[string]string `json:"mapping,omitempty"` -} - -func (value *Discriminator) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(value) -} - -func (value *Discriminator) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, value) -} - -func (value *Discriminator) Validate(c context.Context) error { - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/doc.go b/vendor/github.com/getkin/kin-openapi/openapi3/doc.go deleted file mode 100644 index 9f9554962..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package openapi3 parses and writes OpenAPI 3 specifications. -// -// The OpenAPI 3.0 specification can be found at: -// https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.md -package openapi3 diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/encoding.go b/vendor/github.com/getkin/kin-openapi/openapi3/encoding.go deleted file mode 100644 index 6959cb585..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/encoding.go +++ /dev/null @@ -1,93 +0,0 @@ -package openapi3 - -import ( - "context" - "fmt" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -// Encoding is specified by OpenAPI/Swagger 3.0 standard. 
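Editor's note: a minimal sketch of the wildcard lookup implemented by Content.Get above; the media-type keys are arbitrary examples:

package main

import (
	"fmt"

	"github.com/getkin/kin-openapi/openapi3"
)

func main() {
	content := openapi3.Content{
		"application/json": openapi3.NewMediaType(),
		"text/*":           openapi3.NewMediaType(),
		"*/*":              openapi3.NewMediaType(),
	}

	// Exact match first, then parameters after ';' are stripped,
	// then the x/* wildcard, and finally the */* catch-all.
	fmt.Println(content.Get("application/json; charset=utf-8") != nil) // true (exact match after stripping)
	fmt.Println(content.Get("text/plain") != nil)                      // true (text/* wildcard)
	fmt.Println(content.Get("image/png") != nil)                       // true (*/* catch-all)
	fmt.Println(content.Get("json") != nil)                            // false (no subtype, no wildcard match)
}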
-type Encoding struct { - ExtensionProps - - ContentType string `json:"contentType,omitempty"` - Headers map[string]*HeaderRef `json:"headers,omitempty"` - Style string `json:"style,omitempty"` - Explode *bool `json:"explode,omitempty"` - AllowReserved bool `json:"allowReserved,omitempty"` -} - -func NewEncoding() *Encoding { - return &Encoding{} -} - -func (encoding *Encoding) WithHeader(name string, header *Header) *Encoding { - return encoding.WithHeaderRef(name, &HeaderRef{ - Value: header, - }) -} - -func (encoding *Encoding) WithHeaderRef(name string, ref *HeaderRef) *Encoding { - headers := encoding.Headers - if headers == nil { - headers = make(map[string]*HeaderRef) - encoding.Headers = headers - } - headers[name] = ref - return encoding -} - -func (encoding *Encoding) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(encoding) -} - -func (encoding *Encoding) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, encoding) -} - -// SerializationMethod returns a serialization method of request body. -// When serialization method is not defined the method returns the default serialization method. -func (encoding *Encoding) SerializationMethod() *SerializationMethod { - sm := &SerializationMethod{Style: SerializationForm, Explode: true} - if encoding != nil { - if encoding.Style != "" { - sm.Style = encoding.Style - } - if encoding.Explode != nil { - sm.Explode = *encoding.Explode - } - } - return sm -} - -func (encoding *Encoding) Validate(c context.Context) error { - if encoding == nil { - return nil - } - for k, v := range encoding.Headers { - if err := ValidateIdentifier(k); err != nil { - return nil - } - if err := v.Validate(c); err != nil { - return nil - } - } - - // Validate a media types's serialization method. - sm := encoding.SerializationMethod() - switch { - case sm.Style == SerializationForm && sm.Explode, - sm.Style == SerializationForm && !sm.Explode, - sm.Style == SerializationSpaceDelimited && sm.Explode, - sm.Style == SerializationSpaceDelimited && !sm.Explode, - sm.Style == SerializationPipeDelimited && sm.Explode, - sm.Style == SerializationPipeDelimited && !sm.Explode, - sm.Style == SerializationDeepObject && sm.Explode: - // it is a valid - default: - return fmt.Errorf("Serialization method with style=%q and explode=%v is not supported by media type", sm.Style, sm.Explode) - } - - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/examples.go b/vendor/github.com/getkin/kin-openapi/openapi3/examples.go deleted file mode 100644 index 50d6dda93..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/examples.go +++ /dev/null @@ -1,29 +0,0 @@ -package openapi3 - -import ( - "github.com/getkin/kin-openapi/jsoninfo" -) - -// Example is specified by OpenAPI/Swagger 3.0 standard. 
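Editor's note: a small sketch of the encoding defaults documented above (the header name is a placeholder); when style and explode are left unset, SerializationMethod falls back to form style with explode enabled:

package main

import (
	"fmt"

	"github.com/getkin/kin-openapi/openapi3"
)

func main() {
	enc := openapi3.NewEncoding().
		WithHeader("X-Request-Id", &openapi3.Header{Description: "placeholder header"})

	sm := enc.SerializationMethod()
	fmt.Println(sm.Style, sm.Explode) // form true
}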
-type Example struct { - ExtensionProps - - Summary string `json:"summary,omitempty"` - Description string `json:"description,omitempty"` - Value interface{} `json:"value,omitempty"` - ExternalValue string `json:"externalValue,omitempty"` -} - -func NewExample(value interface{}) *Example { - return &Example{ - Value: value, - } -} - -func (example *Example) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(example) -} - -func (example *Example) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, example) -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/extension.go b/vendor/github.com/getkin/kin-openapi/openapi3/extension.go deleted file mode 100644 index d6bee2cb0..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/extension.go +++ /dev/null @@ -1,37 +0,0 @@ -package openapi3 - -import ( - "github.com/getkin/kin-openapi/jsoninfo" -) - -// ExtensionProps provides support for OpenAPI extensions. -// It reads/writes all properties that begin with "x-". -type ExtensionProps struct { - Extensions map[string]interface{} `json:"-"` -} - -// Assert that the type implements the interface -var _ jsoninfo.StrictStruct = &ExtensionProps{} - -// EncodeWith will be invoked by package "jsoninfo" -func (props *ExtensionProps) EncodeWith(encoder *jsoninfo.ObjectEncoder, value interface{}) error { - for k, v := range props.Extensions { - if err := encoder.EncodeExtension(k, v); err != nil { - return err - } - } - return encoder.EncodeStructFieldsAndExtensions(value) -} - -// DecodeWith will be invoked by package "jsoninfo" -func (props *ExtensionProps) DecodeWith(decoder *jsoninfo.ObjectDecoder, value interface{}) error { - source := decoder.DecodeExtensionMap() - if len(source) > 0 { - result := make(map[string]interface{}, len(source)) - for k, v := range source { - result[k] = v - } - props.Extensions = result - } - return decoder.DecodeStructFieldsAndExtensions(value) -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/external_docs.go b/vendor/github.com/getkin/kin-openapi/openapi3/external_docs.go deleted file mode 100644 index 7f606a43f..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/external_docs.go +++ /dev/null @@ -1,7 +0,0 @@ -package openapi3 - -// ExternalDocs is specified by OpenAPI/Swagger standard version 3.0. -type ExternalDocs struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/header.go b/vendor/github.com/getkin/kin-openapi/openapi3/header.go deleted file mode 100644 index 31571de90..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/header.go +++ /dev/null @@ -1,24 +0,0 @@ -package openapi3 - -import ( - "context" -) - -type Header struct { - ExtensionProps - - // Optional description. Should use CommonMark syntax. 
- Description string `json:"description,omitempty"` - - // Optional schema - Schema *SchemaRef `json:"schema,omitempty"` -} - -func (value *Header) Validate(c context.Context) error { - if v := value.Schema; v != nil { - if err := v.Validate(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/info.go b/vendor/github.com/getkin/kin-openapi/openapi3/info.go deleted file mode 100644 index 3cff33072..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/info.go +++ /dev/null @@ -1,55 +0,0 @@ -package openapi3 - -import ( - "github.com/getkin/kin-openapi/jsoninfo" -) - -// Info is specified by OpenAPI/Swagger standard version 3.0. -type Info struct { - ExtensionProps - Title string `json:"title,omitempty"` - Description string `json:"description,omitempty"` - TermsOfService string `json:"termsOfService,omitempty"` - Contact *Contact `json:"contact,omitempty"` - License *License `json:"license,omitempty"` - Version string `json:"version,omitempty"` -} - -func (value *Info) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(value) -} - -func (value *Info) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, value) -} - -// Contact is specified by OpenAPI/Swagger standard version 3.0. -type Contact struct { - ExtensionProps - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} - -func (value *Contact) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(value) -} - -func (value *Contact) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, value) -} - -// License is specified by OpenAPI/Swagger standard version 3.0. -type License struct { - ExtensionProps - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} - -func (value *License) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(value) -} - -func (value *License) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, value) -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/link.go b/vendor/github.com/getkin/kin-openapi/openapi3/link.go deleted file mode 100644 index 59ba87d7b..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/link.go +++ /dev/null @@ -1,29 +0,0 @@ -package openapi3 - -import ( - "context" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -// Link is specified by OpenAPI/Swagger standard version 3.0. -type Link struct { - ExtensionProps - Description string `json:"description,omitempty"` - Href string `json:"href,omitempty"` - OperationID string `json:"operationId,omitempty"` - Parameters map[string]interface{} `json:"parameters,omitempty"` - Headers map[string]*Schema `json:"headers,omitempty"` -} - -func (value *Link) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(value) -} - -func (value *Link) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, value) -} - -func (value *Link) Validate(c context.Context) error { - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/media_type.go b/vendor/github.com/getkin/kin-openapi/openapi3/media_type.go deleted file mode 100644 index 09713c11b..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/media_type.go +++ /dev/null @@ -1,79 +0,0 @@ -package openapi3 - -import ( - "context" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -// MediaType is specified by OpenAPI/Swagger 3.0 standard. 
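Editor's note: an illustrative round-trip through the ExtensionProps machinery above, using the Info object; the x-audience property and the title/version values are invented for the example:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/getkin/kin-openapi/openapi3"
)

func main() {
	raw := []byte(`{"title": "Example API", "version": "1.0.0", "x-audience": "internal"}`)

	var info openapi3.Info
	if err := json.Unmarshal(raw, &info); err != nil {
		panic(err)
	}

	// Title/Version are decoded onto the struct; the x- property is also
	// retained in Extensions as raw JSON.
	fmt.Println(info.Title, info.Version)
	fmt.Printf("%s\n", info.Extensions["x-audience"]) // prints "internal" (raw JSON, quotes included)
}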
-type MediaType struct { - ExtensionProps - - Schema *SchemaRef `json:"schema,omitempty"` - Example interface{} `json:"example,omitempty"` - Examples map[string]*ExampleRef `json:"examples,omitempty"` - Encoding map[string]*Encoding `json:"encoding,omitempty"` -} - -func NewMediaType() *MediaType { - return &MediaType{} -} - -func (mediaType *MediaType) WithSchema(schema *Schema) *MediaType { - if schema == nil { - mediaType.Schema = nil - } else { - mediaType.Schema = &SchemaRef{ - Value: schema, - } - } - return mediaType -} - -func (mediaType *MediaType) WithSchemaRef(schema *SchemaRef) *MediaType { - mediaType.Schema = schema - return mediaType -} - -func (mediaType *MediaType) WithExample(name string, value interface{}) *MediaType { - example := mediaType.Examples - if example == nil { - example = make(map[string]*ExampleRef) - mediaType.Examples = example - } - example[name] = &ExampleRef{ - Value: NewExample(value), - } - return mediaType -} - -func (mediaType *MediaType) WithEncoding(name string, enc *Encoding) *MediaType { - encoding := mediaType.Encoding - if encoding == nil { - encoding = make(map[string]*Encoding) - mediaType.Encoding = encoding - } - encoding[name] = enc - return mediaType -} - -func (mediaType *MediaType) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(mediaType) -} - -func (mediaType *MediaType) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, mediaType) -} - -func (mediaType *MediaType) Validate(c context.Context) error { - if mediaType == nil { - return nil - } - if schema := mediaType.Schema; schema != nil { - if err := schema.Validate(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/operation.go b/vendor/github.com/getkin/kin-openapi/openapi3/operation.go deleted file mode 100644 index eb2aa6f1d..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/operation.go +++ /dev/null @@ -1,99 +0,0 @@ -package openapi3 - -import ( - "context" - "strconv" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -// Operation represents "operation" specified by" OpenAPI/Swagger 3.0 standard. -type Operation struct { - ExtensionProps - - // Optional tags for documentation. - Tags []string `json:"tags,omitempty"` - - // Optional short summary. - Summary string `json:"summary,omitempty"` - - // Optional description. Should use CommonMark syntax. - Description string `json:"description,omitempty"` - - // Optional operation ID. - OperationID string `json:"operationId,omitempty"` - - // Optional parameters. - Parameters Parameters `json:"parameters,omitempty"` - - // Optional body parameter. - RequestBody *RequestBodyRef `json:"requestBody,omitempty"` - - // Optional responses. - Responses Responses `json:"responses,omitempty"` - - // Optional callbacks - Callbacks map[string]*CallbackRef `json:"callbacks,omitempty"` - - Deprecated bool `json:"deprecated,omitempty"` - - // Optional security requirements that overrides top-level security. - Security *SecurityRequirements `json:"security,omitempty"` - - // Optional servers that overrides top-level servers. 
- Servers *Servers `json:"servers,omitempty"` -} - -func NewOperation() *Operation { - return &Operation{} -} - -func (operation *Operation) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(operation) -} - -func (operation *Operation) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, operation) -} - -func (operation *Operation) AddParameter(p *Parameter) { - operation.Parameters = append(operation.Parameters, &ParameterRef{ - Value: p, - }) -} - -func (operation *Operation) AddResponse(status int, response *Response) { - responses := operation.Responses - if responses == nil { - responses = NewResponses() - operation.Responses = responses - } - if status == 0 { - responses["default"] = &ResponseRef{ - Value: response, - } - } else { - responses[strconv.FormatInt(int64(status), 10)] = &ResponseRef{ - Value: response, - } - } -} - -func (operation *Operation) Validate(c context.Context) error { - if v := operation.Parameters; v != nil { - if err := v.Validate(c); err != nil { - return err - } - } - if v := operation.RequestBody; v != nil { - if err := v.Validate(c); err != nil { - return err - } - } - if v := operation.Responses; v != nil { - if err := v.Validate(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/parameter.go b/vendor/github.com/getkin/kin-openapi/openapi3/parameter.go deleted file mode 100644 index 32d4bbbeb..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/parameter.go +++ /dev/null @@ -1,228 +0,0 @@ -package openapi3 - -import ( - "context" - "errors" - "fmt" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -// Parameters is specified by OpenAPI/Swagger 3.0 standard. -type Parameters []*ParameterRef - -func NewParameters() Parameters { - return make(Parameters, 0, 4) -} - -func (parameters Parameters) GetByInAndName(in string, name string) *Parameter { - for _, item := range parameters { - if v := item.Value; v != nil { - if v.Name == name && v.In == in { - return v - } - } - } - return nil -} - -func (parameters Parameters) Validate(c context.Context) error { - m := make(map[string]struct{}) - for _, item := range parameters { - if err := item.Validate(c); err != nil { - return err - } - if v := item.Value; v != nil { - in := v.In - name := v.Name - key := in + ":" + name - if _, exists := m[key]; exists { - return fmt.Errorf("More than one '%s' parameter has name '%s'", in, name) - } - m[key] = struct{}{} - if err := item.Validate(c); err != nil { - return err - } - } - } - return nil -} - -// Parameter is specified by OpenAPI/Swagger 3.0 standard. 
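Editor's note: a brief sketch tying the Operation helpers above to the Parameter constructors that follow (names and descriptions are placeholders); query parameters default to form style with explode enabled:

package main

import (
	"fmt"

	"github.com/getkin/kin-openapi/openapi3"
)

func main() {
	limit := openapi3.NewQueryParameter("limit").
		WithDescription("maximum number of items to return").
		WithRequired(false)

	op := openapi3.NewOperation()
	op.Summary = "List resources"
	op.AddParameter(limit)

	// With no explicit style/explode, a query parameter serializes as form+explode.
	sm, err := limit.SerializationMethod()
	if err != nil {
		panic(err)
	}
	fmt.Println(sm.Style, sm.Explode) // form true
	fmt.Println(len(op.Parameters))   // 1
}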
-type Parameter struct { - ExtensionProps - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Description string `json:"description,omitempty"` - Style string `json:"style,omitempty"` - Explode *bool `json:"explode,omitempty"` - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` - AllowReserved bool `json:"allowReserved,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - Required bool `json:"required,omitempty"` - Schema *SchemaRef `json:"schema,omitempty"` - Example interface{} `json:"example,omitempty"` - Examples map[string]*ExampleRef `json:"examples,omitempty"` - Content Content `json:"content,omitempty"` -} - -const ( - ParameterInPath = "path" - ParameterInQuery = "query" - ParameterInHeader = "header" - ParameterInCookie = "cookie" -) - -func NewPathParameter(name string) *Parameter { - return &Parameter{ - Name: name, - In: ParameterInPath, - Required: true, - } -} - -func NewQueryParameter(name string) *Parameter { - return &Parameter{ - Name: name, - In: ParameterInQuery, - } -} - -func NewHeaderParameter(name string) *Parameter { - return &Parameter{ - Name: name, - In: ParameterInHeader, - } -} - -func NewCookieParameter(name string) *Parameter { - return &Parameter{ - Name: name, - In: ParameterInCookie, - } -} - -func (parameter *Parameter) WithDescription(value string) *Parameter { - parameter.Description = value - return parameter -} - -func (parameter *Parameter) WithRequired(value bool) *Parameter { - parameter.Required = value - return parameter -} - -func (parameter *Parameter) WithSchema(value *Schema) *Parameter { - if value == nil { - parameter.Schema = nil - } else { - parameter.Schema = &SchemaRef{ - Value: value, - } - } - return parameter -} - -func (parameter *Parameter) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(parameter) -} - -func (parameter *Parameter) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, parameter) -} - -// SerializationMethod returns a parameter's serialization method. -// When a parameter's serialization method is not defined the method returns -// the default serialization method corresponding to a parameter's location. -func (parameter *Parameter) SerializationMethod() (*SerializationMethod, error) { - switch parameter.In { - case ParameterInPath, ParameterInHeader: - style := parameter.Style - if style == "" { - style = SerializationSimple - } - explode := false - if parameter.Explode != nil { - explode = *parameter.Explode - } - return &SerializationMethod{Style: style, Explode: explode}, nil - case ParameterInQuery, ParameterInCookie: - style := parameter.Style - if style == "" { - style = SerializationForm - } - explode := true - if parameter.Explode != nil { - explode = *parameter.Explode - } - return &SerializationMethod{Style: style, Explode: explode}, nil - default: - return nil, fmt.Errorf("unexpected parameter's 'in': %q", parameter.In) - } -} - -func (parameter *Parameter) Validate(c context.Context) error { - if parameter.Name == "" { - return errors.New("Parameter name can't be blank") - } - in := parameter.In - switch in { - case - ParameterInPath, - ParameterInQuery, - ParameterInHeader, - ParameterInCookie: - default: - return fmt.Errorf("Parameter can't have 'in' value '%s'", parameter.In) - } - - // Validate a parameter's serialization method. 
- sm, err := parameter.SerializationMethod() - if err != nil { - return err - } - var smSupported bool - switch { - case parameter.In == ParameterInPath && sm.Style == SerializationSimple && !sm.Explode, - parameter.In == ParameterInPath && sm.Style == SerializationSimple && sm.Explode, - parameter.In == ParameterInPath && sm.Style == SerializationLabel && !sm.Explode, - parameter.In == ParameterInPath && sm.Style == SerializationLabel && sm.Explode, - parameter.In == ParameterInPath && sm.Style == SerializationMatrix && !sm.Explode, - parameter.In == ParameterInPath && sm.Style == SerializationMatrix && sm.Explode, - - parameter.In == ParameterInQuery && sm.Style == SerializationForm && sm.Explode, - parameter.In == ParameterInQuery && sm.Style == SerializationForm && !sm.Explode, - parameter.In == ParameterInQuery && sm.Style == SerializationSpaceDelimited && sm.Explode, - parameter.In == ParameterInQuery && sm.Style == SerializationSpaceDelimited && !sm.Explode, - parameter.In == ParameterInQuery && sm.Style == SerializationPipeDelimited && sm.Explode, - parameter.In == ParameterInQuery && sm.Style == SerializationPipeDelimited && !sm.Explode, - parameter.In == ParameterInQuery && sm.Style == SerializationDeepObject && sm.Explode, - - parameter.In == ParameterInHeader && sm.Style == SerializationSimple && !sm.Explode, - parameter.In == ParameterInHeader && sm.Style == SerializationSimple && sm.Explode, - - parameter.In == ParameterInCookie && sm.Style == SerializationForm && !sm.Explode, - parameter.In == ParameterInCookie && sm.Style == SerializationForm && sm.Explode: - smSupported = true - } - if !smSupported { - return fmt.Errorf("Parameter '%v' schema is invalid: %v", parameter.Name, - fmt.Errorf("Serialization method with style=%q and explode=%v is not supported by a %s parameter", sm.Style, sm.Explode, in)) - } - - if parameter.Schema != nil && parameter.Content != nil { - return fmt.Errorf("Parameter '%v' schema is invalid: %v", parameter.Name, - errors.New("Cannot contain both schema and content in a parameter")) - } - if schema := parameter.Schema; schema != nil { - if err := schema.Validate(c); err != nil { - return fmt.Errorf("Parameter '%v' schema is invalid: %v", parameter.Name, err) - } - } - if content := parameter.Content; content != nil { - if err := content.Validate(c); err != nil { - return fmt.Errorf("Parameter content is invalid: %v", err) - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/path_item.go b/vendor/github.com/getkin/kin-openapi/openapi3/path_item.go deleted file mode 100644 index fe4ac47e3..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/path_item.go +++ /dev/null @@ -1,124 +0,0 @@ -package openapi3 - -import ( - "context" - "fmt" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -type PathItem struct { - ExtensionProps - Summary string `json:"summary,omitempty"` - Description string `json:"description,omitempty"` - Connect *Operation `json:"connect,omitempty"` - Delete *Operation `json:"delete,omitempty"` - Get *Operation `json:"get,omitempty"` - Head *Operation `json:"head,omitempty"` - Options *Operation `json:"options,omitempty"` - Patch *Operation `json:"patch,omitempty"` - Post *Operation `json:"post,omitempty"` - Put *Operation `json:"put,omitempty"` - Trace *Operation `json:"trace,omitempty"` - Servers Servers `json:"servers,omitempty"` - Parameters Parameters `json:"parameters,omitempty"` -} - -func (pathItem *PathItem) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(pathItem) 
-} - -func (pathItem *PathItem) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, pathItem) -} - -func (pathItem *PathItem) Operations() map[string]*Operation { - operations := make(map[string]*Operation, 4) - if v := pathItem.Connect; v != nil { - operations["CONNECT"] = v - } - if v := pathItem.Delete; v != nil { - operations["DELETE"] = v - } - if v := pathItem.Get; v != nil { - operations["GET"] = v - } - if v := pathItem.Head; v != nil { - operations["HEAD"] = v - } - if v := pathItem.Options; v != nil { - operations["OPTIONS"] = v - } - if v := pathItem.Patch; v != nil { - operations["PATCH"] = v - } - if v := pathItem.Post; v != nil { - operations["POST"] = v - } - if v := pathItem.Put; v != nil { - operations["PUT"] = v - } - if v := pathItem.Trace; v != nil { - operations["TRACE"] = v - } - return operations -} - -func (pathItem *PathItem) GetOperation(method string) *Operation { - switch method { - case "CONNECT": - return pathItem.Connect - case "DELETE": - return pathItem.Delete - case "GET": - return pathItem.Get - case "HEAD": - return pathItem.Head - case "OPTIONS": - return pathItem.Options - case "PATCH": - return pathItem.Patch - case "POST": - return pathItem.Post - case "PUT": - return pathItem.Put - case "TRACE": - return pathItem.Trace - default: - panic(fmt.Errorf("Unsupported HTTP method '%s'", method)) - } -} - -func (pathItem *PathItem) SetOperation(method string, operation *Operation) { - switch method { - case "CONNECT": - pathItem.Connect = operation - case "DELETE": - pathItem.Delete = operation - case "GET": - pathItem.Get = operation - case "HEAD": - pathItem.Head = operation - case "OPTIONS": - pathItem.Options = operation - case "PATCH": - pathItem.Patch = operation - case "POST": - pathItem.Post = operation - case "PUT": - pathItem.Put = operation - case "TRACE": - pathItem.Trace = operation - default: - panic(fmt.Errorf("Unsupported HTTP method '%s'", method)) - } -} - -func (pathItem *PathItem) Validate(c context.Context) error { - for _, operation := range pathItem.Operations() { - if err := operation.Validate(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/paths.go b/vendor/github.com/getkin/kin-openapi/openapi3/paths.go deleted file mode 100644 index 3b46b39a2..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/paths.go +++ /dev/null @@ -1,99 +0,0 @@ -package openapi3 - -import ( - "context" - "fmt" - "strings" -) - -// Paths is specified by OpenAPI/Swagger standard version 3.0. -type Paths map[string]*PathItem - -func (paths Paths) Validate(c context.Context) error { - normalizedPaths := make(map[string]string) - for path, pathItem := range paths { - normalizedPath := normalizePathKey(path) - if oldPath, exists := normalizedPaths[normalizedPath]; exists { - return fmt.Errorf("Conflicting paths '%v' and '%v'", path, oldPath) - } - if path == "" || path[0] != '/' { - return fmt.Errorf("Path '%v' does not start with '/'", path) - } - if strings.Contains(path, "//") { - return fmt.Errorf("Path '%v' contains '//'", path) - } - normalizedPaths[path] = path - if err := pathItem.Validate(c); err != nil { - return err - } - } - return nil -} - -// Find returns a path that matches the key. -// -// The method ignores differences in template variable names (except possible "*" suffix). 
-// -// For example: -// -// paths := openapi3.Paths { -// "/person/{personName}": &openapi3.PathItem{}, -// } -// pathItem := path.Find("/person/{name}") -// -// would return the correct path item. -func (paths Paths) Find(key string) *PathItem { - // Try directly access the map - pathItem := paths[key] - if pathItem != nil { - return pathItem - } - - // Use normalized keys - normalizedSearchedPath := normalizePathKey(key) - for path, pathItem := range paths { - normalizedPath := normalizePathKey(path) - if normalizedPath == normalizedSearchedPath { - return pathItem - } - } - return nil -} - -func normalizePathKey(key string) string { - // If the argument has no path variables, return the argument - if strings.IndexByte(key, '{') < 0 { - return key - } - - // Allocate buffer - buf := make([]byte, 0, len(key)) - - // Visit each byte - isVariable := false - for i := 0; i < len(key); i++ { - c := key[i] - if isVariable { - if c == '}' { - // End path variables - // First append possible '*' before this character - // The character '}' will be appended - if i > 0 && key[i-1] == '*' { - buf = append(buf, '*') - } - isVariable = false - } else { - // Skip this character - continue - } - } else if c == '{' { - // Begin path variable - // The character '{' will be appended - isVariable = true - } - - // Append the character - buf = append(buf, c) - } - return string(buf) -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/refs.go b/vendor/github.com/getkin/kin-openapi/openapi3/refs.go deleted file mode 100644 index 9790b4705..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/refs.go +++ /dev/null @@ -1,199 +0,0 @@ -package openapi3 - -import ( - "context" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -type CallbackRef struct { - Ref string - Value *Callback -} - -func (value *CallbackRef) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalRef(value.Ref, value.Value) -} - -func (value *CallbackRef) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) -} - -func (value *CallbackRef) Validate(c context.Context) error { - v := value.Value - if v == nil { - return foundUnresolvedRef(value.Ref) - } - return v.Validate(c) -} - -type ExampleRef struct { - Ref string - Value *Example -} - -func (value *ExampleRef) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalRef(value.Ref, value.Value) -} - -func (value *ExampleRef) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) -} - -func (value *ExampleRef) Validate(c context.Context) error { - return nil -} - -type HeaderRef struct { - Ref string - Value *Header -} - -func (value *HeaderRef) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalRef(value.Ref, value.Value) -} - -func (value *HeaderRef) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) -} - -func (value *HeaderRef) Validate(c context.Context) error { - v := value.Value - if v == nil { - return foundUnresolvedRef(value.Ref) - } - return v.Validate(c) -} - -type LinkRef struct { - Ref string - Value *Link -} - -func (value *LinkRef) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalRef(value.Ref, value.Value) -} - -func (value *LinkRef) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) -} - -func (value *LinkRef) Validate(c context.Context) error { - v := value.Value - if v == nil { - return foundUnresolvedRef(value.Ref) - } - return v.Validate(c) -} - -type 
ParameterRef struct { - Ref string - Value *Parameter -} - -func (value *ParameterRef) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalRef(value.Ref, value.Value) -} - -func (value *ParameterRef) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) -} - -func (value *ParameterRef) Validate(c context.Context) error { - v := value.Value - if v == nil { - return foundUnresolvedRef(value.Ref) - } - return v.Validate(c) -} - -type ResponseRef struct { - Ref string - Value *Response -} - -func (value *ResponseRef) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalRef(value.Ref, value.Value) -} - -func (value *ResponseRef) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) -} - -func (value *ResponseRef) Validate(c context.Context) error { - v := value.Value - if v == nil { - return foundUnresolvedRef(value.Ref) - } - return v.Validate(c) -} - -type RequestBodyRef struct { - Ref string - Value *RequestBody -} - -func (value *RequestBodyRef) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalRef(value.Ref, value.Value) -} - -func (value *RequestBodyRef) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) -} - -func (value *RequestBodyRef) Validate(c context.Context) error { - v := value.Value - if v == nil { - return foundUnresolvedRef(value.Ref) - } - return v.Validate(c) -} - -type SchemaRef struct { - Ref string - Value *Schema -} - -func NewSchemaRef(ref string, value *Schema) *SchemaRef { - return &SchemaRef{ - Ref: ref, - Value: value, - } -} - -func (value *SchemaRef) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalRef(value.Ref, value.Value) -} - -func (value *SchemaRef) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) -} - -func (value *SchemaRef) Validate(c context.Context) error { - v := value.Value - if v == nil { - return foundUnresolvedRef(value.Ref) - } - return v.Validate(c) -} - -type SecuritySchemeRef struct { - Ref string - Value *SecurityScheme -} - -func (value *SecuritySchemeRef) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalRef(value.Ref, value.Value) -} - -func (value *SecuritySchemeRef) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalRef(data, &value.Ref, &value.Value) -} - -func (value *SecuritySchemeRef) Validate(c context.Context) error { - v := value.Value - if v == nil { - return foundUnresolvedRef(value.Ref) - } - return v.Validate(c) -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/request_body.go b/vendor/github.com/getkin/kin-openapi/openapi3/request_body.go deleted file mode 100644 index 1175d6e4d..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/request_body.go +++ /dev/null @@ -1,69 +0,0 @@ -package openapi3 - -import ( - "context" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -// RequestBody is specified by OpenAPI/Swagger 3.0 standard. 
-type RequestBody struct { - ExtensionProps - Description string `json:"description,omitempty"` - Required bool `json:"required,omitempty"` - Content Content `json:"content,omitempty"` -} - -func NewRequestBody() *RequestBody { - return &RequestBody{} -} - -func (requestBody *RequestBody) WithDescription(value string) *RequestBody { - requestBody.Description = value - return requestBody -} - -func (requestBody *RequestBody) WithRequired(value bool) *RequestBody { - requestBody.Required = value - return requestBody -} - -func (requestBody *RequestBody) WithContent(content Content) *RequestBody { - requestBody.Content = content - return requestBody -} - -func (requestBody *RequestBody) WithJSONSchemaRef(value *SchemaRef) *RequestBody { - requestBody.Content = NewContentWithJSONSchemaRef(value) - return requestBody -} - -func (requestBody *RequestBody) WithJSONSchema(value *Schema) *RequestBody { - requestBody.Content = NewContentWithJSONSchema(value) - return requestBody -} - -func (requestBody *RequestBody) GetMediaType(mediaType string) *MediaType { - m := requestBody.Content - if m == nil { - return nil - } - return m[mediaType] -} - -func (requestBody *RequestBody) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(requestBody) -} - -func (requestBody *RequestBody) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, requestBody) -} - -func (requestBody *RequestBody) Validate(c context.Context) error { - if v := requestBody.Content; v != nil { - if err := v.Validate(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/response.go b/vendor/github.com/getkin/kin-openapi/openapi3/response.go deleted file mode 100644 index 10c309e83..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/response.go +++ /dev/null @@ -1,82 +0,0 @@ -package openapi3 - -import ( - "context" - "strconv" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -// Responses is specified by OpenAPI/Swagger 3.0 standard. -type Responses map[string]*ResponseRef - -func NewResponses() Responses { - return make(Responses, 8) -} - -func (responses Responses) Default() *ResponseRef { - return responses["default"] -} - -func (responses Responses) Get(status int) *ResponseRef { - return responses[strconv.FormatInt(int64(status), 10)] -} - -func (responses Responses) Validate(c context.Context) error { - for _, v := range responses { - if err := v.Validate(c); err != nil { - return err - } - } - return nil -} - -// Response is specified by OpenAPI/Swagger 3.0 standard. 
-type Response struct { - ExtensionProps - Description string `json:"description,omitempty"` - Headers map[string]*HeaderRef `json:"headers,omitempty"` - Content Content `json:"content,omitempty"` - Links map[string]*LinkRef `json:"links,omitempty"` -} - -func NewResponse() *Response { - return &Response{} -} - -func (response *Response) WithDescription(value string) *Response { - response.Description = value - return response -} - -func (response *Response) WithContent(content Content) *Response { - response.Content = content - return response -} - -func (response *Response) WithJSONSchema(schema *Schema) *Response { - response.Content = NewContentWithJSONSchema(schema) - return response -} - -func (response *Response) WithJSONSchemaRef(schema *SchemaRef) *Response { - response.Content = NewContentWithJSONSchemaRef(schema) - return response -} - -func (response *Response) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(response) -} - -func (response *Response) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, response) -} - -func (response *Response) Validate(c context.Context) error { - if content := response.Content; content != nil { - if err := content.Validate(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/schema.go b/vendor/github.com/getkin/kin-openapi/openapi3/schema.go deleted file mode 100644 index 6fb768d12..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/schema.go +++ /dev/null @@ -1,1222 +0,0 @@ -package openapi3 - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "math" - "math/big" - "regexp" - "strconv" - "unicode/utf16" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -var ( - // SchemaErrorDetailsDisabled disables printing of details about schema errors. - SchemaErrorDetailsDisabled = false - - errSchema = errors.New("Input does not match the schema") - - ErrSchemaInputNaN = errors.New("NaN is not allowed") - ErrSchemaInputInf = errors.New("Inf is not allowed") -) - -// Float64Ptr is a helper for defining OpenAPI schemas. -func Float64Ptr(value float64) *float64 { - return &value -} - -// BoolPtr is a helper for defining OpenAPI schemas. -func BoolPtr(value bool) *bool { - return &value -} - -// Int64Ptr is a helper for defining OpenAPI schemas. -func Int64Ptr(value int64) *int64 { - return &value -} - -// Uint64Ptr is a helper for defining OpenAPI schemas. -func Uint64Ptr(value uint64) *uint64 { - return &value -} - -// Schema is specified by OpenAPI/Swagger 3.0 standard. 
-type Schema struct { - ExtensionProps - - OneOf []*SchemaRef `json:"oneOf,omitempty"` - AnyOf []*SchemaRef `json:"anyOf,omitempty"` - AllOf []*SchemaRef `json:"allOf,omitempty"` - Not *SchemaRef `json:"not,omitempty"` - Type string `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Description string `json:"description,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - Default interface{} `json:"default,omitempty"` - Example interface{} `json:"example,omitempty"` - ExternalDocs interface{} `json:"externalDocs,omitempty"` - - // Object-related, here for struct compactness - AdditionalPropertiesAllowed *bool `json:"-" multijson:"additionalProperties,omitempty"` - // Array-related, here for struct compactness - UniqueItems bool `json:"uniqueItems,omitempty"` - // Number-related, here for struct compactness - ExclusiveMin bool `json:"exclusiveMinimum,omitempty"` - ExclusiveMax bool `json:"exclusiveMaximum,omitempty"` - // Properties - Nullable bool `json:"nullable,omitempty"` - ReadOnly bool `json:"readOnly,omitempty"` - WriteOnly bool `json:"writeOnly,omitempty"` - XML interface{} `json:"xml,omitempty"` - - // Number - Min *float64 `json:"minimum,omitempty"` - Max *float64 `json:"maximum,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - - // String - MinLength uint64 `json:"minLength,omitempty"` - MaxLength *uint64 `json:"maxLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - compiledPattern *compiledPattern - - // Array - MinItems uint64 `json:"minItems,omitempty"` - MaxItems *uint64 `json:"maxItems,omitempty"` - Items *SchemaRef `json:"items,omitempty"` - - // Object - Required []string `json:"required,omitempty"` - Properties map[string]*SchemaRef `json:"properties,omitempty"` - MinProps uint64 `json:"minProperties,omitempty"` - MaxProps *uint64 `json:"maxProperties,omitempty"` - AdditionalProperties *SchemaRef `json:"-" multijson:"additionalProperties,omitempty"` - Discriminator *Discriminator `json:"discriminator,omitempty"` - - PatternProperties string `json:"patternProperties,omitempty"` - compiledPatternProperties *compiledPattern -} - -func NewSchema() *Schema { - return &Schema{} -} - -func (schema *Schema) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(schema) -} - -func (schema *Schema) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, schema) -} - -func (schema *Schema) NewRef() *SchemaRef { - return &SchemaRef{ - Value: schema, - } -} - -func NewOneOfSchema(schemas ...*Schema) *Schema { - refs := make([]*SchemaRef, len(schemas)) - for i, schema := range schemas { - refs[i] = &SchemaRef{Value: schema} - } - return &Schema{ - OneOf: refs, - } -} - -func NewAnyOfSchema(schemas ...*Schema) *Schema { - refs := make([]*SchemaRef, len(schemas)) - for i, schema := range schemas { - refs[i] = &SchemaRef{Value: schema} - } - return &Schema{ - AnyOf: refs, - } -} - -func NewAllOfSchema(schemas ...*Schema) *Schema { - refs := make([]*SchemaRef, len(schemas)) - for i, schema := range schemas { - refs[i] = &SchemaRef{Value: schema} - } - return &Schema{ - AllOf: refs, - } -} - -func NewBoolSchema() *Schema { - return &Schema{ - Type: "boolean", - } -} - -func NewFloat64Schema() *Schema { - return &Schema{ - Type: "number", - } -} - -func NewIntegerSchema() *Schema { - return &Schema{ - Type: "integer", - } -} - -func NewInt32Schema() *Schema { - return &Schema{ - Type: "integer", - Format: "int32", - } -} - -func NewInt64Schema() *Schema { - return &Schema{ - Type: "integer", - Format: 
"int64", - } -} - -func NewStringSchema() *Schema { - return &Schema{ - Type: "string", - } -} - -func NewDateTimeSchema() *Schema { - return &Schema{ - Type: "string", - Format: "date-time", - } -} - -func NewBytesSchema() *Schema { - return &Schema{ - Type: "string", - Format: "byte", - } -} - -func NewArraySchema() *Schema { - return &Schema{ - Type: "array", - } -} - -func NewObjectSchema() *Schema { - return &Schema{ - Type: "object", - Properties: make(map[string]*SchemaRef), - } -} - -type compiledPattern struct { - Regexp *regexp.Regexp - ErrReason string -} - -func (schema *Schema) WithNullable() *Schema { - schema.Nullable = true - return schema -} - -func (schema *Schema) WithMin(value float64) *Schema { - schema.Min = &value - return schema -} - -func (schema *Schema) WithMax(value float64) *Schema { - schema.Max = &value - return schema -} -func (schema *Schema) WithExclusiveMin(value bool) *Schema { - schema.ExclusiveMin = value - return schema -} - -func (schema *Schema) WithExclusiveMax(value bool) *Schema { - schema.ExclusiveMax = value - return schema -} - -func (schema *Schema) WithEnum(values ...interface{}) *Schema { - schema.Enum = values - return schema -} - -func (schema *Schema) WithFormat(value string) *Schema { - schema.Format = value - return schema -} - -func (schema *Schema) WithLength(i int64) *Schema { - n := uint64(i) - schema.MinLength = n - schema.MaxLength = &n - return schema -} - -func (schema *Schema) WithMinLength(i int64) *Schema { - n := uint64(i) - schema.MinLength = n - return schema -} - -func (schema *Schema) WithMaxLength(i int64) *Schema { - n := uint64(i) - schema.MaxLength = &n - return schema -} - -func (schema *Schema) WithLengthDecodedBase64(i int64) *Schema { - n := uint64(i) - v := (n*8 + 5) / 6 - schema.MinLength = v - schema.MaxLength = &v - return schema -} - -func (schema *Schema) WithMinLengthDecodedBase64(i int64) *Schema { - n := uint64(i) - schema.MinLength = (n*8 + 5) / 6 - return schema -} - -func (schema *Schema) WithMaxLengthDecodedBase64(i int64) *Schema { - n := uint64(i) - schema.MinLength = (n*8 + 5) / 6 - return schema -} - -func (schema *Schema) WithPattern(pattern string) *Schema { - schema.Pattern = pattern - return schema -} - -func (schema *Schema) WithItems(value *Schema) *Schema { - schema.Items = &SchemaRef{ - Value: value, - } - return schema -} - -func (schema *Schema) WithMinItems(i int64) *Schema { - n := uint64(i) - schema.MinItems = n - return schema -} - -func (schema *Schema) WithMaxItems(i int64) *Schema { - n := uint64(i) - schema.MaxItems = &n - return schema -} - -func (schema *Schema) WithUniqueItems(unique bool) *Schema { - schema.UniqueItems = unique - return schema -} - -func (schema *Schema) WithProperty(name string, propertySchema *Schema) *Schema { - return schema.WithPropertyRef(name, &SchemaRef{ - Value: propertySchema, - }) -} - -func (schema *Schema) WithPropertyRef(name string, ref *SchemaRef) *Schema { - properties := schema.Properties - if properties == nil { - properties = make(map[string]*SchemaRef) - schema.Properties = properties - } - properties[name] = ref - return schema -} - -func (schema *Schema) WithProperties(properties map[string]*Schema) *Schema { - result := make(map[string]*SchemaRef, len(properties)) - for k, v := range properties { - result[k] = &SchemaRef{ - Value: v, - } - } - schema.Properties = result - return schema -} - -func (schema *Schema) WithMinProperties(i int64) *Schema { - n := uint64(i) - schema.MinProps = n - return schema -} - -func (schema *Schema) 
WithMaxProperties(i int64) *Schema { - n := uint64(i) - schema.MaxProps = &n - return schema -} - -func (schema *Schema) WithAnyAdditionalProperties() *Schema { - schema.AdditionalProperties = nil - t := true - schema.AdditionalPropertiesAllowed = &t - return schema -} - -func (schema *Schema) WithAdditionalProperties(v *Schema) *Schema { - if v == nil { - schema.AdditionalProperties = nil - } else { - schema.AdditionalProperties = &SchemaRef{ - Value: v, - } - } - return schema -} - -func (schema *Schema) IsEmpty() bool { - if schema.Type != "" || schema.Format != "" || len(schema.Enum) != 0 || - schema.UniqueItems || schema.ExclusiveMin || schema.ExclusiveMax || - !schema.Nullable || - schema.Min != nil || schema.Max != nil || schema.MultipleOf != nil || - schema.MinLength != 0 || schema.MaxLength != nil || schema.Pattern != "" || - schema.MinItems != 0 || schema.MaxItems != nil || - len(schema.Required) != 0 || - schema.MinProps != 0 || schema.MaxProps != nil { - return false - } - if n := schema.Not; n != nil && !n.Value.IsEmpty() { - return false - } - if ap := schema.AdditionalProperties; ap != nil && !ap.Value.IsEmpty() { - return false - } - if apa := schema.AdditionalPropertiesAllowed; apa != nil && !*apa { - return false - } - if items := schema.Items; items != nil && !items.Value.IsEmpty() { - return false - } - for _, s := range schema.Properties { - if !s.Value.IsEmpty() { - return false - } - } - for _, s := range schema.OneOf { - if !s.Value.IsEmpty() { - return false - } - } - for _, s := range schema.AnyOf { - if !s.Value.IsEmpty() { - return false - } - } - for _, s := range schema.AllOf { - if !s.Value.IsEmpty() { - return false - } - } - return true -} - -func (schema *Schema) Validate(c context.Context) error { - return schema.validate(c, make([]*Schema, 2)) -} - -func (schema *Schema) validate(c context.Context, stack []*Schema) (err error) { - for _, existing := range stack { - if existing == schema { - return - } - } - stack = append(stack, schema) - - for _, item := range schema.OneOf { - v := item.Value - if v == nil { - return foundUnresolvedRef(item.Ref) - } - if err = v.validate(c, stack); err == nil { - return - } - } - - for _, item := range schema.AnyOf { - v := item.Value - if v == nil { - return foundUnresolvedRef(item.Ref) - } - if err = v.validate(c, stack); err != nil { - return - } - } - - for _, item := range schema.AllOf { - v := item.Value - if v == nil { - return foundUnresolvedRef(item.Ref) - } - if err = v.validate(c, stack); err != nil { - return - } - } - - if ref := schema.Not; ref != nil { - v := ref.Value - if v == nil { - return foundUnresolvedRef(ref.Ref) - } - if err = v.validate(c, stack); err != nil { - return - } - } - - schemaType := schema.Type - switch schemaType { - case "": - case "boolean": - case "number": - if format := schema.Format; len(format) > 0 { - switch format { - case "float", "double": - default: - return unsupportedFormat(format) - } - } - case "integer": - if format := schema.Format; len(format) > 0 { - switch format { - case "int32", "int64": - default: - return unsupportedFormat(format) - } - } - case "string": - if format := schema.Format; len(format) > 0 { - switch format { - // Supported by OpenAPIv3.0.1: - case "byte", "binary", "date", "date-time", "password": - // In JSON Draft-07 (not validated yet though): - case "regex": - case "time", "email", "idn-email": - case "hostname", "idn-hostname", "ipv4", "ipv6": - case "uri", "uri-reference", "iri", "iri-reference", "uri-template": - case "json-pointer", 
"relative-json-pointer": - default: - return unsupportedFormat(format) - } - } - case "array": - if schema.Items == nil { - return errors.New("When schema type is 'array', schema 'items' must be non-null") - } - case "object": - default: - return fmt.Errorf("Unsupported 'type' value '%s'", schemaType) - } - - if ref := schema.Items; ref != nil { - v := ref.Value - if v == nil { - return foundUnresolvedRef(ref.Ref) - } - if err = v.validate(c, stack); err != nil { - return - } - } - - for _, ref := range schema.Properties { - v := ref.Value - if v == nil { - return foundUnresolvedRef(ref.Ref) - } - if err = v.validate(c, stack); err != nil { - return - } - } - - if ref := schema.AdditionalProperties; ref != nil { - v := ref.Value - if v == nil { - return foundUnresolvedRef(ref.Ref) - } - if err = v.validate(c, stack); err != nil { - return - } - } - - return -} - -func (schema *Schema) IsMatching(value interface{}) bool { - return schema.visitJSON(value, true) == nil -} - -func (schema *Schema) IsMatchingJSONBoolean(value bool) bool { - return schema.visitJSON(value, true) == nil -} - -func (schema *Schema) IsMatchingJSONNumber(value float64) bool { - return schema.visitJSON(value, true) == nil -} - -func (schema *Schema) IsMatchingJSONString(value string) bool { - return schema.visitJSON(value, true) == nil -} - -func (schema *Schema) IsMatchingJSONArray(value []interface{}) bool { - return schema.visitJSON(value, true) == nil -} - -func (schema *Schema) IsMatchingJSONObject(value map[string]interface{}) bool { - return schema.visitJSON(value, true) == nil -} - -func (schema *Schema) VisitJSON(value interface{}) error { - return schema.visitJSON(value, false) -} - -func (schema *Schema) visitJSON(value interface{}, fast bool) (err error) { - switch value := value.(type) { - case float64: - if math.IsNaN(value) { - return ErrSchemaInputNaN - } - if math.IsInf(value, 0) { - return ErrSchemaInputInf - } - } - - if schema.IsEmpty() { - return - } - if err = schema.visitSetOperations(value, fast); err != nil { - return - } - - switch value := value.(type) { - case nil: - return schema.visitJSONNull(fast) - case bool: - return schema.visitJSONBoolean(value, fast) - case float64: - return schema.visitJSONNumber(value, fast) - case string: - return schema.visitJSONString(value, fast) - case []interface{}: - return schema.visitJSONArray(value, fast) - case map[string]interface{}: - return schema.visitJSONObject(value, fast) - default: - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "type", - Reason: fmt.Sprintf("Not a JSON value: %T", value), - } - } -} - -func (schema *Schema) visitSetOperations(value interface{}, fast bool) (err error) { - if enum := schema.Enum; len(enum) != 0 { - for _, v := range enum { - if value == v { - return - } - } - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "enum", - Reason: "JSON value is not one of the allowed values", - } - } - - if ref := schema.Not; ref != nil { - v := ref.Value - if v == nil { - return foundUnresolvedRef(ref.Ref) - } - if err := v.visitJSON(value, true); err == nil { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "not", - } - } - } - - if v := schema.OneOf; len(v) > 0 { - ok := 0 - for _, item := range v { - v := item.Value - if v == nil { - return foundUnresolvedRef(item.Ref) - } - if err := v.visitJSON(value, true); err == nil { - ok++ - } - } - if ok != 1 { - if fast { - return errSchema - } - return 
&SchemaError{ - Value: value, - Schema: schema, - SchemaField: "oneOf", - } - } - } - - if v := schema.AnyOf; len(v) > 0 { - ok := false - for _, item := range v { - v := item.Value - if v == nil { - return foundUnresolvedRef(item.Ref) - } - if err := v.visitJSON(value, true); err == nil { - ok = true - break - } - } - if !ok { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "anyOf", - } - } - } - - for _, item := range schema.AllOf { - v := item.Value - if v == nil { - return foundUnresolvedRef(item.Ref) - } - if err := v.visitJSON(value, false); err != nil { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "allOf", - Origin: err, - } - } - } - return -} - -func (schema *Schema) visitJSONNull(fast bool) (err error) { - if schema.Nullable { - return - } - if fast { - return errSchema - } - return &SchemaError{ - Value: nil, - Schema: schema, - SchemaField: "nullable", - Reason: "Value is not nullable", - } -} - -func (schema *Schema) VisitJSONBoolean(value bool) error { - return schema.visitJSONBoolean(value, false) -} - -func (schema *Schema) visitJSONBoolean(value bool, fast bool) (err error) { - if schemaType := schema.Type; schemaType != "" && schemaType != "boolean" { - return schema.expectedType("boolean", fast) - } - return -} - -func (schema *Schema) VisitJSONNumber(value float64) error { - return schema.visitJSONNumber(value, false) -} - -func (schema *Schema) visitJSONNumber(value float64, fast bool) (err error) { - schemaType := schema.Type - if schemaType == "integer" { - if bigFloat := big.NewFloat(value); !bigFloat.IsInt() { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "type", - Reason: "Value must be an integer", - } - } - } else if schemaType != "" && schemaType != "number" { - return schema.expectedType("number, integer", fast) - } - - // "exclusiveMinimum" - if v := schema.ExclusiveMin; v && !(*schema.Min < value) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "exclusiveMinimum", - Reason: fmt.Sprintf("Number must be more than %g", *schema.Min), - } - } - - // "exclusiveMaximum" - if v := schema.ExclusiveMax; v && !(*schema.Max > value) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "exclusiveMaximum", - Reason: fmt.Sprintf("Number must be less than %g", *schema.Max), - } - } - - // "minimum" - if v := schema.Min; v != nil && !(*v <= value) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "minimum", - Reason: fmt.Sprintf("Number must be at least %g", *v), - } - } - - // "maximum" - if v := schema.Max; v != nil && !(*v >= value) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "maximum", - Reason: fmt.Sprintf("Number must be most %g", *v), - } - } - - // "multipleOf" - if v := schema.MultipleOf; v != nil { - // "A numeric instance is valid only if division by this keyword's - // value results in an integer." 
- if bigFloat := big.NewFloat(value / *v); !bigFloat.IsInt() { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "multipleOf", - } - } - } - return -} - -func (schema *Schema) VisitJSONString(value string) error { - return schema.visitJSONString(value, false) -} - -func (schema *Schema) visitJSONString(value string, fast bool) (err error) { - if schemaType := schema.Type; schemaType != "" && schemaType != "string" { - return schema.expectedType("string", fast) - } - - // "minLength" and "maxLength" - minLength := schema.MinLength - maxLength := schema.MaxLength - if minLength != 0 || maxLength != nil { - // JSON schema string lengths are UTF-16, not UTF-8! - length := int64(0) - for _, r := range value { - if utf16.IsSurrogate(r) { - length += 2 - } else { - length++ - } - } - if minLength != 0 && length < int64(minLength) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "minLength", - Reason: fmt.Sprintf("Minimum string length is %d", minLength), - } - } - if maxLength != nil && length > int64(*maxLength) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "maxLength", - Reason: fmt.Sprintf("Maximum string length is %d", *maxLength), - } - } - } - - // "format" and "pattern" - cp := schema.compiledPattern - if cp == nil { - pattern := schema.Pattern - if v := schema.Pattern; len(v) > 0 { - // Pattern - re, err := regexp.Compile(v) - if err != nil { - return fmt.Errorf("Error while compiling regular expression '%s': %v", pattern, err) - } - cp = &compiledPattern{ - Regexp: re, - ErrReason: "JSON string doesn't match the regular expression '" + v + "'", - } - schema.compiledPattern = cp - } else if v := schema.Format; len(v) > 0 { - // No pattern, but does have a format - re := SchemaStringFormats[v] - if re != nil { - cp = &compiledPattern{ - Regexp: re, - ErrReason: "JSON string doesn't match the format '" + v + " (regular expression `" + re.String() + "`)'", - } - schema.compiledPattern = cp - } - } - } - if cp != nil { - if !cp.Regexp.MatchString(value) { - field := "format" - if schema.Pattern != "" { - field = "pattern" - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: field, - Reason: cp.ErrReason, - } - } - } - return -} - -func (schema *Schema) VisitJSONArray(value []interface{}) error { - return schema.visitJSONArray(value, false) -} - -func (schema *Schema) visitJSONArray(value []interface{}, fast bool) (err error) { - if schemaType := schema.Type; schemaType != "" && schemaType != "array" { - return schema.expectedType("array", fast) - } - - lenValue := int64(len(value)) - - // "minItems" - if v := schema.MinItems; v != 0 && lenValue < int64(v) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "minItems", - Reason: fmt.Sprintf("Minimum number of items is %d", v), - } - } - - // "maxItems" - if v := schema.MaxItems; v != nil && lenValue > int64(*v) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "maxItems", - Reason: fmt.Sprintf("Maximum number of items is %d", *v), - } - } - - // "uniqueItems" - if v := schema.UniqueItems; v && !isSliceOfUniqueItems(value) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "uniqueItems", - Reason: fmt.Sprintf("Duplicate items found"), - } - } - - // "items" - if 
itemSchemaRef := schema.Items; itemSchemaRef != nil { - itemSchema := itemSchemaRef.Value - if itemSchema == nil { - return foundUnresolvedRef(itemSchemaRef.Ref) - } - for i, item := range value { - if err := itemSchema.VisitJSON(item); err != nil { - return markSchemaErrorIndex(err, i) - } - } - } - return -} - -func (schema *Schema) VisitJSONObject(value map[string]interface{}) error { - return schema.visitJSONObject(value, false) -} - -func (schema *Schema) visitJSONObject(value map[string]interface{}, fast bool) (err error) { - if schemaType := schema.Type; schemaType != "" && schemaType != "object" { - return schema.expectedType("object", fast) - } - - // "properties" - properties := schema.Properties - lenValue := int64(len(value)) - - // "minProperties" - if v := schema.MinProps; v != 0 && lenValue < int64(v) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "minProperties", - Reason: fmt.Sprintf("There must be at least %d properties", v), - } - } - - // "maxProperties" - if v := schema.MaxProps; v != nil && lenValue > int64(*v) { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "maxProperties", - Reason: fmt.Sprintf("There must be at most %d properties", *v), - } - } - - // "patternProperties" - var cp *compiledPattern - patternProperties := schema.PatternProperties - if len(patternProperties) > 0 { - cp = schema.compiledPatternProperties - if cp == nil { - re, err := regexp.Compile(patternProperties) - if err != nil { - return fmt.Errorf("Error while compiling regular expression '%s': %v", patternProperties, err) - } - cp = &compiledPattern{ - Regexp: re, - ErrReason: "JSON property doesn't match the regular expression '" + patternProperties + "'", - } - schema.compiledPatternProperties = cp - } - } - - // "additionalProperties" - var additionalProperties *Schema - if ref := schema.AdditionalProperties; ref != nil { - additionalProperties = ref.Value - } - for k, v := range value { - if properties != nil { - propertyRef := properties[k] - if propertyRef != nil { - p := propertyRef.Value - if p == nil { - return foundUnresolvedRef(propertyRef.Ref) - } - if err := p.VisitJSON(v); err != nil { - if fast { - return errSchema - } - return markSchemaErrorKey(err, k) - } - continue - } - } - allowed := schema.AdditionalPropertiesAllowed - if additionalProperties != nil || allowed == nil || (allowed != nil && *allowed) { - if cp != nil { - if !cp.Regexp.MatchString(k) { - return &SchemaError{ - Schema: schema, - SchemaField: "patternProperties", - Reason: cp.ErrReason, - } - } - } - if additionalProperties != nil { - if err := additionalProperties.VisitJSON(v); err != nil { - if fast { - return errSchema - } - return markSchemaErrorKey(err, k) - } - } - continue - } - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "properties", - Reason: fmt.Sprintf("Property '%s' is unsupported", k), - } - } - for _, k := range schema.Required { - if _, ok := value[k]; !ok { - if fast { - return errSchema - } - return &SchemaError{ - Value: value, - Schema: schema, - SchemaField: "required", - Reason: fmt.Sprintf("Property '%s' is missing", k), - } - } - } - return -} - -func (schema *Schema) expectedType(typ string, fast bool) error { - if fast { - return errSchema - } - return &SchemaError{ - Value: schema.Type, - Schema: schema, - SchemaField: "type", - Reason: "Field must be set to " + typ + " or not be present", - } -} - -type 
SchemaError struct { - Value interface{} - reversePath []string - Schema *Schema - SchemaField string - Reason string - Origin error -} - -func markSchemaErrorKey(err error, key string) error { - if v, ok := err.(*SchemaError); ok { - v.reversePath = append(v.reversePath, key) - return v - } - return err -} - -func markSchemaErrorIndex(err error, index int) error { - if v, ok := err.(*SchemaError); ok { - v.reversePath = append(v.reversePath, strconv.FormatInt(int64(index), 10)) - return v - } - return err -} - -func (err *SchemaError) JSONPointer() []string { - reversePath := err.reversePath - path := make([]string, len(reversePath)) - for i := range path { - path[i] = reversePath[len(path)-1-i] - } - return path -} - -func (err *SchemaError) Error() string { - if err.Origin != nil { - return err.Origin.Error() - } - - buf := bytes.NewBuffer(make([]byte, 0, 256)) - if len(err.reversePath) > 0 { - buf.WriteString(`Error at "`) - reversePath := err.reversePath - for i := len(reversePath) - 1; i >= 0; i-- { - buf.WriteByte('/') - buf.WriteString(reversePath[i]) - } - buf.WriteString(`":`) - } - reason := err.Reason - if reason == "" { - buf.WriteString(`Doesn't match schema "`) - buf.WriteString(err.SchemaField) - buf.WriteString(`"`) - } else { - buf.WriteString(reason) - } - if !SchemaErrorDetailsDisabled { - buf.WriteString("\nSchema:\n ") - encoder := json.NewEncoder(buf) - encoder.SetIndent(" ", " ") - if err := encoder.Encode(err.Schema); err != nil { - panic(err) - } - buf.WriteString("\nValue:\n ") - if err := encoder.Encode(err.Value); err != nil { - panic(err) - } - } - return buf.String() -} - -func isSliceOfUniqueItems(xs []interface{}) bool { - s := len(xs) - m := make(map[interface{}]struct{}, s) - for _, x := range xs { - m[x] = struct{}{} - } - return s == len(m) -} - -func unsupportedFormat(format string) error { - return fmt.Errorf("Unsupported 'format' value '%s'", format) -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/schema_formats.go b/vendor/github.com/getkin/kin-openapi/openapi3/schema_formats.go deleted file mode 100644 index 9b2f3167e..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/schema_formats.go +++ /dev/null @@ -1,33 +0,0 @@ -package openapi3 - -import ( - "fmt" - "regexp" -) - -var SchemaStringFormats = make(map[string]*regexp.Regexp, 8) - -func DefineStringFormat(name string, pattern string) { - re, err := regexp.Compile(pattern) - if err != nil { - err := fmt.Errorf("Format '%v' has invalid pattern '%v': %v", name, pattern, err) - panic(err) - } - SchemaStringFormats[name] = re -} - -func init() { - // This pattern catches only some suspiciously wrong-looking email addresses. - // Use DefineStringFormat(...) if you need something stricter. - DefineStringFormat("email", `^[^@]+@[^@<>",\s]+$`) - - // Base64 - // The pattern supports base64 and b./ase64url. Padding ('=') is supported. 
- DefineStringFormat("byte", `(^$|^[a-zA-Z0-9+/\-_]*=*$)`) - - // date - DefineStringFormat("date", `^[0-9]{4}-(0[0-9]|10|11|12)-([0-2][0-9]|30|31)$`) - - // date-time - DefineStringFormat("date-time", `^[0-9]{4}-(0[0-9]|10|11|12)-([0-2][0-9]|30|31)T[0-9]{2}:[0-9]{2}:[0-9]{2}(.[0-9]+)?(Z|(\+|-)[0-9]{2}:[0-9]{2})?$`) -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/security_requirements.go b/vendor/github.com/getkin/kin-openapi/openapi3/security_requirements.go deleted file mode 100644 index c2ea70378..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/security_requirements.go +++ /dev/null @@ -1,40 +0,0 @@ -package openapi3 - -import ( - "context" -) - -type SecurityRequirements []SecurityRequirement - -func NewSecurityRequirements() *SecurityRequirements { - return &SecurityRequirements{} -} - -func (srs *SecurityRequirements) With(securityRequirement SecurityRequirement) *SecurityRequirements { - *srs = append(*srs, securityRequirement) - return srs -} - -func (srs SecurityRequirements) Validate(c context.Context) error { - for _, item := range srs { - if err := item.Validate(c); err != nil { - return err - } - } - return nil -} - -type SecurityRequirement map[string][]string - -func NewSecurityRequirement() SecurityRequirement { - return make(SecurityRequirement) -} - -func (security SecurityRequirement) Authenticate(provider string, scopes ...string) SecurityRequirement { - security[provider] = scopes - return security -} - -func (security SecurityRequirement) Validate(c context.Context) error { - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/security_scheme.go b/vendor/github.com/getkin/kin-openapi/openapi3/security_scheme.go deleted file mode 100644 index e7a6cb624..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/security_scheme.go +++ /dev/null @@ -1,220 +0,0 @@ -package openapi3 - -import ( - "context" - "errors" - "fmt" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -type SecurityScheme struct { - ExtensionProps - - Type string `json:"type,omitempty"` - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Scheme string `json:"scheme,omitempty"` - BearerFormat string `json:"bearerFormat,omitempty"` - Flows *OAuthFlows `json:"flows,omitempty"` -} - -func NewSecurityScheme() *SecurityScheme { - return &SecurityScheme{} -} - -func NewCSRFSecurityScheme() *SecurityScheme { - return &SecurityScheme{ - Type: "apiKey", - In: "header", - Name: "X-XSRF-TOKEN", - } -} - -func NewJWTSecurityScheme() *SecurityScheme { - return &SecurityScheme{ - Type: "http", - Scheme: "bearer", - BearerFormat: "JWT", - } -} - -func (ss *SecurityScheme) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(ss) -} - -func (ss *SecurityScheme) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, ss) -} - -func (ss *SecurityScheme) WithType(value string) *SecurityScheme { - ss.Type = value - return ss -} - -func (ss *SecurityScheme) WithDescription(value string) *SecurityScheme { - ss.Description = value - return ss -} - -func (ss *SecurityScheme) WithName(value string) *SecurityScheme { - ss.Name = value - return ss -} - -func (ss *SecurityScheme) WithIn(value string) *SecurityScheme { - ss.In = value - return ss -} - -func (ss *SecurityScheme) WithScheme(value string) *SecurityScheme { - ss.Scheme = value - return ss -} - -func (ss *SecurityScheme) WithBearerFormat(value string) *SecurityScheme { - ss.BearerFormat = value - return 
ss -} - -func (ss *SecurityScheme) Validate(c context.Context) error { - hasIn := false - hasBearerFormat := false - hasFlow := false - switch ss.Type { - case "apiKey": - hasIn = true - hasBearerFormat = true - case "http": - scheme := ss.Scheme - switch scheme { - case "bearer": - hasBearerFormat = true - case "basic": - default: - return fmt.Errorf("Security scheme of type 'http' has invalid 'scheme' value '%s'", scheme) - } - case "oauth2": - hasFlow = true - case "openIdConnect": - return fmt.Errorf("Support for security schemes with type '%v' has not been implemented", ss.Type) - default: - return fmt.Errorf("Security scheme 'type' can't be '%v'", ss.Type) - } - - // Validate "in" and "name" - if hasIn { - switch ss.In { - case "query", "header", "cookie": - default: - return fmt.Errorf("Security scheme of type 'apiKey' should have 'in'. It can be 'query', 'header' or 'cookie', not '%s'", ss.In) - } - if ss.Name == "" { - return errors.New("Security scheme of type 'apiKey' should have 'name'") - } - } else if len(ss.In) > 0 { - return fmt.Errorf("Security scheme of type '%s' can't have 'in'", ss.Type) - } else if len(ss.Name) > 0 { - return errors.New("Security scheme of type 'apiKey' can't have 'name'") - } - - // Validate "format" - if hasBearerFormat { - switch ss.BearerFormat { - case "", "JWT": - default: - return fmt.Errorf("Security scheme has unsupported 'bearerFormat' value '%s'", ss.BearerFormat) - } - } else if len(ss.BearerFormat) > 0 { - return errors.New("Security scheme of type 'apiKey' can't have 'bearerFormat'") - } - - // Validate "flow" - if hasFlow { - flow := ss.Flows - if flow == nil { - return fmt.Errorf("Security scheme of type '%v' should have 'flows'", ss.Type) - } - if err := flow.Validate(c); err != nil { - return fmt.Errorf("Security scheme 'flow' is invalid: %v", err) - } - } else if ss.Flows != nil { - return fmt.Errorf("Security scheme of type '%s' can't have 'flows'", ss.Type) - } - return nil -} - -type OAuthFlows struct { - ExtensionProps - Implicit *OAuthFlow `json:"implicit,omitempty"` - Password *OAuthFlow `json:"password,omitempty"` - ClientCredentials *OAuthFlow `json:"clientCredentials,omitempty"` - AuthorizationCode *OAuthFlow `json:"authorizationCode,omitempty"` -} - -type oAuthFlowType int - -const ( - oAuthFlowTypeImplicit oAuthFlowType = iota - oAuthFlowTypePassword - oAuthFlowTypeClientCredentials - oAuthFlowAuthorizationCode -) - -func (flows *OAuthFlows) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(flows) -} - -func (flows *OAuthFlows) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, flows) -} - -func (flows *OAuthFlows) Validate(c context.Context) error { - if v := flows.Implicit; v != nil { - return v.Validate(c, oAuthFlowTypeImplicit) - } - if v := flows.Password; v != nil { - return v.Validate(c, oAuthFlowTypePassword) - } - if v := flows.ClientCredentials; v != nil { - return v.Validate(c, oAuthFlowTypeClientCredentials) - } - if v := flows.AuthorizationCode; v != nil { - return v.Validate(c, oAuthFlowAuthorizationCode) - } - return errors.New("No OAuth flow is defined") -} - -type OAuthFlow struct { - ExtensionProps - AuthorizationURL string `json:"authorizationUrl,omitempty"` - TokenURL string `json:"tokenUrl,omitempty"` - RefreshURL string `json:"refreshUrl,omitempty"` - Scopes map[string]string `json:"scopes"` -} - -func (flow *OAuthFlow) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(flow) -} - -func (flow *OAuthFlow) UnmarshalJSON(data []byte) 
error { - return jsoninfo.UnmarshalStrictStruct(data, flow) -} - -func (flow *OAuthFlow) Validate(c context.Context, typ oAuthFlowType) error { - if typ == oAuthFlowAuthorizationCode || typ == oAuthFlowTypeImplicit { - if v := flow.AuthorizationURL; v == "" { - return errors.New("An OAuth flow is missing 'authorizationUrl in authorizationCode or implicit '") - } - } - if typ != oAuthFlowTypeImplicit { - if v := flow.TokenURL; v == "" { - return errors.New("An OAuth flow is missing 'tokenUrl in not implicit'") - } - } - if v := flow.Scopes; len(v) == 0 { - return errors.New("An OAuth flow is missing 'scopes'") - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/serialization_method.go b/vendor/github.com/getkin/kin-openapi/openapi3/serialization_method.go deleted file mode 100644 index 2ec8bd2db..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/serialization_method.go +++ /dev/null @@ -1,17 +0,0 @@ -package openapi3 - -const ( - SerializationSimple = "simple" - SerializationLabel = "label" - SerializationMatrix = "matrix" - SerializationForm = "form" - SerializationSpaceDelimited = "spaceDelimited" - SerializationPipeDelimited = "pipeDelimited" - SerializationDeepObject = "deepObject" -) - -// SerializationMethod describes a serialization method of HTTP request's parameters and body. -type SerializationMethod struct { - Style string - Explode bool -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/server.go b/vendor/github.com/getkin/kin-openapi/openapi3/server.go deleted file mode 100644 index 7d2ace5c8..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/server.go +++ /dev/null @@ -1,132 +0,0 @@ -package openapi3 - -import ( - "context" - "errors" - "net/url" - "strings" -) - -// Servers is specified by OpenAPI/Swagger standard version 3.0. -type Servers []*Server - -func (servers Servers) Validate(c context.Context) error { - for _, v := range servers { - if err := v.Validate(c); err != nil { - return err - } - } - return nil -} - -func (servers Servers) MatchURL(parsedURL *url.URL) (*Server, []string, string) { - rawURL := parsedURL.String() - if i := strings.IndexByte(rawURL, '?'); i >= 0 { - rawURL = rawURL[:i] - } - for _, server := range servers { - pathParams, remaining, ok := server.MatchRawURL(rawURL) - if ok { - return server, pathParams, remaining - } - } - return nil, nil, "" -} - -// Server is specified by OpenAPI/Swagger standard version 3.0. -type Server struct { - URL string `json:"url,omitempty"` - Description string `json:"description,omitempty"` - Variables map[string]*ServerVariable `json:"variables,omitempty"` -} - -func (server Server) ParameterNames() ([]string, error) { - pattern := server.URL - var params []string - for len(pattern) > 0 { - i := strings.IndexByte(pattern, '{') - if i < 0 { - break - } - pattern = pattern[i+1:] - i = strings.IndexByte(pattern, '}') - if i < 0 { - return nil, errors.New("Missing '}'") - } - params = append(params, strings.TrimSpace(pattern[:i])) - pattern = pattern[i+1:] - } - return params, nil -} - -func (server Server) MatchRawURL(input string) ([]string, string, bool) { - pattern := server.URL - var params []string - for len(pattern) > 0 { - c := pattern[0] - if len(pattern) == 1 && c == '/' { - break - } - if c == '{' { - // Find end of pattern - i := strings.IndexByte(pattern, '}') - if i < 0 { - return nil, "", false - } - pattern = pattern[i+1:] - - // Find next '.' 
or '/' - i = strings.IndexAny(input, "./") - if i < 0 { - i = len(input) - } - params = append(params, input[:i]) - input = input[i:] - continue - } - if len(input) == 0 || input[0] != c { - return nil, "", false - } - pattern = pattern[1:] - input = input[1:] - } - if input == "" { - input = "/" - } - if input[0] != '/' { - return nil, "", false - } - return params, input, true -} - -func (server *Server) Validate(c context.Context) (err error) { - for _, v := range server.Variables { - if err = v.Validate(c); err != nil { - return - } - } - return -} - -// ServerVariable is specified by OpenAPI/Swagger standard version 3.0. -type ServerVariable struct { - Enum []interface{} `json:"enum,omitempty"` - Default interface{} `json:"default,omitempty"` - Description string `json:"description,omitempty"` -} - -func (serverVariable *ServerVariable) Validate(c context.Context) error { - switch serverVariable.Default.(type) { - case float64, string: - default: - return errors.New("Variable 'default' must be either JSON number or JSON string") - } - for _, item := range serverVariable.Enum { - switch item.(type) { - case float64, string: - default: - return errors.New("Every variable 'enum' item must be number of string") - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/swagger.go b/vendor/github.com/getkin/kin-openapi/openapi3/swagger.go deleted file mode 100644 index 3f02de317..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/swagger.go +++ /dev/null @@ -1,71 +0,0 @@ -package openapi3 - -import ( - "context" - - "github.com/getkin/kin-openapi/jsoninfo" -) - -type Swagger struct { - ExtensionProps - OpenAPI string `json:"openapi"` // Required - Info Info `json:"info"` // Required - Servers Servers `json:"servers,omitempty"` - Paths Paths `json:"paths,omitempty"` - Components Components `json:"components,omitempty"` - Security SecurityRequirements `json:"security,omitempty"` - ExternalDocs *ExternalDocs `json:"externalDocs,omitempty"` -} - -func (swagger *Swagger) MarshalJSON() ([]byte, error) { - return jsoninfo.MarshalStrictStruct(swagger) -} - -func (swagger *Swagger) UnmarshalJSON(data []byte) error { - return jsoninfo.UnmarshalStrictStruct(data, swagger) -} - -func (swagger *Swagger) AddOperation(path string, method string, operation *Operation) { - paths := swagger.Paths - if paths == nil { - paths = make(Paths) - swagger.Paths = paths - } - pathItem := paths[path] - if pathItem == nil { - pathItem = &PathItem{} - paths[path] = pathItem - } - pathItem.SetOperation(method, operation) -} - -func (swagger *Swagger) AddServer(server *Server) { - swagger.Servers = append(swagger.Servers, server) -} - -func (swagger *Swagger) Validate(c context.Context) error { - if err := swagger.Components.Validate(c); err != nil { - return err - } - if v := swagger.Security; v != nil { - if err := v.Validate(c); err != nil { - return err - } - } - if paths := swagger.Paths; paths != nil { - if err := paths.Validate(c); err != nil { - return err - } - } - if v := swagger.Servers; v != nil { - if err := v.Validate(c); err != nil { - return err - } - } - if v := swagger.Paths; v != nil { - if err := v.Validate(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/swagger_loader.go b/vendor/github.com/getkin/kin-openapi/openapi3/swagger_loader.go deleted file mode 100644 index d6674622e..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/swagger_loader.go +++ /dev/null @@ -1,569 +0,0 @@ -package 
openapi3 - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "path" - "strings" - - "github.com/ghodss/yaml" -) - -func foundUnresolvedRef(ref string) error { - return fmt.Errorf("Found unresolved ref: '%s'", ref) -} - -func failedToResolveRefFragment(value string) error { - return fmt.Errorf("Failed to resolve fragment in URI: '%s'", value) -} - -func failedToResolveRefFragmentPart(value string, what string) error { - return fmt.Errorf("Failed to resolve '%s' in fragment in URI: '%s'", what, value) -} - -type SwaggerLoader struct { - IsExternalRefsAllowed bool - Context context.Context - LoadSwaggerFromURIFunc func(loader *SwaggerLoader, url *url.URL) (*Swagger, error) - visited map[interface{}]struct{} -} - -func NewSwaggerLoader() *SwaggerLoader { - return &SwaggerLoader{} -} - -func (swaggerLoader *SwaggerLoader) LoadSwaggerFromURI(location *url.URL) (*Swagger, error) { - f := swaggerLoader.LoadSwaggerFromURIFunc - if f != nil { - return f(swaggerLoader, location) - } - data, err := readUrl(location) - if err != nil { - return nil, err - } - return swaggerLoader.LoadSwaggerFromDataWithPath(data, location) -} - -func readUrl(location *url.URL) ([]byte, error) { - if location.Scheme != "" && location.Host != "" { - resp, err := http.Get(location.String()) - if err != nil { - return nil, err - } - data, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - if err != nil { - return nil, err - } - return data, nil - } - if location.Scheme != "" || location.Host != "" || location.RawQuery != "" { - return nil, fmt.Errorf("Unsupported URI: '%s'", location.String()) - } - data, err := ioutil.ReadFile(location.Path) - if err != nil { - return nil, err - } - return data, nil -} - -func (swaggerLoader *SwaggerLoader) LoadSwaggerFromFile(path string) (*Swagger, error) { - f := swaggerLoader.LoadSwaggerFromURIFunc - if f != nil { - return f(swaggerLoader, &url.URL{ - Path: path, - }) - } - data, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - return swaggerLoader.LoadSwaggerFromDataWithPath(data, &url.URL{ - Path: path, - }) -} - -func (swaggerLoader *SwaggerLoader) LoadSwaggerFromData(data []byte) (*Swagger, error) { - swagger := &Swagger{} - if err := yaml.Unmarshal(data, swagger); err != nil { - return nil, err - } - return swagger, swaggerLoader.ResolveRefsIn(swagger, nil) -} - -func (swaggerLoader *SwaggerLoader) LoadSwaggerFromDataWithPath(data []byte, path *url.URL) (*Swagger, error) { - swagger := &Swagger{} - if err := yaml.Unmarshal(data, swagger); err != nil { - return nil, err - } - return swagger, swaggerLoader.ResolveRefsIn(swagger, path) -} - -func (swaggerLoader *SwaggerLoader) ResolveRefsIn(swagger *Swagger, path *url.URL) (err error) { - swaggerLoader.visited = make(map[interface{}]struct{}) - - // Visit all components - components := swagger.Components - for _, component := range components.Headers { - if err = swaggerLoader.resolveHeaderRef(swagger, component, path); err != nil { - return - } - } - for _, component := range components.Parameters { - if err = swaggerLoader.resolveParameterRef(swagger, component, path); err != nil { - return - } - } - for _, component := range components.RequestBodies { - if err = swaggerLoader.resolveRequestBodyRef(swagger, component, path); err != nil { - return - } - } - for _, component := range components.Responses { - if err = swaggerLoader.resolveResponseRef(swagger, component, path); err != nil { - return - } - } - for _, component := range components.Schemas { - if err = 
swaggerLoader.resolveSchemaRef(swagger, component, path); err != nil { - return - } - } - for _, component := range components.SecuritySchemes { - if err = swaggerLoader.resolveSecuritySchemeRef(swagger, component, path); err != nil { - return - } - } - for _, component := range components.Examples { - if err = swaggerLoader.resolveExampleRef(swagger, component, path); err != nil { - return - } - } - - // Visit all operations - for _, pathItem := range swagger.Paths { - if pathItem == nil { - continue - } - for _, parameter := range pathItem.Parameters { - if err = swaggerLoader.resolveParameterRef(swagger, parameter, path); err != nil { - return - } - } - for _, operation := range pathItem.Operations() { - for _, parameter := range operation.Parameters { - if err = swaggerLoader.resolveParameterRef(swagger, parameter, path); err != nil { - return - } - } - if requestBody := operation.RequestBody; requestBody != nil { - if err = swaggerLoader.resolveRequestBodyRef(swagger, requestBody, path); err != nil { - return - } - } - for _, response := range operation.Responses { - if err = swaggerLoader.resolveResponseRef(swagger, response, path); err != nil { - return - } - } - } - } - - return -} - -func copyURL(basePath *url.URL) (*url.URL, error) { - return url.Parse(basePath.String()) -} - -func join(basePath *url.URL, relativePath *url.URL) (*url.URL, error) { - if basePath == nil { - return relativePath, nil - } - newPath, err := copyURL(basePath) - if err != nil { - return nil, fmt.Errorf("Can't copy path: '%s'", basePath.String()) - } - newPath.Path = path.Join(path.Dir(newPath.Path), relativePath.Path) - return newPath, nil -} - -func resolvePath(basePath *url.URL, componentPath *url.URL) (*url.URL, error) { - if componentPath.Scheme == "" && componentPath.Host == "" { - return join(basePath, componentPath) - } - return componentPath, nil -} - -func (swaggerLoader *SwaggerLoader) resolveComponent(swagger *Swagger, ref string, prefix string, path *url.URL) ( - components *Components, - id string, - componentPath *url.URL, - err error, -) { - componentPath = path - if !strings.HasPrefix(ref, "#") { - if !swaggerLoader.IsExternalRefsAllowed { - return nil, "", nil, fmt.Errorf("Encountered non-allowed external reference: '%s'", ref) - } - parsedURL, err := url.Parse(ref) - if err != nil { - return nil, "", nil, fmt.Errorf("Can't parse reference: '%s': %v", ref, parsedURL) - } - fragment := parsedURL.Fragment - parsedURL.Fragment = "" - - resolvedPath, err := resolvePath(path, parsedURL) - if err != nil { - return nil, "", nil, fmt.Errorf("Error while resolving path: %v", err) - } - - if swagger, err = swaggerLoader.LoadSwaggerFromURI(resolvedPath); err != nil { - return nil, "", nil, fmt.Errorf("Error while resolving reference '%s': %v", ref, err) - } - ref = fmt.Sprintf("#%s", fragment) - componentPath = resolvedPath - } - if !strings.HasPrefix(ref, prefix) { - err := fmt.Errorf("expected prefix '%s' in URI '%s'", prefix, ref) - return nil, "", nil, err - } - id = ref[len(prefix):] - if strings.IndexByte(id, '/') >= 0 { - return nil, "", nil, failedToResolveRefFragmentPart(ref, id) - } - return &swagger.Components, id, componentPath, nil -} - -func (swaggerLoader *SwaggerLoader) resolveHeaderRef(swagger *Swagger, component *HeaderRef, path *url.URL) error { - // Prevent infinite recursion - visited := swaggerLoader.visited - if _, isVisited := visited[component]; isVisited { - return nil - } - visited[component] = struct{}{} - - // Resolve ref - const prefix = "#/components/headers/" - if 
ref := component.Ref; len(ref) > 0 { - components, id, componentPath, err := swaggerLoader.resolveComponent(swagger, ref, prefix, path) - if err != nil { - return err - } - definitions := components.Headers - if definitions == nil { - return failedToResolveRefFragment(ref) - } - resolved := definitions[id] - if resolved == nil { - return failedToResolveRefFragment(ref) - } - if err := swaggerLoader.resolveHeaderRef(swagger, resolved, componentPath); err != nil { - return err - } - component.Value = resolved.Value - } - value := component.Value - if value == nil { - return nil - } - if schema := value.Schema; schema != nil { - if err := swaggerLoader.resolveSchemaRef(swagger, schema, path); err != nil { - return err - } - } - return nil -} - -func (swaggerLoader *SwaggerLoader) resolveParameterRef(swagger *Swagger, component *ParameterRef, path *url.URL) error { - // Prevent infinite recursion - visited := swaggerLoader.visited - if _, isVisited := visited[component]; isVisited { - return nil - } - visited[component] = struct{}{} - - // Resolve ref - const prefix = "#/components/parameters/" - if ref := component.Ref; len(ref) > 0 { - components, id, componentPath, err := swaggerLoader.resolveComponent(swagger, ref, prefix, path) - if err != nil { - return err - } - definitions := components.Parameters - if definitions == nil { - return failedToResolveRefFragmentPart(ref, "parameters") - } - resolved := definitions[id] - if resolved == nil { - return failedToResolveRefFragmentPart(ref, id) - } - if err := swaggerLoader.resolveParameterRef(swagger, resolved, componentPath); err != nil { - return err - } - component.Value = resolved.Value - } - value := component.Value - if value == nil { - return nil - } - if value.Content != nil && value.Schema != nil { - return errors.New("Cannot contain both schema and content in a parameter") - } - for _, contentType := range value.Content { - if schema := contentType.Schema; schema != nil { - if err := swaggerLoader.resolveSchemaRef(swagger, schema, path); err != nil { - return err - } - } - } - if schema := value.Schema; schema != nil { - if err := swaggerLoader.resolveSchemaRef(swagger, schema, path); err != nil { - return err - } - } - return nil -} - -func (swaggerLoader *SwaggerLoader) resolveRequestBodyRef(swagger *Swagger, component *RequestBodyRef, path *url.URL) error { - // Prevent infinite recursion - visited := swaggerLoader.visited - if _, isVisited := visited[component]; isVisited { - return nil - } - visited[component] = struct{}{} - - // Resolve ref - const prefix = "#/components/requestBodies/" - if ref := component.Ref; len(ref) > 0 { - components, id, componentPath, err := swaggerLoader.resolveComponent(swagger, ref, prefix, path) - if err != nil { - return err - } - definitions := components.RequestBodies - if definitions == nil { - return failedToResolveRefFragmentPart(ref, "requestBodies") - } - resolved := definitions[id] - if resolved == nil { - return failedToResolveRefFragmentPart(ref, id) - } - if err = swaggerLoader.resolveRequestBodyRef(swagger, resolved, componentPath); err != nil { - return err - } - component.Value = resolved.Value - } - value := component.Value - if value == nil { - return nil - } - for _, contentType := range value.Content { - for name, example := range contentType.Examples { - if err := swaggerLoader.resolveExampleRef(swagger, example, path); err != nil { - return err - } - contentType.Examples[name] = example - } - if schema := contentType.Schema; schema != nil { - if err := 
swaggerLoader.resolveSchemaRef(swagger, schema, path); err != nil { - return err - } - } - } - return nil -} - -func (swaggerLoader *SwaggerLoader) resolveResponseRef(swagger *Swagger, component *ResponseRef, path *url.URL) error { - // Prevent infinite recursion - visited := swaggerLoader.visited - if _, isVisited := visited[component]; isVisited { - return nil - } - visited[component] = struct{}{} - - // Resolve ref - const prefix = "#/components/responses/" - if ref := component.Ref; len(ref) > 0 { - components, id, componentPath, err := swaggerLoader.resolveComponent(swagger, ref, prefix, path) - if err != nil { - return err - } - definitions := components.Responses - if definitions == nil { - return failedToResolveRefFragmentPart(ref, "responses") - } - resolved := definitions[id] - if resolved == nil { - return failedToResolveRefFragmentPart(ref, id) - } - if err := swaggerLoader.resolveResponseRef(swagger, resolved, componentPath); err != nil { - return err - } - component.Value = resolved.Value - } - value := component.Value - if value == nil { - return nil - } - for _, header := range value.Headers { - if err := swaggerLoader.resolveHeaderRef(swagger, header, path); err != nil { - return err - } - } - for _, contentType := range value.Content { - if contentType == nil { - continue - } - for name, example := range contentType.Examples { - if err := swaggerLoader.resolveExampleRef(swagger, example, path); err != nil { - return err - } - contentType.Examples[name] = example - } - if schema := contentType.Schema; schema != nil { - if err := swaggerLoader.resolveSchemaRef(swagger, schema, path); err != nil { - return err - } - contentType.Schema = schema - } - } - return nil -} - -func (swaggerLoader *SwaggerLoader) resolveSchemaRef(swagger *Swagger, component *SchemaRef, path *url.URL) error { - // Prevent infinite recursion - visited := swaggerLoader.visited - if _, isVisited := visited[component]; isVisited { - return nil - } - visited[component] = struct{}{} - - // Resolve ref - const prefix = "#/components/schemas/" - if ref := component.Ref; len(ref) > 0 { - components, id, componentPath, err := swaggerLoader.resolveComponent(swagger, ref, prefix, path) - if err != nil { - return err - } - definitions := components.Schemas - if definitions == nil { - return failedToResolveRefFragmentPart(ref, "schemas") - } - resolved := definitions[id] - if resolved == nil { - return failedToResolveRefFragmentPart(ref, id) - } - if err := swaggerLoader.resolveSchemaRef(swagger, resolved, componentPath); err != nil { - return err - } - component.Value = resolved.Value - } - value := component.Value - if value == nil { - return nil - } - - // ResolveRefs referred schemas - if v := value.Items; v != nil { - if err := swaggerLoader.resolveSchemaRef(swagger, v, path); err != nil { - return err - } - } - for _, v := range value.Properties { - if err := swaggerLoader.resolveSchemaRef(swagger, v, path); err != nil { - return err - } - } - if v := value.AdditionalProperties; v != nil { - if err := swaggerLoader.resolveSchemaRef(swagger, v, path); err != nil { - return err - } - } - if v := value.Not; v != nil { - if err := swaggerLoader.resolveSchemaRef(swagger, v, path); err != nil { - return err - } - } - for _, v := range value.AllOf { - if err := swaggerLoader.resolveSchemaRef(swagger, v, path); err != nil { - return err - } - } - for _, v := range value.AnyOf { - if err := swaggerLoader.resolveSchemaRef(swagger, v, path); err != nil { - return err - } - } - for _, v := range value.OneOf { - if err 
:= swaggerLoader.resolveSchemaRef(swagger, v, path); err != nil { - return err - } - } - - return nil -} - -func (swaggerLoader *SwaggerLoader) resolveSecuritySchemeRef(swagger *Swagger, component *SecuritySchemeRef, path *url.URL) error { - // Prevent infinite recursion - visited := swaggerLoader.visited - if _, isVisited := visited[component]; isVisited { - return nil - } - visited[component] = struct{}{} - - // Resolve ref - const prefix = "#/components/securitySchemes/" - if ref := component.Ref; len(ref) > 0 { - components, id, componentPath, err := swaggerLoader.resolveComponent(swagger, ref, prefix, path) - if err != nil { - return err - } - definitions := components.SecuritySchemes - if definitions == nil { - return failedToResolveRefFragmentPart(ref, "securitySchemes") - } - resolved := definitions[id] - if resolved == nil { - return failedToResolveRefFragmentPart(ref, id) - } - if err := swaggerLoader.resolveSecuritySchemeRef(swagger, resolved, componentPath); err != nil { - return err - } - component.Value = resolved.Value - } - return nil -} - -func (swaggerLoader *SwaggerLoader) resolveExampleRef(swagger *Swagger, component *ExampleRef, path *url.URL) error { - // Prevent infinite recursion - visited := swaggerLoader.visited - if _, isVisited := visited[component]; isVisited { - return nil - } - visited[component] = struct{}{} - - const prefix = "#/components/examples/" - if ref := component.Ref; len(ref) > 0 { - components, id, componentPath, err := swaggerLoader.resolveComponent(swagger, ref, prefix, path) - if err != nil { - return err - } - definitions := components.Examples - if definitions == nil { - return failedToResolveRefFragmentPart(ref, "examples") - } - resolved := definitions[id] - if resolved == nil { - return failedToResolveRefFragmentPart(ref, id) - } - if err := swaggerLoader.resolveExampleRef(swagger, resolved, componentPath); err != nil { - return err - } - component.Value = resolved.Value - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3/tag.go b/vendor/github.com/getkin/kin-openapi/openapi3/tag.go deleted file mode 100644 index 311a9fd59..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3/tag.go +++ /dev/null @@ -1,20 +0,0 @@ -package openapi3 - -// Tags is specified by OpenAPI/Swagger 3.0 standard. -type Tags []*Tag - -func (tags Tags) Get(name string) *Tag { - for _, tag := range tags { - if tag.Name == name { - return tag - } - } - return nil -} - -// Tag is specified by OpenAPI/Swagger 3.0 standard. 
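Reviewer note on the loader removed above: a minimal sketch of how callers typically drove the SwaggerLoader API (the spec path is illustrative, not a file in this repo). With the bump to kin-openapi v0.94.0 the same flow moves to the library's newer loader, so any in-tree uses of these symbols need a follow-up change.

// Sketch only: exercises the loader API deleted in this hunk.
package main

import (
	"context"
	"log"

	"github.com/getkin/kin-openapi/openapi3"
)

func loadSpec(path string) (*openapi3.Swagger, error) {
	loader := openapi3.NewSwaggerLoader()
	// Allow "$ref" values that point outside the document; off by default.
	loader.IsExternalRefsAllowed = true

	swagger, err := loader.LoadSwaggerFromFile(path)
	if err != nil {
		return nil, err
	}
	// Validate components, paths, servers and security requirements.
	if err := swagger.Validate(context.Background()); err != nil {
		return nil, err
	}
	return swagger, nil
}

func main() {
	// "openapi-spec/swagger.yaml" is a hypothetical path.
	if _, err := loadSpec("openapi-spec/swagger.yaml"); err != nil {
		log.Fatal(err)
	}
}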
-type Tag struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - ExternalDocs *ExternalDocs `json:"externalDocs,omitempty"` -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/authentication_input.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/authentication_input.go deleted file mode 100644 index bae7c43d3..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/authentication_input.go +++ /dev/null @@ -1,34 +0,0 @@ -package openapi3filter - -import ( - "fmt" - "strings" - - "github.com/getkin/kin-openapi/openapi3" -) - -type AuthenticationInput struct { - RequestValidationInput *RequestValidationInput - SecuritySchemeName string - SecurityScheme *openapi3.SecurityScheme - Scopes []string -} - -func (input *AuthenticationInput) NewError(err error) error { - if err == nil { - scopes := input.Scopes - if len(scopes) == 0 { - err = fmt.Errorf("Security requirement '%s' failed", - input.SecuritySchemeName) - } else { - err = fmt.Errorf("Security requirement '%s' (scopes: '%s') failed", - input.SecuritySchemeName, - strings.Join(input.Scopes, "', '")) - } - } - return &RequestError{ - Input: input.RequestValidationInput, - Reason: "Authorization failed", - Err: err, - } -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/errors.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/errors.go deleted file mode 100644 index 9b46ebda6..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/errors.go +++ /dev/null @@ -1,86 +0,0 @@ -package openapi3filter - -import ( - "errors" - "fmt" - "net/http" - - "github.com/getkin/kin-openapi/openapi3" -) - -var ( - errRouteMissingSwagger = errors.New("Route is missing OpenAPI specification") - errRouteMissingOperation = errors.New("Route is missing OpenAPI operation") - ErrAuthenticationServiceMissing = errors.New("Request validator doesn't have an authentication service defined") -) - -type RouteError struct { - Route Route - Reason string -} - -func (err *RouteError) Error() string { - return err.Reason -} - -type RequestError struct { - Input *RequestValidationInput - Parameter *openapi3.Parameter - RequestBody *openapi3.RequestBody - Status int - Reason string - Err error -} - -func (err *RequestError) HTTPStatus() int { - status := err.Status - if status == 0 { - status = http.StatusBadRequest - } - return status -} - -func (err *RequestError) Error() string { - reason := err.Reason - if e := err.Err; e != nil { - if len(reason) == 0 { - reason = e.Error() - } else { - reason += ": " + e.Error() - } - } - if v := err.Parameter; v != nil { - return fmt.Sprintf("Parameter '%s' in %s has an error: %s", v.Name, v.In, reason) - } else if v := err.RequestBody; v != nil { - return fmt.Sprintf("Request body has an error: %s", reason) - } else { - return reason - } -} - -type ResponseError struct { - Input *ResponseValidationInput - Reason string - Err error -} - -func (err *ResponseError) Error() string { - reason := err.Reason - if e := err.Err; e != nil { - if len(reason) == 0 { - reason = e.Error() - } else { - reason += ": " + e.Error() - } - } - return reason -} - -type SecurityRequirementsError struct { - SecurityRequirements openapi3.SecurityRequirements - Errors []error -} - -func (err *SecurityRequirementsError) Error() string { - return "Security requirements failed" -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/internal.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/internal.go deleted file mode 100644 
index facaf1de5..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/internal.go +++ /dev/null @@ -1,13 +0,0 @@ -package openapi3filter - -import ( - "strings" -) - -func parseMediaType(contentType string) string { - i := strings.IndexByte(contentType, ';') - if i < 0 { - return contentType - } - return contentType[:i] -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/options.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/options.go deleted file mode 100644 index 510b77756..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/options.go +++ /dev/null @@ -1,14 +0,0 @@ -package openapi3filter - -import ( - "context" -) - -var DefaultOptions = &Options{} - -type Options struct { - ExcludeRequestBody bool - ExcludeResponseBody bool - IncludeResponseStatus bool - AuthenticationFunc func(c context.Context, input *AuthenticationInput) error -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/req_resp_decoder.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/req_resp_decoder.go deleted file mode 100644 index 2a5f48996..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/req_resp_decoder.go +++ /dev/null @@ -1,906 +0,0 @@ -package openapi3filter - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime" - "mime/multipart" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" - - "github.com/getkin/kin-openapi/openapi3" -) - -// ParseErrorKind describes a kind of ParseError. -// The type simplifies comparison of errors. -type ParseErrorKind int - -const ( - // KindOther describes an untyped parsing error. - KindOther ParseErrorKind = iota - // KindUnsupportedFormat describes an error that happens when a value has an unsupported format. - KindUnsupportedFormat - // KindInvalidFormat describes an error that happens when a value does not conform a format - // that is required by a serialization method. - KindInvalidFormat -) - -// ParseError describes errors which happens while parse operation's parameters, requestBody, or response. -type ParseError struct { - Kind ParseErrorKind - Value interface{} - Reason string - Cause error - - path []interface{} -} - -func (e *ParseError) Error() string { - var msg []string - if p := e.Path(); len(p) > 0 { - var arr []string - for _, v := range p { - arr = append(arr, fmt.Sprintf("%v", v)) - } - msg = append(msg, fmt.Sprintf("path %v", strings.Join(arr, "."))) - } - msg = append(msg, e.innerError()) - return strings.Join(msg, ": ") -} - -func (e *ParseError) innerError() string { - var msg []string - if e.Value != nil { - msg = append(msg, fmt.Sprintf("value %v", e.Value)) - } - if e.Reason != "" { - msg = append(msg, e.Reason) - } - if e.Cause != nil { - if v, ok := e.Cause.(*ParseError); ok { - msg = append(msg, v.innerError()) - } else { - msg = append(msg, e.Cause.Error()) - } - } - return strings.Join(msg, ": ") -} - -// RootCause returns a root cause of ParseError. -func (e *ParseError) RootCause() error { - if v, ok := e.Cause.(*ParseError); ok { - return v.RootCause() - } - return e.Cause -} - -// Path returns a path to the root cause. -func (e *ParseError) Path() []interface{} { - var path []interface{} - if v, ok := e.Cause.(*ParseError); ok { - p := v.Path() - if len(p) > 0 { - path = append(path, p...) - } - } - if len(e.path) > 0 { - path = append(path, e.path...) 
- } - return path -} - -func invalidSerializationMethodErr(sm *openapi3.SerializationMethod) error { - return fmt.Errorf("invalid serialization method: style=%q, explode=%v", sm.Style, sm.Explode) -} - -// Decodes a parameter defined via the content property as an object. It uses -// the user specified decoder, or our build-in decoder for application/json -func decodeContentParameter(param *openapi3.Parameter, input *RequestValidationInput) ( - value interface{}, schema *openapi3.Schema, err error) { - - paramValues := make([]string, 1) - var found bool - switch param.In { - case openapi3.ParameterInPath: - paramValues[0], found = input.PathParams[param.Name] - case openapi3.ParameterInQuery: - paramValues, found = input.GetQueryParams()[param.Name] - case openapi3.ParameterInHeader: - paramValues[0] = input.Request.Header.Get(http.CanonicalHeaderKey(param.Name)) - found = paramValues[0] != "" - case openapi3.ParameterInCookie: - var cookie *http.Cookie - cookie, err = input.Request.Cookie(param.Name) - if err == http.ErrNoCookie { - found = false - } else if err != nil { - return - } else { - paramValues[0] = cookie.Value - found = true - } - default: - err = fmt.Errorf("unsupported parameter's 'in': %s", param.In) - return - } - - if !found { - if param.Required { - err = fmt.Errorf("parameter '%s' is required, but missing", param.Name) - } - return - } - - decoder := input.ParamDecoder - if decoder == nil { - decoder = defaultContentParameterDecoder - } - - value, schema, err = decoder(param, paramValues) - return -} - -func defaultContentParameterDecoder(param *openapi3.Parameter, values []string) ( - outValue interface{}, outSchema *openapi3.Schema, err error) { - // Only query parameters can have multiple values. - if len(values) > 1 && param.In != openapi3.ParameterInQuery { - err = fmt.Errorf("%s parameter '%s' can't have multiple values", param.In, param.Name) - return - } - - content := param.Content - if content == nil { - err = fmt.Errorf("parameter '%s' expected to have content", param.Name) - return - } - - // We only know how to decode a parameter if it has one content, application/json - if len(content) != 1 { - err = fmt.Errorf("multiple content types for parameter '%s'", - param.Name) - return - } - - mt := content.Get("application/json") - if mt == nil { - err = fmt.Errorf("parameter '%s' has no json content schema", param.Name) - return - } - outSchema = mt.Schema.Value - - if len(values) == 1 { - err = json.Unmarshal([]byte(values[0]), &outValue) - if err != nil { - err = fmt.Errorf("error unmarshaling parameter '%s' as json", param.Name) - return - } - } else { - outArray := make([]interface{}, len(values)) - for i, v := range values { - err = json.Unmarshal([]byte(v), &outArray[i]) - if err != nil { - err = fmt.Errorf("error unmarshaling parameter '%s' as json", param.Name) - return - } - } - outValue = outArray - } - return -} - -type valueDecoder interface { - DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) - DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) - DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) -} - -// decodeStyledParameter returns a value of an operation's parameter from HTTP request for -// parameters defined using the style format. -// The function returns ParseError when HTTP request contains an invalid value of a parameter. 
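When these decode failures reach callers they arrive wrapped in the RequestError and ParseError types declared in the hunks above; a small sketch of how a caller might unwrap them (the helper name is ours, not part of the package):

// Sketch only: inspecting a decoding failure surfaced by this package.
package server

import (
	"fmt"

	"github.com/getkin/kin-openapi/openapi3filter"
)

func describeValidationError(err error) string {
	reqErr, ok := err.(*openapi3filter.RequestError)
	if !ok {
		return err.Error()
	}
	if pe, ok := reqErr.Err.(*openapi3filter.ParseError); ok {
		// Path() lists the nested fields that led to the failure;
		// RootCause() unwraps chained ParseError values to the innermost error.
		return fmt.Sprintf("parse error at path %v: %v", pe.Path(), pe.RootCause())
	}
	return reqErr.Error()
}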
-func decodeStyledParameter(param *openapi3.Parameter, input *RequestValidationInput) (interface{}, error) { - sm, err := param.SerializationMethod() - if err != nil { - return nil, err - } - - var dec valueDecoder - switch param.In { - case openapi3.ParameterInPath: - dec = &pathParamDecoder{pathParams: input.PathParams} - case openapi3.ParameterInQuery: - dec = &urlValuesDecoder{values: input.GetQueryParams()} - case openapi3.ParameterInHeader: - dec = &headerParamDecoder{header: input.Request.Header} - case openapi3.ParameterInCookie: - dec = &cookieParamDecoder{req: input.Request} - default: - return nil, fmt.Errorf("unsupported parameter's 'in': %s", param.In) - } - - return decodeValue(dec, param.Name, sm, param.Schema) -} - -func decodeValue(dec valueDecoder, param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { - var decodeFn func(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) - switch schema.Value.Type { - case "array": - decodeFn = func(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { - return dec.DecodeArray(param, sm, schema) - } - case "object": - decodeFn = func(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { - return dec.DecodeObject(param, sm, schema) - } - default: - decodeFn = dec.DecodePrimitive - } - - return decodeFn(param, sm, schema) -} - -// pathParamDecoder decodes values of path parameters. -type pathParamDecoder struct { - pathParams map[string]string -} - -func (d *pathParamDecoder) DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { - var prefix string - switch sm.Style { - case "simple": - // A prefix is empty for style "simple". - case "label": - prefix = "." - case "matrix": - prefix = ";" + param + "=" - default: - return nil, invalidSerializationMethodErr(sm) - } - - if d.pathParams == nil { - // HTTP request does not contains a value of the target path parameter. - return nil, nil - } - raw, ok := d.pathParams[paramKey(param, sm)] - if !ok || raw == "" { - // HTTP request does not contains a value of the target path parameter. - return nil, nil - } - src, err := cutPrefix(raw, prefix) - if err != nil { - return nil, err - } - return parsePrimitive(src, schema) -} - -func (d *pathParamDecoder) DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) { - var prefix, delim string - switch { - case sm.Style == "simple": - delim = "," - case sm.Style == "label" && sm.Explode == false: - prefix = "." - delim = "," - case sm.Style == "label" && sm.Explode == true: - prefix = "." - delim = "." - case sm.Style == "matrix" && sm.Explode == false: - prefix = ";" + param + "=" - delim = "," - case sm.Style == "matrix" && sm.Explode == true: - prefix = ";" + param + "=" - delim = ";" + param + "=" - default: - return nil, invalidSerializationMethodErr(sm) - } - - if d.pathParams == nil { - // HTTP request does not contains a value of the target path parameter. - return nil, nil - } - raw, ok := d.pathParams[paramKey(param, sm)] - if !ok || raw == "" { - // HTTP request does not contains a value of the target path parameter. 
- return nil, nil - } - src, err := cutPrefix(raw, prefix) - if err != nil { - return nil, err - } - return parseArray(strings.Split(src, delim), schema) -} - -func (d *pathParamDecoder) DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) { - var prefix, propsDelim, valueDelim string - switch { - case sm.Style == "simple" && sm.Explode == false: - propsDelim = "," - valueDelim = "," - case sm.Style == "simple" && sm.Explode == true: - propsDelim = "," - valueDelim = "=" - case sm.Style == "label" && sm.Explode == false: - prefix = "." - propsDelim = "," - valueDelim = "," - case sm.Style == "label" && sm.Explode == true: - prefix = "." - propsDelim = "." - valueDelim = "=" - case sm.Style == "matrix" && sm.Explode == false: - prefix = ";" + param + "=" - propsDelim = "," - valueDelim = "," - case sm.Style == "matrix" && sm.Explode == true: - prefix = ";" - propsDelim = ";" - valueDelim = "=" - default: - return nil, invalidSerializationMethodErr(sm) - } - - if d.pathParams == nil { - // HTTP request does not contains a value of the target path parameter. - return nil, nil - } - raw, ok := d.pathParams[paramKey(param, sm)] - if !ok || raw == "" { - // HTTP request does not contains a value of the target path parameter. - return nil, nil - } - src, err := cutPrefix(raw, prefix) - if err != nil { - return nil, err - } - props, err := propsFromString(src, propsDelim, valueDelim) - if err != nil { - return nil, err - } - return makeObject(props, schema) -} - -// paramKey returns a key to get a raw value of a path parameter. -func paramKey(param string, sm *openapi3.SerializationMethod) string { - switch sm.Style { - case "label": - return "." + param - case "matrix": - return ";" + param - default: - return param - } -} - -// cutPrefix validates that a raw value of a path parameter has the specified prefix, -// and returns a raw value without the prefix. -func cutPrefix(raw, prefix string) (string, error) { - if prefix == "" { - return raw, nil - } - if len(raw) < len(prefix) || raw[:len(prefix)] != prefix { - return "", &ParseError{ - Kind: KindInvalidFormat, - Value: raw, - Reason: fmt.Sprintf("a value must be prefixed with %q", prefix), - } - } - return raw[len(prefix):], nil -} - -// urlValuesDecoder decodes values of query parameters. -type urlValuesDecoder struct { - values url.Values -} - -func (d *urlValuesDecoder) DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { - if sm.Style != "form" { - return nil, invalidSerializationMethodErr(sm) - } - - values := d.values[param] - if len(values) == 0 { - // HTTP request does not contain a value of the target query parameter. - return nil, nil - } - return parsePrimitive(values[0], schema) -} - -func (d *urlValuesDecoder) DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) { - if sm.Style == "deepObject" { - return nil, invalidSerializationMethodErr(sm) - } - - values := d.values[param] - if len(values) == 0 { - // HTTP request does not contain a value of the target query parameter. 
- return nil, nil - } - if !sm.Explode { - var delim string - switch sm.Style { - case "form": - delim = "," - case "spaceDelimited": - delim = " " - case "pipeDelimited": - delim = "|" - } - values = strings.Split(values[0], delim) - } - return parseArray(values, schema) -} - -func (d *urlValuesDecoder) DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) { - var propsFn func(url.Values) (map[string]string, error) - switch sm.Style { - case "form": - propsFn = func(params url.Values) (map[string]string, error) { - if len(params) == 0 { - // HTTP request does not contain query parameters. - return nil, nil - } - if sm.Explode { - props := make(map[string]string) - for key, values := range params { - props[key] = values[0] - } - return props, nil - } - values := params[param] - if len(values) == 0 { - // HTTP request does not contain a value of the target query parameter. - return nil, nil - } - return propsFromString(values[0], ",", ",") - } - case "deepObject": - propsFn = func(params url.Values) (map[string]string, error) { - props := make(map[string]string) - for key, values := range params { - groups := regexp.MustCompile(fmt.Sprintf("%s\\[(.+?)\\]", param)).FindAllStringSubmatch(key, -1) - if len(groups) == 0 { - // A query parameter's name does not match the required format, so skip it. - continue - } - props[groups[0][1]] = values[0] - } - if len(props) == 0 { - // HTTP request does not contain query parameters encoded by rules of style "deepObject". - return nil, nil - } - return props, nil - } - default: - return nil, invalidSerializationMethodErr(sm) - } - - props, err := propsFn(d.values) - if err != nil { - return nil, err - } - if props == nil { - return nil, nil - } - return makeObject(props, schema) -} - -// headerParamDecoder decodes values of header parameters. -type headerParamDecoder struct { - header http.Header -} - -func (d *headerParamDecoder) DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { - if sm.Style != "simple" { - return nil, invalidSerializationMethodErr(sm) - } - - raw := d.header.Get(http.CanonicalHeaderKey(param)) - return parsePrimitive(raw, schema) -} - -func (d *headerParamDecoder) DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) { - if sm.Style != "simple" { - return nil, invalidSerializationMethodErr(sm) - } - - raw := d.header.Get(http.CanonicalHeaderKey(param)) - if raw == "" { - // HTTP request does not contains a corresponding header - return nil, nil - } - return parseArray(strings.Split(raw, ","), schema) -} - -func (d *headerParamDecoder) DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) { - if sm.Style != "simple" { - return nil, invalidSerializationMethodErr(sm) - } - valueDelim := "," - if sm.Explode { - valueDelim = "=" - } - - raw := d.header.Get(http.CanonicalHeaderKey(param)) - if raw == "" { - // HTTP request does not contain a corresponding header. - return nil, nil - } - props, err := propsFromString(raw, ",", valueDelim) - if err != nil { - return nil, err - } - return makeObject(props, schema) -} - -// cookieParamDecoder decodes values of cookie parameters. 
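The query decoder above implements the OpenAPI serialization styles, including "deepObject"; a standalone sketch of that rule, using an illustrative "filter" parameter:

// Standalone sketch of the "deepObject" rule used by urlValuesDecoder above:
// filter[role]=admin&filter[active]=true  ->  {"role": "admin", "active": "true"}.
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

func decodeDeepObject(param string, values url.Values) map[string]string {
	props := make(map[string]string)
	re := regexp.MustCompile(fmt.Sprintf(`%s\[(.+?)\]`, regexp.QuoteMeta(param)))
	for key, vs := range values {
		if m := re.FindStringSubmatch(key); m != nil && len(vs) > 0 {
			props[m[1]] = vs[0]
		}
	}
	return props
}

func main() {
	q, _ := url.ParseQuery("filter[role]=admin&filter[active]=true")
	fmt.Println(decodeDeepObject("filter", q)) // map[active:true role:admin]
}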
-type cookieParamDecoder struct { - req *http.Request -} - -func (d *cookieParamDecoder) DecodePrimitive(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (interface{}, error) { - if sm.Style != "form" { - return nil, invalidSerializationMethodErr(sm) - } - - cookie, err := d.req.Cookie(param) - if err == http.ErrNoCookie { - // HTTP request does not contain a corresponding cookie. - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("decode param %q: %s", param, err) - } - return parsePrimitive(cookie.Value, schema) -} - -func (d *cookieParamDecoder) DecodeArray(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) ([]interface{}, error) { - if sm.Style != "form" || sm.Explode { - return nil, invalidSerializationMethodErr(sm) - } - - cookie, err := d.req.Cookie(param) - if err == http.ErrNoCookie { - // HTTP request does not contain a corresponding cookie. - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("decode param %q: %s", param, err) - } - return parseArray(strings.Split(cookie.Value, ","), schema) -} - -func (d *cookieParamDecoder) DecodeObject(param string, sm *openapi3.SerializationMethod, schema *openapi3.SchemaRef) (map[string]interface{}, error) { - if sm.Style != "form" || sm.Explode { - return nil, invalidSerializationMethodErr(sm) - } - - cookie, err := d.req.Cookie(param) - if err == http.ErrNoCookie { - // HTTP request does not contain a corresponding cookie. - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("decode param %q: %s", param, err) - } - props, err := propsFromString(cookie.Value, ",", ",") - if err != nil { - return nil, err - } - return makeObject(props, schema) -} - -// propsFromString returns a properties map that is created by splitting a source string by propDelim and valueDelim. -// The source string must have a valid format: pairs separated by . -// The function returns an error when the source string has an invalid format. -func propsFromString(src, propDelim, valueDelim string) (map[string]string, error) { - props := make(map[string]string) - pairs := strings.Split(src, propDelim) - - // When propDelim and valueDelim is equal the source string follow the next rule: - // every even item of pairs is a properies's name, and the subsequent odd item is a property's value. - if propDelim == valueDelim { - // Taking into account the rule above, a valid source string must be splitted by propDelim - // to an array with an even number of items. - if len(pairs)%2 != 0 { - return nil, &ParseError{ - Kind: KindInvalidFormat, - Value: src, - Reason: fmt.Sprintf("a value must be a list of object's properties in format \"name%svalue\" separated by %s", valueDelim, propDelim), - } - } - for i := 0; i < len(pairs)/2; i++ { - props[pairs[i*2]] = pairs[i*2+1] - } - return props, nil - } - - // When propDelim and valueDelim is not equal the source string follow the next rule: - // every item of pairs is a string that follows format . - for _, pair := range pairs { - prop := strings.Split(pair, valueDelim) - if len(prop) != 2 { - return nil, &ParseError{ - Kind: KindInvalidFormat, - Value: src, - Reason: fmt.Sprintf("a value must be a list of object's properties in format \"name%svalue\" separated by %s", valueDelim, propDelim), - } - } - props[prop[0]] = prop[1] - } - return props, nil -} - -// makeObject returns an object that contains properties from props. -// A value of every property is parsed as a primitive value. 
-// The function returns an error when an error happened while parse object's properties. -func makeObject(props map[string]string, schema *openapi3.SchemaRef) (map[string]interface{}, error) { - obj := make(map[string]interface{}) - for propName, propSchema := range schema.Value.Properties { - value, err := parsePrimitive(props[propName], propSchema) - if err != nil { - if v, ok := err.(*ParseError); ok { - return nil, &ParseError{path: []interface{}{propName}, Cause: v} - } - return nil, fmt.Errorf("property %q: %s", propName, err) - } - obj[propName] = value - } - return obj, nil -} - -// parseArray returns an array that contains items from a raw array. -// Every item is parsed as a primitive value. -// The function returns an error when an error happened while parse array's items. -func parseArray(raw []string, schemaRef *openapi3.SchemaRef) ([]interface{}, error) { - var value []interface{} - for i, v := range raw { - item, err := parsePrimitive(v, schemaRef.Value.Items) - if err != nil { - if v, ok := err.(*ParseError); ok { - return nil, &ParseError{path: []interface{}{i}, Cause: v} - } - return nil, fmt.Errorf("item %d: %s", i, err) - } - value = append(value, item) - } - return value, nil -} - -// parsePrimitive returns a value that is created by parsing a source string to a primitive type -// that is specified by a JSON schema. The function returns nil when the source string is empty. -// The function panics when a JSON schema has a non primitive type. -func parsePrimitive(raw string, schema *openapi3.SchemaRef) (interface{}, error) { - if raw == "" { - return nil, nil - } - switch schema.Value.Type { - case "integer": - v, err := strconv.ParseFloat(raw, 64) - if err != nil { - return nil, &ParseError{Kind: KindInvalidFormat, Value: raw, Reason: "an invalid interger", Cause: err} - } - return v, nil - case "number": - v, err := strconv.ParseFloat(raw, 64) - if err != nil { - return nil, &ParseError{Kind: KindInvalidFormat, Value: raw, Reason: "an invalid number", Cause: err} - } - return v, nil - case "boolean": - v, err := strconv.ParseBool(raw) - if err != nil { - return nil, &ParseError{Kind: KindInvalidFormat, Value: raw, Reason: "an invalid number", Cause: err} - } - return v, nil - case "string": - return raw, nil - default: - panic(fmt.Sprintf("schema has non primitive type %q", schema.Value.Type)) - } -} - -// EncodingFn is a function that returns an encoding of a request body's part. -type EncodingFn func(partName string) *openapi3.Encoding - -// BodyDecoder is an interface to decode a body of a request or response. -// An implementation must return a value that is a primitive, []interface{}, or map[string]interface{}. -type BodyDecoder func(io.Reader, http.Header, *openapi3.SchemaRef, EncodingFn) (interface{}, error) - -// bodyDecoders contains decoders for supported content types of a body. -// By default, there is content type "application/json" is supported only. -var bodyDecoders = make(map[string]BodyDecoder) - -// RegisterBodyDecoder registers a request body's decoder for a content type. -// -// If a decoder for the specified content type already exists, the function replaces -// it with the specified decoder. -func RegisterBodyDecoder(contentType string, decoder BodyDecoder) { - if contentType == "" { - panic("contentType is empty") - } - if decoder == nil { - panic("decoder is not defined") - } - bodyDecoders[contentType] = decoder -} - -// UnregisterBodyDecoder dissociates a body decoder from a content type. 
-// -// Decoding this content type will result in an error. -func UnregisterBodyDecoder(contentType string) { - if contentType == "" { - panic("contentType is empty") - } - delete(bodyDecoders, contentType) -} - -// decodeBody returns a decoded body. -// The function returns ParseError when a body is invalid. -func decodeBody(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) { - contentType := header.Get(http.CanonicalHeaderKey("Content-Type")) - mediaType := parseMediaType(contentType) - decoder, ok := bodyDecoders[mediaType] - if !ok { - return nil, &ParseError{ - Kind: KindUnsupportedFormat, - Reason: fmt.Sprintf("unsupported content type %q", mediaType), - } - } - value, err := decoder(body, header, schema, encFn) - if err != nil { - return nil, err - } - return value, nil -} - -func init() { - RegisterBodyDecoder("text/plain", plainBodyDecoder) - RegisterBodyDecoder("application/json", jsonBodyDecoder) - RegisterBodyDecoder("application/x-www-form-urlencoded", urlencodedBodyDecoder) - RegisterBodyDecoder("multipart/form-data", multipartBodyDecoder) - RegisterBodyDecoder("application/octet-stream", FileBodyDecoder) -} - -func plainBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) { - data, err := ioutil.ReadAll(body) - if err != nil { - return nil, &ParseError{Kind: KindInvalidFormat, Cause: err} - } - return string(data), nil -} - -func jsonBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) { - var value interface{} - if err := json.NewDecoder(body).Decode(&value); err != nil { - return nil, &ParseError{Kind: KindInvalidFormat, Cause: err} - } - return value, nil -} - -func urlencodedBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) { - // Validate JSON schema of request body. - // By the OpenAPI 3 specification request body's schema must have type "object". - // Properties of the schema describes individual parts of request body. - if schema.Value.Type != "object" { - return nil, fmt.Errorf("unsupported JSON schema of request body") - } - for propName, propSchema := range schema.Value.Properties { - switch propSchema.Value.Type { - case "object": - return nil, fmt.Errorf("unsupported JSON schema of request body's property %q", propName) - case "array": - items := propSchema.Value.Items.Value - if items.Type != "string" && items.Type != "integer" && items.Type != "number" && items.Type != "boolean" { - return nil, fmt.Errorf("unsupported JSON schema of request body's property %q", propName) - } - } - } - - // Parse form. - b, err := ioutil.ReadAll(body) - if err != nil { - return nil, err - } - values, err := url.ParseQuery(string(b)) - if err != nil { - return nil, err - } - - // Make an object value from form values. 
- obj := make(map[string]interface{}) - dec := &urlValuesDecoder{values: values} - for name, prop := range schema.Value.Properties { - var ( - value interface{} - enc *openapi3.Encoding - ) - if encFn != nil { - enc = encFn(name) - } - sm := enc.SerializationMethod() - - if value, err = decodeValue(dec, name, sm, prop); err != nil { - return nil, err - } - obj[name] = value - } - - return obj, nil -} - -func multipartBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) { - if schema.Value.Type != "object" { - return nil, fmt.Errorf("unsupported JSON schema of request body") - } - - // Parse form. - values := make(map[string][]interface{}) - contentType := header.Get(http.CanonicalHeaderKey("Content-Type")) - _, params, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, err - } - mr := multipart.NewReader(body, params["boundary"]) - for { - var part *multipart.Part - if part, err = mr.NextPart(); err == io.EOF { - break - } - if err != nil { - return nil, err - } - - var ( - name = part.FormName() - enc *openapi3.Encoding - ) - if encFn != nil { - enc = encFn(name) - } - subEncFn := func(string) *openapi3.Encoding { return enc } - // If the property's schema has type "array" it is means that the form contains a few parts with the same name. - // Every such part has a type that is defined by an items schema in the property's schema. - valueSchema := schema.Value.Properties[name] - if valueSchema.Value.Type == "array" { - valueSchema = valueSchema.Value.Items - } - - var value interface{} - if value, err = decodeBody(part, http.Header(part.Header), valueSchema, subEncFn); err != nil { - if v, ok := err.(*ParseError); ok { - return nil, &ParseError{path: []interface{}{name}, Cause: v} - } - return nil, fmt.Errorf("part %s: %s", name, err) - } - values[name] = append(values[name], value) - } - - // Make an object value from form values. - obj := make(map[string]interface{}) - for name, prop := range schema.Value.Properties { - vv := values[name] - if len(vv) == 0 { - continue - } - if prop.Value.Type == "array" { - obj[name] = vv - } else { - obj[name] = vv[0] - } - } - - return obj, nil -} - -// FileBodyDecoder is a body decoder that decodes a file body to a string. -func FileBodyDecoder(body io.Reader, header http.Header, schema *openapi3.SchemaRef, encFn EncodingFn) (interface{}, error) { - data, err := ioutil.ReadAll(body) - if err != nil { - return nil, err - } - return string(data), nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/router.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/router.go deleted file mode 100644 index fefe313dd..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/router.go +++ /dev/null @@ -1,214 +0,0 @@ -package openapi3filter - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/getkin/kin-openapi/openapi3" - "github.com/getkin/kin-openapi/pathpattern" -) - -type Route struct { - Swagger *openapi3.Swagger - Server *openapi3.Server - Path string - PathItem *openapi3.PathItem - Method string - Operation *openapi3.Operation - - // For developers who want use the router for handling too - Handler http.Handler -} - -// Routers maps a HTTP request to a Router. 
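The body-decoder registry deleted in this file was extensible via RegisterBodyDecoder; a hedged sketch of adding a YAML decoder (the content type and the use of ghodss/yaml are assumptions, not defaults of the package, which registered only plain text, JSON, form, multipart and octet-stream):

// Sketch only: registering an extra body decoder with the API removed above.
package server

import (
	"io"
	"io/ioutil"
	"net/http"

	"github.com/getkin/kin-openapi/openapi3"
	"github.com/getkin/kin-openapi/openapi3filter"
	"github.com/ghodss/yaml"
)

func init() {
	openapi3filter.RegisterBodyDecoder("application/yaml",
		func(body io.Reader, h http.Header, schema *openapi3.SchemaRef, enc openapi3filter.EncodingFn) (interface{}, error) {
			data, err := ioutil.ReadAll(body)
			if err != nil {
				return nil, err
			}
			// ghodss/yaml yields JSON-compatible values (map[string]interface{},
			// []interface{}, primitives), matching the BodyDecoder contract.
			var value interface{}
			if err := yaml.Unmarshal(data, &value); err != nil {
				return nil, err
			}
			return value, nil
		})
}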
-type Routers []*Router - -func (routers Routers) FindRoute(method string, url *url.URL) (*Router, *Route, map[string]string, error) { - for _, router := range routers { - // Skip routers that have DO NOT have servers - if len(router.swagger.Servers) == 0 { - continue - } - route, pathParams, err := router.FindRoute(method, url) - if err == nil { - return router, route, pathParams, nil - } - } - for _, router := range routers { - // Skip routers that DO have servers - if len(router.swagger.Servers) > 0 { - continue - } - route, pathParams, err := router.FindRoute(method, url) - if err == nil { - return router, route, pathParams, nil - } - } - return nil, nil, nil, &RouteError{ - Reason: "None of the routers matches", - } -} - -// Router maps a HTTP request to an OpenAPI operation. -type Router struct { - swagger *openapi3.Swagger - pathNode *pathpattern.Node -} - -// NewRouter creates a new router. -// -// If the given Swagger has servers, router will use them. -// All operations of the Swagger will be added to the router. -func NewRouter() *Router { - return &Router{} -} - -// WithSwaggerFromFile loads the Swagger file and adds it using WithSwagger. -// Panics on any error. -func (router *Router) WithSwaggerFromFile(path string) *Router { - if err := router.AddSwaggerFromFile(path); err != nil { - panic(err) - } - return router -} - -// WithSwagger adds all operations in the OpenAPI specification. -// Panics on any error. -func (router *Router) WithSwagger(swagger *openapi3.Swagger) *Router { - if err := router.AddSwagger(swagger); err != nil { - panic(err) - } - return router -} - -// AddSwaggerFromFile loads the Swagger file and adds it using AddSwagger. -func (router *Router) AddSwaggerFromFile(path string) error { - swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromFile(path) - if err != nil { - return err - } - return router.AddSwagger(swagger) -} - -// AddSwagger adds all operations in the OpenAPI specification. -func (router *Router) AddSwagger(swagger *openapi3.Swagger) error { - if err := swagger.Validate(context.TODO()); err != nil { - return fmt.Errorf("Validating Swagger failed: %v", err) - } - router.swagger = swagger - root := router.node() - for path, pathItem := range swagger.Paths { - for method, operation := range pathItem.Operations() { - method = strings.ToUpper(method) - if err := root.Add(method+" "+path, &Route{ - Swagger: swagger, - Path: path, - PathItem: pathItem, - Method: method, - Operation: operation, - }, nil); err != nil { - return err - } - } - } - return nil -} - -// AddRoute adds a route in the router. 
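A minimal sketch of constructing the router removed in this hunk from a loaded spec (the file name is illustrative); AddSwagger is shown instead of the panicking With* helpers:

// Sketch only: building the router deleted here.
package main

import (
	"log"

	"github.com/getkin/kin-openapi/openapi3"
	"github.com/getkin/kin-openapi/openapi3filter"
)

func newRouter(specPath string) (*openapi3filter.Router, error) {
	swagger, err := openapi3.NewSwaggerLoader().LoadSwaggerFromFile(specPath)
	if err != nil {
		return nil, err
	}
	router := openapi3filter.NewRouter()
	// AddSwagger validates the document and indexes every path/method pair.
	if err := router.AddSwagger(swagger); err != nil {
		return nil, err
	}
	return router, nil
}

func main() {
	if _, err := newRouter("openapi-spec/swagger.yaml"); err != nil {
		log.Fatal(err)
	}
}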
-func (router *Router) AddRoute(route *Route) error { - method := route.Method - if method == "" { - return errors.New("Route is missing method") - } - method = strings.ToUpper(method) - path := route.Path - if path == "" { - return errors.New("Route is missing path") - } - return router.node().Add(method+" "+path, router, nil) -} - -func (router *Router) node() *pathpattern.Node { - root := router.pathNode - if root == nil { - root = &pathpattern.Node{} - router.pathNode = root - } - return root -} - -func (router *Router) FindRoute(method string, url *url.URL) (*Route, map[string]string, error) { - swagger := router.swagger - - // Get server - servers := swagger.Servers - var server *openapi3.Server - var remainingPath string - var pathParams map[string]string - if len(servers) == 0 { - remainingPath = url.Path - } else { - var paramValues []string - server, paramValues, remainingPath = servers.MatchURL(url) - if server == nil { - return nil, nil, &RouteError{ - Route: Route{ - Swagger: swagger, - }, - Reason: "Does not match any server", - } - } - pathParams = make(map[string]string, 8) - paramNames, _ := server.ParameterNames() - for i, value := range paramValues { - name := paramNames[i] - pathParams[name] = value - } - } - - // Get PathItem - root := router.node() - var route *Route - node, paramValues := root.Match(method + " " + remainingPath) - if node != nil { - route, _ = node.Value.(*Route) - } - if route == nil { - return nil, nil, &RouteError{ - Route: Route{ - Swagger: swagger, - Server: server, - }, - Reason: "Path was not found", - } - } - - // Get operation - pathItem := route.PathItem - operation := pathItem.GetOperation(method) - if operation == nil { - return nil, nil, &RouteError{ - Route: Route{ - Swagger: swagger, - Server: server, - }, - Reason: "Path doesn't support the HTTP method", - } - } - if pathParams == nil { - pathParams = make(map[string]string, len(paramValues)) - } - paramKeys := node.VariableNames - for i, value := range paramValues { - key := paramKeys[i] - if strings.HasSuffix(key, "*") { - key = key[:len(key)-1] - } - pathParams[key] = value - } - return route, pathParams, nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request.go deleted file mode 100644 index 1a3a9badf..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request.go +++ /dev/null @@ -1,294 +0,0 @@ -package openapi3filter - -import ( - "bytes" - "context" - "errors" - "fmt" - "io/ioutil" - "net/http" - "sort" - - "github.com/getkin/kin-openapi/openapi3" -) - -// ErrInvalidRequired is an error that happens when a required value of a parameter or request's body is not defined. 
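And how a matched route was consumed, assuming a router built as in the previous sketch; the handler itself is illustrative:

// Sketch only: matching an incoming request with the FindRoute method defined above.
package server

import (
	"fmt"
	"net/http"

	"github.com/getkin/kin-openapi/openapi3filter"
)

func matchHandler(router *openapi3filter.Router) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		route, pathParams, err := router.FindRoute(r.Method, r.URL)
		if err != nil {
			// A *RouteError carries the reason ("Path was not found", ...).
			http.Error(w, err.Error(), http.StatusNotFound)
			return
		}
		fmt.Fprintf(w, "operation %s %s, params %v\n", route.Method, route.Path, pathParams)
	}
}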
-var ErrInvalidRequired = fmt.Errorf("must have a value") - -func ValidateRequest(c context.Context, input *RequestValidationInput) error { - options := input.Options - if options == nil { - options = DefaultOptions - } - route := input.Route - if route == nil { - return errors.New("invalid route") - } - operation := route.Operation - if operation == nil { - return errRouteMissingOperation - } - operationParameters := operation.Parameters - pathItemParameters := route.PathItem.Parameters - - // For each parameter of the PathItem - for _, parameterRef := range pathItemParameters { - parameter := parameterRef.Value - if operationParameters != nil { - if override := operationParameters.GetByInAndName(parameter.In, parameter.Name); override != nil { - continue - } - if err := ValidateParameter(c, input, parameter); err != nil { - return err - } - } - } - - // For each parameter of the Operation - for _, parameter := range operationParameters { - if err := ValidateParameter(c, input, parameter.Value); err != nil { - return err - } - } - - // RequestBody - requestBody := operation.RequestBody - if requestBody != nil && !options.ExcludeRequestBody { - if err := ValidateRequestBody(c, input, requestBody.Value); err != nil { - return err - } - } - - // Security - security := operation.Security - if security != nil { - if err := ValidateSecurityRequirements(c, input, *security); err != nil { - return err - } - } - return nil -} - -// ValidateParameter validates a parameter's value by JSON schema. -// The function returns RequestError with a ParseError cause when unable to parse a value. -// The function returns RequestError with ErrInvalidRequired cause when a value of a required parameter is not defined. -// The function returns RequestError with a openapi3.SchemaError cause when a value is invalid by JSON schema. -func ValidateParameter(c context.Context, input *RequestValidationInput, parameter *openapi3.Parameter) error { - if parameter.Schema == nil && parameter.Content == nil { - // We have no schema for the parameter. Assume that everything passes - // a schema-less check, but this could also be an error. The Swagger - // validation allows this to happen. - return nil - } - - var value interface{} - var err error - var schema *openapi3.Schema - - // Validation will ensure that we either have content or schema. - if parameter.Content != nil { - value, schema, err = decodeContentParameter(parameter, input) - if err != nil { - return &RequestError{Input: input, Parameter: parameter, Err: err} - } - } else { - value, err = decodeStyledParameter(parameter, input) - if err != nil { - return &RequestError{Input: input, Parameter: parameter, Err: err} - } - schema = parameter.Schema.Value - } - // Validate a parameter's value. - if value == nil { - if parameter.Required { - return &RequestError{Input: input, Parameter: parameter, Reason: "must have a value", Err: ErrInvalidRequired} - } - return nil - } - if schema == nil { - // A parameter's schema is not defined so skip validation of a parameter's value. - return nil - } - if err = schema.VisitJSON(value); err != nil { - return &RequestError{Input: input, Parameter: parameter, Err: err} - } - return nil -} - -// ValidateRequestBody validates data of a request's body. -// -// The function returns RequestError with ErrInvalidRequired cause when a value is required but not defined. -// The function returns RequestError with a openapi3.SchemaError cause when a value is invalid by JSON schema. 
-func ValidateRequestBody(c context.Context, input *RequestValidationInput, requestBody *openapi3.RequestBody) error { - var ( - req = input.Request - data []byte - ) - - if req.Body != http.NoBody { - defer req.Body.Close() - var err error - if data, err = ioutil.ReadAll(req.Body); err != nil { - return &RequestError{ - Input: input, - RequestBody: requestBody, - Reason: "reading failed", - Err: err, - } - } - // Put the data back into the input - req.Body = ioutil.NopCloser(bytes.NewReader(data)) - } - - if len(data) == 0 { - if requestBody.Required { - return &RequestError{Input: input, RequestBody: requestBody, Err: ErrInvalidRequired} - } - return nil - } - - content := requestBody.Content - if len(content) == 0 { - // A request's body does not have declared content, so skip validation. - return nil - } - - inputMIME := req.Header.Get("Content-Type") - contentType := requestBody.Content.Get(inputMIME) - if contentType == nil { - return &RequestError{ - Input: input, - RequestBody: requestBody, - Reason: fmt.Sprintf("header 'Content-Type' has unexpected value: %q", inputMIME), - } - } - - if contentType.Schema == nil { - // A JSON schema that describes the received data is not declared, so skip validation. - return nil - } - - encFn := func(name string) *openapi3.Encoding { return contentType.Encoding[name] } - value, err := decodeBody(bytes.NewReader(data), req.Header, contentType.Schema, encFn) - if err != nil { - return &RequestError{ - Input: input, - RequestBody: requestBody, - Reason: "failed to decode request body", - Err: err, - } - } - - // Validate JSON with the schema - if err := contentType.Schema.Value.VisitJSON(value); err != nil { - return &RequestError{ - Input: input, - RequestBody: requestBody, - Reason: "doesn't match the schema", - Err: err, - } - } - return nil -} - -// ValidateSecurityRequirements validates a multiple OpenAPI 3 security requirements. -// Returns nil if one of them inputed. -// Otherwise returns an error describing the security failures. 
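The security validation below only works when Options.AuthenticationFunc is supplied; a hedged sketch of one (the header name and the apiKey-only check are assumptions):

// Sketch only: an AuthenticationFunc for the validator removed in this file.
package server

import (
	"context"
	"fmt"

	"github.com/getkin/kin-openapi/openapi3filter"
)

var validatorOptions = &openapi3filter.Options{
	AuthenticationFunc: func(c context.Context, input *openapi3filter.AuthenticationInput) error {
		switch input.SecurityScheme.Type {
		case "apiKey":
			// "X-Auth-Token" is an illustrative header, not mandated by the package.
			if input.RequestValidationInput.Request.Header.Get("X-Auth-Token") == "" {
				return input.NewError(fmt.Errorf("missing API key"))
			}
			return nil
		default:
			return input.NewError(fmt.Errorf("unsupported scheme %q", input.SecuritySchemeName))
		}
	},
}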
-func ValidateSecurityRequirements(c context.Context, input *RequestValidationInput, srs openapi3.SecurityRequirements) error { - // Alternative requirements - if len(srs) == 0 { - return nil - } - - doneChan := make(chan bool, len(srs)) - errs := make([]error, len(srs)) - - // For each alternative - for i, securityRequirement := range srs { - // Capture index from iteration variable - currentIndex := i - currentSecurityRequirement := securityRequirement - go func() { - defer func() { - v := recover() - if v != nil { - if err, ok := v.(error); ok { - errs[currentIndex] = err - } else { - errs[currentIndex] = errors.New("Panicked") - } - doneChan <- false - } - }() - if err := validateSecurityRequirement(c, input, currentSecurityRequirement); err == nil { - doneChan <- true - } else { - errs[currentIndex] = err - doneChan <- false - } - }() - } - - // Wait for all - for i := 0; i < len(srs); i++ { - ok := <-doneChan - if ok { - close(doneChan) - return nil - } - } - return &SecurityRequirementsError{ - SecurityRequirements: srs, - Errors: errs, - } -} - -// validateSecurityRequirement validates a single OpenAPI 3 security requirement -func validateSecurityRequirement(c context.Context, input *RequestValidationInput, securityRequirement openapi3.SecurityRequirement) error { - swagger := input.Route.Swagger - if swagger == nil { - return errRouteMissingSwagger - } - securitySchemes := swagger.Components.SecuritySchemes - - // Ensure deterministic order - names := make([]string, 0, len(securityRequirement)) - for name := range securityRequirement { - names = append(names, name) - } - sort.Strings(names) - - // Get authentication function - options := input.Options - if options == nil { - options = DefaultOptions - } - f := options.AuthenticationFunc - if f == nil { - return ErrAuthenticationServiceMissing - } - - if len(names) > 0 { - name := names[0] - var securityScheme *openapi3.SecurityScheme - if securitySchemes != nil { - if ref := securitySchemes[name]; ref != nil { - securityScheme = ref.Value - } - } - if securityScheme == nil { - return &RequestError{ - Input: input, - Err: fmt.Errorf("Security scheme '%s' is not declared", name), - } - } - scopes := securityRequirement[name] - return f(c, &AuthenticationInput{ - RequestValidationInput: input, - SecuritySchemeName: name, - SecurityScheme: securityScheme, - Scopes: scopes, - }) - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request_input.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request_input.go deleted file mode 100644 index 150e2700b..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_request_input.go +++ /dev/null @@ -1,37 +0,0 @@ -package openapi3filter - -import ( - "net/http" - "net/url" - - "github.com/getkin/kin-openapi/openapi3" -) - -// This function takes a parameter definition from the swagger spec, and -// the value which we received for it. It is expected to return the -// value unmarshaled into an interface which can be traversed for -// validation, it should also return the schema to be used for validating the -// object, since there can be more than one in the content spec. -// -// If a query parameter appears multiple times, values[] will have more -// than one value, but for all other parameter types it should have just -// one. 
-type ContentParameterDecoder func(param *openapi3.Parameter, values []string) (interface{}, *openapi3.Schema, error) - -type RequestValidationInput struct { - Request *http.Request - PathParams map[string]string - QueryParams url.Values - Route *Route - Options *Options - ParamDecoder ContentParameterDecoder -} - -func (input *RequestValidationInput) GetQueryParams() url.Values { - q := input.QueryParams - if q == nil { - q = input.Request.URL.Query() - input.QueryParams = q - } - return q -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response.go deleted file mode 100644 index ce376ce91..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response.go +++ /dev/null @@ -1,134 +0,0 @@ -// Package openapi3filter validates that requests and inputs request an OpenAPI 3 specification file. -package openapi3filter - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "net/http" - - "github.com/getkin/kin-openapi/openapi3" -) - -func ValidateResponse(c context.Context, input *ResponseValidationInput) error { - req := input.RequestValidationInput.Request - switch req.Method { - case "HEAD": - return nil - } - status := input.Status - if status < 100 { - return &ResponseError{ - Input: input, - Reason: "illegal status code", - Err: fmt.Errorf("Status %d", status), - } - } - - // These status codes will never be validated. - // TODO: The list is probably missing some. - switch status { - case http.StatusNotModified, - http.StatusPermanentRedirect, - http.StatusTemporaryRedirect, - http.StatusMovedPermanently: - return nil - } - route := input.RequestValidationInput.Route - options := input.Options - if options == nil { - options = DefaultOptions - } - - // Find input for the current status - responses := route.Operation.Responses - if len(responses) == 0 { - return nil - } - responseRef := responses.Get(status) // Response - if responseRef == nil { - responseRef = responses.Default() // Default input - } - if responseRef == nil { - // By default, status that is not documented is allowed. - if !options.IncludeResponseStatus { - return nil - } - - return &ResponseError{Input: input, Reason: "status is not supported"} - } - response := responseRef.Value - if response == nil { - return &ResponseError{Input: input, Reason: "response has not been resolved"} - } - - if options.ExcludeResponseBody { - // A user turned off validation of a response's body. - return nil - } - - content := response.Content - if len(content) == 0 || options.ExcludeResponseBody { - // An operation does not contains a validation schema for responses with this status code. - return nil - } - - inputMIME := input.Header.Get("Content-Type") - contentType := content.Get(inputMIME) - if contentType == nil { - return &ResponseError{ - Input: input, - Reason: fmt.Sprintf("input header 'Content-Type' has unexpected value: %q", inputMIME), - } - } - - if contentType.Schema == nil { - // An operation does not contains a validation schema for responses with this status code. - return nil - } - - // Read response's body. - body := input.Body - - // Response would contain partial or empty input body - // after we begin reading. - // Ensure that this doesn't happen. 
- input.Body = nil - - // Ensure we close the reader - defer body.Close() - - // Read all - data, err := ioutil.ReadAll(body) - if err != nil { - return &ResponseError{ - Input: input, - Reason: "failed to read response body", - Err: err, - } - } - - // Put the data back into the response. - input.SetBodyBytes(data) - - encFn := func(name string) *openapi3.Encoding { return contentType.Encoding[name] } - value, err := decodeBody(bytes.NewBuffer(data), input.Header, contentType.Schema, encFn) - if err != nil { - return &ResponseError{ - Input: input, - Reason: "failed to decode response body", - Err: err, - } - } - - // Validate data with the schema. - if err := contentType.Schema.Value.VisitJSON(value); err != nil { - return &ResponseError{ - Input: input, - Reason: "response body doesn't match the schema", - Err: err, - } - } - return nil -} diff --git a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response_input.go b/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response_input.go deleted file mode 100644 index edf38730a..000000000 --- a/vendor/github.com/getkin/kin-openapi/openapi3filter/validate_response_input.go +++ /dev/null @@ -1,42 +0,0 @@ -package openapi3filter - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -type ResponseValidationInput struct { - RequestValidationInput *RequestValidationInput - Status int - Header http.Header - Body io.ReadCloser - Options *Options -} - -func (input *ResponseValidationInput) SetBodyBytes(value []byte) *ResponseValidationInput { - input.Body = ioutil.NopCloser(bytes.NewReader(value)) - return input -} - -var JSONPrefixes = []string{ - ")]}',\n", -} - -// TrimJSONPrefix trims one of the possible prefixes -func TrimJSONPrefix(data []byte) []byte { -search: - for _, prefix := range JSONPrefixes { - if len(data) < len(prefix) { - continue - } - for i, b := range data[:len(prefix)] { - if b != prefix[i] { - continue search - } - } - return data[len(prefix):] - } - return data -} diff --git a/vendor/github.com/getkin/kin-openapi/pathpattern/node.go b/vendor/github.com/getkin/kin-openapi/pathpattern/node.go deleted file mode 100644 index 883d63af0..000000000 --- a/vendor/github.com/getkin/kin-openapi/pathpattern/node.go +++ /dev/null @@ -1,333 +0,0 @@ -// Package pathpattern implements path matching. -// -// Examples of supported patterns: -// * "/" -// * "/abc"" -// * "/abc/{variable}" (matches until next '/' or end-of-string) -// * "/abc/{variable*}" (matches everything, including "/abc" if "/abc" has noot) -// * "/abc/{ variable | prefix_(.*}_suffix }" (matches regular expressions) -package pathpattern - -import ( - "bytes" - "fmt" - "regexp" - "sort" - "strings" -) - -var DefaultOptions = &Options{ - SupportWildcard: true, -} - -type Options struct { - SupportWildcard bool - SupportRegExp bool -} - -// PathFromHost converts a host pattern to a path pattern. -// -// Examples: -// * PathFromHost("some-subdomain.domain.com", false) -> "com/./domain/./some-subdomain" -// * PathFromHost("some-subdomain.domain.com", true) -> "com/./domain/./subdomain/-/some" -func PathFromHost(host string, specialDashes bool) string { - buf := make([]byte, 0, len(host)) - end := len(host) - - // Go from end to start - for start := end - 1; start >= 0; start-- { - switch host[start] { - case '.': - buf = append(buf, host[start+1:end]...) - buf = append(buf, '/', '.', '/') - end = start - case '-': - if specialDashes { - buf = append(buf, host[start+1:end]...) 
- buf = append(buf, '/', '-', '/') - end = start - } - } - } - buf = append(buf, host[:end]...) - return string(buf) -} - -type Node struct { - VariableNames []string - Value interface{} - Suffixes SuffixList -} - -func (currentNode *Node) String() string { - buf := bytes.NewBuffer(make([]byte, 0, 255)) - currentNode.toBuffer(buf, "") - return buf.String() -} - -func (currentNode *Node) toBuffer(buf *bytes.Buffer, linePrefix string) { - if value := currentNode.Value; value != nil { - buf.WriteString(linePrefix) - buf.WriteString("VALUE: ") - fmt.Fprint(buf, value) - buf.WriteString("\n") - } - suffixes := currentNode.Suffixes - if len(suffixes) > 0 { - newLinePrefix := linePrefix + " " - for _, suffix := range suffixes { - buf.WriteString(linePrefix) - buf.WriteString("PATTERN: ") - buf.WriteString(suffix.String()) - buf.WriteString("\n") - suffix.Node.toBuffer(buf, newLinePrefix) - } - } -} - -type SuffixKind int - -// Note that order is important! -const ( - // SuffixKindConstant matches a constant string - SuffixKindConstant = SuffixKind(iota) - - // SuffixKindRegExp matches a regular expression - SuffixKindRegExp - - // SuffixKindVariable matches everything until '/' - SuffixKindVariable - - // SuffixKindEverything matches everything (until end-of-string) - SuffixKindEverything -) - -// Suffix describes condition that -type Suffix struct { - Kind SuffixKind - Pattern string - - // compiled regular expression - regExp *regexp.Regexp - - // Next node - Node *Node -} - -func EqualSuffix(a, b Suffix) bool { - return a.Kind == b.Kind && a.Pattern == b.Pattern -} - -func (suffix Suffix) String() string { - switch suffix.Kind { - case SuffixKindConstant: - return suffix.Pattern - case SuffixKindVariable: - return "{_}" - case SuffixKindEverything: - return "{_*}" - default: - return "{_|" + suffix.Pattern + "}" - } -} - -type SuffixList []Suffix - -func (list SuffixList) Less(i, j int) bool { - a, b := list[i], list[j] - ak, bk := a.Kind, b.Kind - if ak < bk { - return true - } else if bk < ak { - return false - } - return a.Pattern > b.Pattern -} - -func (list SuffixList) Len() int { - return len(list) -} - -func (list SuffixList) Swap(i, j int) { - a, b := list[i], list[j] - list[i], list[j] = b, a -} - -func (currentNode *Node) MustAdd(path string, value interface{}, options *Options) { - node, err := currentNode.CreateNode(path, options) - if err != nil { - panic(err) - } - node.Value = value -} - -func (currentNode *Node) Add(path string, value interface{}, options *Options) error { - node, err := currentNode.CreateNode(path, options) - if err != nil { - return err - } - node.Value = value - return nil -} - -func (currentNode *Node) CreateNode(path string, options *Options) (*Node, error) { - if options == nil { - options = DefaultOptions - } - for strings.HasSuffix(path, "/") { - path = path[:len(path)-1] - } - remaining := path - var variableNames []string -loop: - for { - //remaining = strings.TrimPrefix(remaining, "/") - if len(remaining) == 0 { - // This node is the right one - // Check whether another route already leads to this node - currentNode.VariableNames = variableNames - return currentNode, nil - } - - suffix := Suffix{} - var i int - if strings.HasPrefix(remaining, "/") { - remaining = remaining[1:] - suffix.Kind = SuffixKindConstant - suffix.Pattern = "/" - } else { - i = strings.IndexAny(remaining, "/{") - if i < 0 { - i = len(remaining) - } - if i > 0 { - // Constant string pattern - suffix.Kind = SuffixKindConstant - suffix.Pattern = remaining[:i] - remaining = 
remaining[i:] - } else if remaining[0] == '{' { - // This is probably a variable - suffix.Kind = SuffixKindVariable - - // Find variable name - i := strings.IndexByte(remaining, '}') - if i < 0 { - return nil, fmt.Errorf("Missing '}' in: %s", path) - } - variableName := strings.TrimSpace(remaining[1:i]) - remaining = remaining[i+1:] - - if options.SupportRegExp { - // See if it has regular expression - i = strings.IndexByte(variableName, '|') - if i >= 0 { - suffix.Kind = SuffixKindRegExp - suffix.Pattern = strings.TrimSpace(variableName[i+1:]) - variableName = strings.TrimSpace(variableName[:i]) - } - } - if suffix.Kind == SuffixKindVariable && options.SupportWildcard { - if strings.HasSuffix(variableName, "*") { - suffix.Kind = SuffixKindEverything - } - } - variableNames = append(variableNames, variableName) - } - } - - // Find existing matcher - for _, existing := range currentNode.Suffixes { - if EqualSuffix(existing, suffix) { - currentNode = existing.Node - continue loop - } - } - - // Compile regular expression - if suffix.Kind == SuffixKindRegExp { - regExp, err := regexp.Compile(suffix.Pattern) - if err != nil { - return nil, fmt.Errorf("Invalid regular expression in: %s", path) - } - suffix.regExp = regExp - } - - // Create new node - newNode := &Node{} - suffix.Node = newNode - currentNode.Suffixes = append(currentNode.Suffixes, suffix) - sort.Sort(currentNode.Suffixes) - currentNode = newNode - continue loop - } -} - -func (currentNode *Node) Match(path string) (*Node, []string) { - for strings.HasSuffix(path, "/") { - path = path[:len(path)-1] - } - variableValues := make([]string, 0, 8) - return currentNode.matchRemaining(path, false, variableValues) -} - -func (currentNode *Node) matchRemaining(remaining string, hasExtraSlash bool, paramValues []string) (*Node, []string) { - // Remove "/" from the beginning - // if len(remaining) > 0 && remaining[0] == '/' { - // remaining = remaining[1:] - // } - - // Check if this node matches - if len(remaining) == 0 && currentNode.Value != nil { - return currentNode, paramValues - } - - // See if any suffix matches - for _, suffix := range currentNode.Suffixes { - var resultNode *Node - var resultValues []string - switch suffix.Kind { - case SuffixKindConstant: - pattern := suffix.Pattern - if strings.HasPrefix(remaining, pattern) { - newRemaining := remaining[len(pattern):] - resultNode, resultValues = suffix.Node.matchRemaining(newRemaining, hasExtraSlash, paramValues) - } else if len(remaining) == 0 && pattern == "/" { - resultNode, resultValues = suffix.Node.matchRemaining(remaining, hasExtraSlash, paramValues) - } - case SuffixKindVariable: - i := strings.IndexByte(remaining, '/') - if i < 0 { - i = len(remaining) - } - newParamValues := append(paramValues, remaining[:i]) - newRemaining := remaining[i:] - resultNode, resultValues = suffix.Node.matchRemaining(newRemaining, hasExtraSlash, newParamValues) - case SuffixKindEverything: - newParamValues := append(paramValues, remaining) - resultNode, resultValues = suffix.Node, newParamValues - case SuffixKindRegExp: - i := strings.IndexByte(remaining, '/') - if i < 0 { - i = len(remaining) - } - paramValue := remaining[:i] - regExp := suffix.regExp - if regExp.MatchString(paramValue) { - matches := regExp.FindStringSubmatch(paramValue) - if len(matches) > 1 { - paramValue = matches[1] - } - newParamValues := append(paramValues, paramValue) - newRemaining := remaining[i:] - resultNode, resultValues = suffix.Node.matchRemaining(newRemaining, hasExtraSlash, newParamValues) - } - } - if 
resultNode != nil && resultNode.Value != nil { - // This suffix matched - return resultNode, resultValues - } - } - - // No suffix matched - return nil, nil -} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore deleted file mode 100644 index e256a31e0..000000000 --- a/vendor/github.com/ghodss/yaml/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc0..000000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE deleted file mode 100644 index 7805d36de..000000000 --- a/vendor/github.com/ghodss/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md deleted file mode 100644 index 0200f75b4..000000000 --- a/vendor/github.com/ghodss/yaml/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). - -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). - -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. -``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. - -## Installation and usage - -To install, run: - -``` -$ go get github.com/ghodss/yaml -``` - -And import using: - -``` -import "github.com/ghodss/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. - p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. 
- var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - name: John - age: 30 - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 586007402..000000000 --- a/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. 
-type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. 
- // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. 
-// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. 
-func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index 4fb4054a8..000000000 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,277 +0,0 @@ -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = json.Unmarshal(j, o) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. -// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. 
- return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. - s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. - jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. 
- jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. - var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } - - return nil, nil -} diff --git a/vendor/github.com/glycerine/go-unsnap-stream/.gitignore b/vendor/github.com/glycerine/go-unsnap-stream/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/glycerine/go-unsnap-stream/LICENSE b/vendor/github.com/glycerine/go-unsnap-stream/LICENSE deleted file mode 100644 index a441b993b..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT license. - -Copyright (c) 2014 the go-unsnap-stream authors. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/glycerine/go-unsnap-stream/README.md b/vendor/github.com/glycerine/go-unsnap-stream/README.md deleted file mode 100644 index b1b8c7475..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/README.md +++ /dev/null @@ -1,20 +0,0 @@ -go-unsnap-stream -================ - -This is a small golang library for decoding and encoding the snappy *streaming* format, specified here: https://github.com/google/snappy/blob/master/framing_format.txt - -Note that the *streaming or framing format* for snappy is different from snappy itself. Think of it as a train of boxcars: the streaming format breaks your data in chunks, applies snappy to each chunk alone, then puts a thin wrapper around the chunk, and sends it along in turn. You can begin decoding before receiving everything. And memory requirements for decoding are sane. - -Strangely, though the streaming format was first proposed in Go[1][2], it was never upated, and I could not locate any other library for Go that would handle the streaming/framed snappy format. Hence this implementation of the spec. There is a command line tool[3] that has a C implementation, but this is the only Go implementation that I am aware of. The reference for the framing/streaming spec seems to be the python implementation[4]. - -For binary compatibility with the python implementation, one could use the C-snappy compressor/decompressor code directly; using github.com/dgryski/go-csnappy. In fact we did this for a while to verify byte-for-byte compatiblity, as the native Go implementation produces slightly different binary compression (still conformant with the standard of course), which made test-diffs harder, and some have complained about it being slower than the C. - -However, while the c-snappy was useful for checking compatibility, it introduced dependencies on external C libraries (both the c-snappy library and the C standard library). Our go binary executable that used the go-unsnap-stream library was no longer standalone, and deployment was painful if not impossible if the target had a different C standard library. So we've gone back to using the snappy-go implementation (entirely in Go) for ease of deployment. See the comments at the top of unsnap.go if you wish to use c-snappy instead. 
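(Editor's note, illustrative only.) The go-unsnap-stream README above describes the snappy *framing/streaming* format: data is split into chunks, each chunk is compressed independently and wrapped with a small header and CRC, so decoding can begin before the whole stream has arrived. A hedged round-trip sketch of that format, using the streaming reader/writer in the plain-Go `github.com/golang/snappy` package, follows; it is for context only and is not introduced by this patch.

```go
// Round-trip through the snappy framing (streaming) format.
// Purely illustrative; assumes github.com/golang/snappy's streaming API.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/golang/snappy"
)

func main() {
	var framed bytes.Buffer

	// The buffered writer emits the framed format: a stream header
	// followed by independently compressed, CRC-protected chunks.
	w := snappy.NewBufferedWriter(&framed)
	if _, err := w.Write([]byte("hello, framed snappy")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flush the final chunk
		panic(err)
	}

	// The reader decodes chunk by chunk, so it can start before the
	// whole stream is available.
	r := snappy.NewReader(&framed)
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```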
- -[1] https://groups.google.com/forum/#!msg/snappy-compression/qvLNe2cSH9s/R19oBC-p7g4J - -[2] https://codereview.appspot.com/5167058 - -[3] https://github.com/kubo/snzip - -[4] https://pypi.python.org/pypi/python-snappy \ No newline at end of file diff --git a/vendor/github.com/glycerine/go-unsnap-stream/binary.dat b/vendor/github.com/glycerine/go-unsnap-stream/binary.dat deleted file mode 100644 index f31eee2e2..000000000 Binary files a/vendor/github.com/glycerine/go-unsnap-stream/binary.dat and /dev/null differ diff --git a/vendor/github.com/glycerine/go-unsnap-stream/binary.dat.snappy b/vendor/github.com/glycerine/go-unsnap-stream/binary.dat.snappy deleted file mode 100644 index ed3702429..000000000 Binary files a/vendor/github.com/glycerine/go-unsnap-stream/binary.dat.snappy and /dev/null differ diff --git a/vendor/github.com/glycerine/go-unsnap-stream/rbuf.go b/vendor/github.com/glycerine/go-unsnap-stream/rbuf.go deleted file mode 100644 index f771c392d..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/rbuf.go +++ /dev/null @@ -1,375 +0,0 @@ -package unsnap - -// copyright (c) 2014, Jason E. Aten -// license: MIT - -// Some text from the Golang standard library doc is adapted and -// reproduced in fragments below to document the expected behaviors -// of the interface functions Read()/Write()/ReadFrom()/WriteTo() that -// are implemented here. Those descriptions (see -// http://golang.org/pkg/io/#Reader for example) are -// copyright 2010 The Go Authors. - -import "io" - -// FixedSizeRingBuf: -// -// a fixed-size circular ring buffer. Yes, just what is says. -// -// We keep a pair of ping/pong buffers so that we can linearize -// the circular buffer into a contiguous slice if need be. -// -// For efficiency, a FixedSizeRingBuf may be vastly preferred to -// a bytes.Buffer. The ReadWithoutAdvance(), Advance(), and Adopt() -// methods are all non-standard methods written for speed. -// -// For an I/O heavy application, I have replaced bytes.Buffer with -// FixedSizeRingBuf and seen memory consumption go from 8GB to 25MB. -// Yes, that is a 300x reduction in memory footprint. Everything ran -// faster too. -// -// Note that Bytes(), while inescapable at times, is expensive: avoid -// it if possible. Instead it is better to use the FixedSizeRingBuf.Readable -// member to get the number of bytes available. Bytes() is expensive because -// it may copy the back and then the front of a wrapped buffer A[Use] -// into A[1-Use] in order to get a contiguous slice. If possible use ContigLen() -// first to get the size that can be read without copying, Read() that -// amount, and then Read() a second time -- to avoid the copy. - -type FixedSizeRingBuf struct { - A [2][]byte // a pair of ping/pong buffers. Only one is active. - Use int // which A buffer is in active use, 0 or 1 - N int // MaxViewInBytes, the size of A[0] and A[1] in bytes. - Beg int // start of data in A[Use] - Readable int // number of bytes available to read in A[Use] - - OneMade bool // lazily instantiate the [1] buffer. If we never call Bytes(), - // we may never need it. If OneMade is false, the Use must be = 0. -} - -func (b *FixedSizeRingBuf) Make2ndBuffer() { - if b.OneMade { - return - } - b.A[1] = make([]byte, b.N, b.N) - b.OneMade = true -} - -// get the length of the largest read that we can provide to a contiguous slice -// without an extra linearizing copy of all bytes internally. 
-func (b *FixedSizeRingBuf) ContigLen() int { - extent := b.Beg + b.Readable - firstContigLen := intMin(extent, b.N) - b.Beg - return firstContigLen -} - -func NewFixedSizeRingBuf(maxViewInBytes int) *FixedSizeRingBuf { - n := maxViewInBytes - r := &FixedSizeRingBuf{ - Use: 0, // 0 or 1, whichever is actually in use at the moment. - // If we are asked for Bytes() and we wrap, linearize into the other. - - N: n, - Beg: 0, - Readable: 0, - OneMade: false, - } - r.A[0] = make([]byte, n, n) - - // r.A[1] initialized lazily now. - - return r -} - -// from the standard library description of Bytes(): -// Bytes() returns a slice of the contents of the unread portion of the buffer. -// If the caller changes the contents of the -// returned slice, the contents of the buffer will change provided there -// are no intervening method calls on the Buffer. -// -func (b *FixedSizeRingBuf) Bytes() []byte { - - extent := b.Beg + b.Readable - if extent <= b.N { - // we fit contiguously in this buffer without wrapping to the other - return b.A[b.Use][b.Beg:(b.Beg + b.Readable)] - } - - // wrap into the other buffer - b.Make2ndBuffer() - - src := b.Use - dest := 1 - b.Use - - n := copy(b.A[dest], b.A[src][b.Beg:]) - n += copy(b.A[dest][n:], b.A[src][0:(extent%b.N)]) - - b.Use = dest - b.Beg = 0 - - return b.A[b.Use][:n] -} - -// Read(): -// -// from bytes.Buffer.Read(): Read reads the next len(p) bytes -// from the buffer or until the buffer is drained. The return -// value n is the number of bytes read. If the buffer has no data -// to return, err is io.EOF (unless len(p) is zero); otherwise it is nil. -// -// from the description of the Reader interface, -// http://golang.org/pkg/io/#Reader -// -/* -Reader is the interface that wraps the basic Read method. - -Read reads up to len(p) bytes into p. It returns the number -of bytes read (0 <= n <= len(p)) and any error encountered. -Even if Read returns n < len(p), it may use all of p as scratch -space during the call. If some data is available but not -len(p) bytes, Read conventionally returns what is available -instead of waiting for more. - -When Read encounters an error or end-of-file condition after -successfully reading n > 0 bytes, it returns the number of bytes -read. It may return the (non-nil) error from the same call or -return the error (and n == 0) from a subsequent call. An instance -of this general case is that a Reader returning a non-zero number -of bytes at the end of the input stream may return -either err == EOF or err == nil. The next Read should -return 0, EOF regardless. - -Callers should always process the n > 0 bytes returned before -considering the error err. Doing so correctly handles I/O errors -that happen after reading some bytes and also both of the -allowed EOF behaviors. - -Implementations of Read are discouraged from returning a zero -byte count with a nil error, and callers should treat that -situation as a no-op. -*/ -// - -func (b *FixedSizeRingBuf) Read(p []byte) (n int, err error) { - return b.ReadAndMaybeAdvance(p, true) -} - -// if you want to Read the data and leave it in the buffer, so as -// to peek ahead for example. 
-func (b *FixedSizeRingBuf) ReadWithoutAdvance(p []byte) (n int, err error) { - return b.ReadAndMaybeAdvance(p, false) -} - -func (b *FixedSizeRingBuf) ReadAndMaybeAdvance(p []byte, doAdvance bool) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - if b.Readable == 0 { - return 0, io.EOF - } - extent := b.Beg + b.Readable - if extent <= b.N { - n += copy(p, b.A[b.Use][b.Beg:extent]) - } else { - n += copy(p, b.A[b.Use][b.Beg:b.N]) - if n < len(p) { - n += copy(p[n:], b.A[b.Use][0:(extent%b.N)]) - } - } - if doAdvance { - b.Advance(n) - } - return -} - -// -// Write writes len(p) bytes from p to the underlying data stream. -// It returns the number of bytes written from p (0 <= n <= len(p)) -// and any error encountered that caused the write to stop early. -// Write must return a non-nil error if it returns n < len(p). -// -func (b *FixedSizeRingBuf) Write(p []byte) (n int, err error) { - for { - if len(p) == 0 { - // nothing (left) to copy in; notice we shorten our - // local copy p (below) as we read from it. - return - } - - writeCapacity := b.N - b.Readable - if writeCapacity <= 0 { - // we are all full up already. - return n, io.ErrShortWrite - } - if len(p) > writeCapacity { - err = io.ErrShortWrite - // leave err set and - // keep going, write what we can. - } - - writeStart := (b.Beg + b.Readable) % b.N - - upperLim := intMin(writeStart+writeCapacity, b.N) - - k := copy(b.A[b.Use][writeStart:upperLim], p) - - n += k - b.Readable += k - p = p[k:] - - // we can fill from b.A[b.Use][0:something] from - // p's remainder, so loop - } -} - -// WriteTo and ReadFrom avoid intermediate allocation and copies. - -// WriteTo writes data to w until there's no more data to write -// or when an error occurs. The return value n is the number of -// bytes written. Any error encountered during the write is also returned. -func (b *FixedSizeRingBuf) WriteTo(w io.Writer) (n int64, err error) { - - if b.Readable == 0 { - return 0, io.EOF - } - - extent := b.Beg + b.Readable - firstWriteLen := intMin(extent, b.N) - b.Beg - secondWriteLen := b.Readable - firstWriteLen - if firstWriteLen > 0 { - m, e := w.Write(b.A[b.Use][b.Beg:(b.Beg + firstWriteLen)]) - n += int64(m) - b.Advance(m) - - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != firstWriteLen { - return n, io.ErrShortWrite - } - } - if secondWriteLen > 0 { - m, e := w.Write(b.A[b.Use][0:secondWriteLen]) - n += int64(m) - b.Advance(m) - - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != secondWriteLen { - return n, io.ErrShortWrite - } - } - - return n, nil -} - -// ReadFrom() reads data from r until EOF or error. The return value n -// is the number of bytes read. Any error except io.EOF encountered -// during the read is also returned. 
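(Editor's note, illustrative only.) The doc comments in the deleted `rbuf.go` recommend a peek-then-consume pattern, `ReadWithoutAdvance()` followed by `Advance()`, as the way this ring buffer avoids the copies that make `bytes.Buffer` expensive. Below is a short sketch of that pattern against the API shown in the deleted file; the vendored copy is exactly what this patch removes, so this is reference material, not code to add back.

```go
// Peek-then-consume with FixedSizeRingBuf, mirroring the API in the
// deleted rbuf.go. The import path is the upstream package, shown only
// for illustration.
package main

import (
	"fmt"

	unsnap "github.com/glycerine/go-unsnap-stream"
)

func main() {
	b := unsnap.NewFixedSizeRingBuf(1024)
	b.Write([]byte("header|payload"))

	peek := make([]byte, 7)
	n, _ := b.ReadWithoutAdvance(peek) // inspect without consuming
	fmt.Printf("peeked %q\n", peek[:n])
	b.Advance(n) // now actually consume the peeked prefix

	rest := make([]byte, b.Readable)
	m, _ := b.Read(rest)
	fmt.Printf("remaining %q\n", rest[:m])
}
```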
-func (b *FixedSizeRingBuf) ReadFrom(r io.Reader) (n int64, err error) { - for { - writeCapacity := b.N - b.Readable - if writeCapacity <= 0 { - // we are all full - return n, nil - } - writeStart := (b.Beg + b.Readable) % b.N - upperLim := intMin(writeStart+writeCapacity, b.N) - - m, e := r.Read(b.A[b.Use][writeStart:upperLim]) - n += int64(m) - b.Readable += m - if e == io.EOF { - return n, nil - } - if e != nil { - return n, e - } - } -} - -func (b *FixedSizeRingBuf) Reset() { - b.Beg = 0 - b.Readable = 0 - b.Use = 0 -} - -// Advance(): non-standard, but better than Next(), -// because we don't have to unwrap our buffer and pay the cpu time -// for the copy that unwrapping may need. -// Useful in conjuction/after ReadWithoutAdvance() above. -func (b *FixedSizeRingBuf) Advance(n int) { - if n <= 0 { - return - } - if n > b.Readable { - n = b.Readable - } - b.Readable -= n - b.Beg = (b.Beg + n) % b.N -} - -// Adopt(): non-standard. -// -// For efficiency's sake, (possibly) take ownership of -// already allocated slice offered in me. -// -// If me is large we will adopt it, and we will potentially then -// write to the me buffer. -// If we already have a bigger buffer, copy me into the existing -// buffer instead. -func (b *FixedSizeRingBuf) Adopt(me []byte) { - n := len(me) - if n > b.N { - b.A[0] = me - b.OneMade = false - b.N = n - b.Use = 0 - b.Beg = 0 - b.Readable = n - } else { - // we already have a larger buffer, reuse it. - copy(b.A[0], me) - b.Use = 0 - b.Beg = 0 - b.Readable = n - } -} - -func intMax(a, b int) int { - if a > b { - return a - } else { - return b - } -} - -func intMin(a, b int) int { - if a < b { - return a - } else { - return b - } -} - -// Get the (beg, end] indices of the tailing empty buffer of bytes slice that from that is free for writing. -// Note: not guaranteed to be zeroed. At all. -func (b *FixedSizeRingBuf) GetEndmostWritable() (beg int, end int) { - extent := b.Beg + b.Readable - if extent < b.N { - return extent, b.N - } - - return extent % b.N, b.Beg -} - -// Note: not guaranteed to be zeroed. -func (b *FixedSizeRingBuf) GetEndmostWritableSlice() []byte { - beg, e := b.GetEndmostWritable() - return b.A[b.Use][beg:e] -} diff --git a/vendor/github.com/glycerine/go-unsnap-stream/snap.go b/vendor/github.com/glycerine/go-unsnap-stream/snap.go deleted file mode 100644 index 12a8d40b5..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/snap.go +++ /dev/null @@ -1,100 +0,0 @@ -package unsnap - -import ( - "encoding/binary" - - // no c lib dependency - snappy "github.com/golang/snappy" - // or, use the C wrapper for speed - //snappy "github.com/dgryski/go-csnappy" -) - -// add Write() method for SnappyFile (see unsnap.go) - -// reference for snappy framing/streaming format: -// http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt -// ?spec=svn68&r=71 - -// -// Write writes len(p) bytes from p to the underlying data stream. -// It returns the number of bytes written from p (0 <= n <= len(p)) and -// any error encountered that caused the write to stop early. Write -// must return a non-nil error if it returns n < len(p). -// -func (sf *SnappyFile) Write(p []byte) (n int, err error) { - - if sf.SnappyEncodeDecodeOff { - return sf.Writer.Write(p) - } - - if !sf.Writing { - panic("Writing on a read-only SnappyFile") - } - - // encoding in snappy can apparently go beyond the original size, beware. 
- // so our buffers must be sized 2*max snappy chunk => 2 * CHUNK_MAX(65536) - - sf.DecBuf.Reset() - sf.EncBuf.Reset() - - if !sf.HeaderChunkWritten { - sf.HeaderChunkWritten = true - _, err = sf.Writer.Write(SnappyStreamHeaderMagic) - if err != nil { - return - } - } - var chunk []byte - var chunk_type byte - var crc uint32 - - for len(p) > 0 { - - // chunk points to input p by default, unencoded input. - chunk = p[:IntMin(len(p), CHUNK_MAX)] - crc = masked_crc32c(chunk) - - writeme := chunk[:] - - // first write to EncBuf, as a temp, in case we want - // to discard and send uncompressed instead. - compressed_chunk := snappy.Encode(sf.EncBuf.GetEndmostWritableSlice(), chunk) - - if len(compressed_chunk) <= int((1-_COMPRESSION_THRESHOLD)*float64(len(chunk))) { - writeme = compressed_chunk - chunk_type = _COMPRESSED_CHUNK - } else { - // keep writeme pointing at original chunk (uncompressed) - chunk_type = _UNCOMPRESSED_CHUNK - } - - const crc32Sz = 4 - var tag32 uint32 = uint32(chunk_type) + (uint32(len(writeme)+crc32Sz) << 8) - - err = binary.Write(sf.Writer, binary.LittleEndian, tag32) - if err != nil { - return - } - - err = binary.Write(sf.Writer, binary.LittleEndian, crc) - if err != nil { - return - } - - _, err = sf.Writer.Write(writeme) - if err != nil { - return - } - - n += len(chunk) - p = p[len(chunk):] - } - return n, nil -} - -func IntMin(a int, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt b/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt deleted file mode 100644 index 5f5027939..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt +++ /dev/null @@ -1 +0,0 @@ -hello_snappy diff --git a/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt.snappy b/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt.snappy deleted file mode 100644 index ba45ecd42..000000000 Binary files a/vendor/github.com/glycerine/go-unsnap-stream/unenc.txt.snappy and /dev/null differ diff --git a/vendor/github.com/glycerine/go-unsnap-stream/unsnap.go b/vendor/github.com/glycerine/go-unsnap-stream/unsnap.go deleted file mode 100644 index 8789445c9..000000000 --- a/vendor/github.com/glycerine/go-unsnap-stream/unsnap.go +++ /dev/null @@ -1,513 +0,0 @@ -package unsnap - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "os" - - "hash/crc32" - - snappy "github.com/golang/snappy" - // The C library can be used, but this makes the binary dependent - // lots of extraneous c-libraries; it is no longer stand-alone. Yuck. - // - // Therefore we comment out the "dgryski/go-csnappy" path and use the - // "github.com/golang/snappy/snappy" above instead. If you are - // performance limited and can deal with distributing more libraries, - // then this is easy to swap. - // - // If you swap, note that some of the tests won't pass - // because snappy-go produces slightly different (but still - // conformant) encodings on some data. Here are bindings - // to the C-snappy: - // snappy "github.com/dgryski/go-csnappy" -) - -// SnappyFile: create a drop-in-replacement/wrapper for an *os.File that handles doing the unsnappification online as more is read from it - -type SnappyFile struct { - Fname string - - Reader io.Reader - Writer io.Writer - - // allow clients to substitute us for an os.File and just switch - // off compression if they don't want it. 
- SnappyEncodeDecodeOff bool // if true, we bypass straight to Filep - - EncBuf FixedSizeRingBuf // holds any extra that isn't yet returned, encoded - DecBuf FixedSizeRingBuf // holds any extra that isn't yet returned, decoded - - // for writing to stream-framed snappy - HeaderChunkWritten bool - - // Sanity check: we can only read, or only write, to one SnappyFile. - // EncBuf and DecBuf are used differently in each mode. Verify - // that we are consistent with this flag. - Writing bool -} - -var total int - -// for debugging, show state of buffers -func (f *SnappyFile) Dump() { - fmt.Printf("EncBuf has length %d and contents:\n%s\n", len(f.EncBuf.Bytes()), string(f.EncBuf.Bytes())) - fmt.Printf("DecBuf has length %d and contents:\n%s\n", len(f.DecBuf.Bytes()), string(f.DecBuf.Bytes())) -} - -func (f *SnappyFile) Read(p []byte) (n int, err error) { - - if f.SnappyEncodeDecodeOff { - return f.Reader.Read(p) - } - - if f.Writing { - panic("Reading on a write-only SnappyFile") - } - - // before we unencrypt more, try to drain the DecBuf first - n, _ = f.DecBuf.Read(p) - if n > 0 { - total += n - return n, nil - } - - //nEncRead, nDecAdded, err := UnsnapOneFrame(f.Filep, &f.EncBuf, &f.DecBuf, f.Fname) - _, _, err = UnsnapOneFrame(f.Reader, &f.EncBuf, &f.DecBuf, f.Fname) - if err != nil && err != io.EOF { - panic(err) - } - - n, _ = f.DecBuf.Read(p) - - if n > 0 { - total += n - return n, nil - } - if f.DecBuf.Readable == 0 { - if f.DecBuf.Readable == 0 && f.EncBuf.Readable == 0 { - // only now (when EncBuf is empty) can we give io.EOF. - // Any earlier, and we leave stuff un-decoded! - return 0, io.EOF - } - } - return 0, nil -} - -func Open(name string) (file *SnappyFile, err error) { - fp, err := os.Open(name) - if err != nil { - return nil, err - } - // encoding in snappy can apparently go beyond the original size, so - // we make our buffers big enough, 2*max snappy chunk => 2 * CHUNK_MAX(65536) - - snap := NewReader(fp) - snap.Fname = name - return snap, nil -} - -func NewReader(r io.Reader) *SnappyFile { - return &SnappyFile{ - Reader: r, - EncBuf: *NewFixedSizeRingBuf(CHUNK_MAX * 2), // buffer of snappy encoded bytes - DecBuf: *NewFixedSizeRingBuf(CHUNK_MAX * 2), // buffer of snapppy decoded bytes - Writing: false, - } -} - -func NewWriter(w io.Writer) *SnappyFile { - return &SnappyFile{ - Writer: w, - EncBuf: *NewFixedSizeRingBuf(65536), // on writing: temp for testing compression - DecBuf: *NewFixedSizeRingBuf(65536 * 2), // on writing: final buffer of snappy framed and encoded bytes - Writing: true, - } -} - -func Create(name string) (file *SnappyFile, err error) { - fp, err := os.Create(name) - if err != nil { - return nil, err - } - snap := NewWriter(fp) - snap.Fname = name - return snap, nil -} - -func (f *SnappyFile) Close() error { - if f.Writing { - wc, ok := f.Writer.(io.WriteCloser) - if ok { - return wc.Close() - } - return nil - } - rc, ok := f.Reader.(io.ReadCloser) - if ok { - return rc.Close() - } - return nil -} - -func (f *SnappyFile) Sync() error { - file, ok := f.Writer.(*os.File) - if ok { - return file.Sync() - } - return nil -} - -// for an increment of a frame at a time: -// read from r into encBuf (encBuf is still encoded, thus the name), and write unsnappified frames into outDecodedBuf -// the returned n: number of bytes read from the encrypted encBuf -func UnsnapOneFrame(r io.Reader, encBuf *FixedSizeRingBuf, outDecodedBuf *FixedSizeRingBuf, fname string) (nEnc int64, nDec int64, err error) { - // b, err := ioutil.ReadAll(r) - // if err != nil { - // 
panic(err) - // } - - nEnc = 0 - nDec = 0 - - // read up to 65536 bytes from r into encBuf, at least a snappy frame - nread, err := io.CopyN(encBuf, r, 65536) // returns nwrotebytes, err - nEnc += nread - if err != nil { - if err == io.EOF { - if nread == 0 { - if encBuf.Readable == 0 { - return nEnc, nDec, io.EOF - } - // else we have bytes in encBuf, so decode them! - err = nil - } else { - // continue below, processing the nread bytes - err = nil - } - } else { - panic(err) - } - } - - // flag for printing chunk size alignment messages - verbose := false - - const snappyStreamHeaderSz = 10 - const headerSz = 4 - const crc32Sz = 4 - // the magic 18 bytes accounts for the snappy streaming header and the first chunks size and checksum - // http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - - chunk := (*encBuf).Bytes() - - // however we exit, advance as - // defer func() { (*encBuf).Next(N) }() - - // 65536 is the max size of a snappy framed chunk. See - // http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt:91 - // buf := make([]byte, 65536) - - // fmt.Printf("read from file, b is len:%d with value: %#v\n", len(b), b) - // fmt.Printf("read from file, bcut is len:%d with value: %#v\n", len(bcut), bcut) - - //fmt.Printf("raw bytes of chunksz are: %v\n", b[11:14]) - - fourbytes := make([]byte, 4) - chunkCount := 0 - - for nDec < 65536 { - if len(chunk) == 0 { - break - } - chunkCount++ - fourbytes[3] = 0 - copy(fourbytes, chunk[1:4]) - chunksz := binary.LittleEndian.Uint32(fourbytes) - chunk_type := chunk[0] - - switch true { - case chunk_type == 0xff: - { // stream identifier - - streamHeader := chunk[:snappyStreamHeaderSz] - if 0 != bytes.Compare(streamHeader, []byte{0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59}) { - panic("file had chunk starting with 0xff but then no magic snappy streaming protocol bytes, aborting.") - } else { - //fmt.Printf("got streaming snappy magic header just fine.\n") - } - chunk = chunk[snappyStreamHeaderSz:] - (*encBuf).Advance(snappyStreamHeaderSz) - nEnc += snappyStreamHeaderSz - continue - } - case chunk_type == 0x00: - { // compressed data - if verbose { - fmt.Fprintf(os.Stderr, "chunksz is %d while total bytes avail are: %d\n", int(chunksz), len(chunk)-4) - } - - crc := binary.LittleEndian.Uint32(chunk[headerSz:(headerSz + crc32Sz)]) - section := chunk[(headerSz + crc32Sz):(headerSz + chunksz)] - - dec, ok := snappy.Decode(nil, section) - if ok != nil { - // we've probably truncated a snappy frame at this point - // ok=snappy: corrupt input - // len(dec) == 0 - // - panic(fmt.Sprintf("could not decode snappy stream: '%s' and len dec=%d and ok=%v\n", fname, len(dec), ok)) - - // get back to caller with what we've got so far - return nEnc, nDec, nil - } - // fmt.Printf("ok, b is %#v , %#v\n", ok, dec) - - // spit out decoded text - // n, err := w.Write(dec) - //fmt.Printf("len(dec) = %d, outDecodedBuf.Readable=%d\n", len(dec), outDecodedBuf.Readable) - bnb := bytes.NewBuffer(dec) - n, err := io.Copy(outDecodedBuf, bnb) - if err != nil { - //fmt.Printf("got n=%d, err= %s ; when trying to io.Copy(outDecodedBuf: N=%d, Readable=%d)\n", n, err, outDecodedBuf.N, outDecodedBuf.Readable) - panic(err) - } - if n != int64(len(dec)) { - panic("could not write all bytes to outDecodedBuf") - } - nDec += n - - // verify the crc32 rotated checksum - m32 := masked_crc32c(dec) - if m32 != crc { - panic(fmt.Sprintf("crc32 masked failiure. 
expected: %v but got: %v", crc, m32)) - } else { - //fmt.Printf("\nchecksums match: %v == %v\n", crc, m32) - } - - // move to next header - inc := (headerSz + int(chunksz)) - chunk = chunk[inc:] - (*encBuf).Advance(inc) - nEnc += int64(inc) - continue - } - case chunk_type == 0x01: - { // uncompressed data - - //n, err := w.Write(chunk[(headerSz+crc32Sz):(headerSz + int(chunksz))]) - n, err := io.Copy(outDecodedBuf, bytes.NewBuffer(chunk[(headerSz+crc32Sz):(headerSz+int(chunksz))])) - if verbose { - //fmt.Printf("debug: n=%d err=%v chunksz=%d outDecodedBuf='%v'\n", n, err, chunksz, outDecodedBuf) - } - if err != nil { - panic(err) - } - if n != int64(chunksz-crc32Sz) { - panic("could not write all bytes to stdout") - } - nDec += n - - inc := (headerSz + int(chunksz)) - chunk = chunk[inc:] - (*encBuf).Advance(inc) - nEnc += int64(inc) - continue - } - case chunk_type == 0xfe: - fallthrough // padding, just skip it - case chunk_type >= 0x80 && chunk_type <= 0xfd: - { // Reserved skippable chunks - //fmt.Printf("\nin reserved skippable chunks, at nEnc=%v\n", nEnc) - inc := (headerSz + int(chunksz)) - chunk = chunk[inc:] - nEnc += int64(inc) - (*encBuf).Advance(inc) - continue - } - - default: - panic(fmt.Sprintf("unrecognized/unsupported chunk type %#v", chunk_type)) - } - - } // end for{} - - return nEnc, nDec, err - //return int64(N), nil -} - -// for whole file at once: -// -// receive on stdin a stream of bytes in the snappy-streaming framed -// format, defined here: http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt -// Grab each frame, run it through the snappy decoder, and spit out -// each frame all joined back-to-back on stdout. -// -func Unsnappy(r io.Reader, w io.Writer) (err error) { - b, err := ioutil.ReadAll(r) - if err != nil { - panic(err) - } - - // flag for printing chunk size alignment messages - verbose := false - - const snappyStreamHeaderSz = 10 - const headerSz = 4 - const crc32Sz = 4 - // the magic 18 bytes accounts for the snappy streaming header and the first chunks size and checksum - // http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - - chunk := b[:] - - // 65536 is the max size of a snappy framed chunk. 
See - // http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt:91 - //buf := make([]byte, 65536) - - // fmt.Printf("read from file, b is len:%d with value: %#v\n", len(b), b) - // fmt.Printf("read from file, bcut is len:%d with value: %#v\n", len(bcut), bcut) - - //fmt.Printf("raw bytes of chunksz are: %v\n", b[11:14]) - - fourbytes := make([]byte, 4) - chunkCount := 0 - - for { - if len(chunk) == 0 { - break - } - chunkCount++ - fourbytes[3] = 0 - copy(fourbytes, chunk[1:4]) - chunksz := binary.LittleEndian.Uint32(fourbytes) - chunk_type := chunk[0] - - switch true { - case chunk_type == 0xff: - { // stream identifier - - streamHeader := chunk[:snappyStreamHeaderSz] - if 0 != bytes.Compare(streamHeader, []byte{0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59}) { - panic("file had chunk starting with 0xff but then no magic snappy streaming protocol bytes, aborting.") - } else { - //fmt.Printf("got streaming snappy magic header just fine.\n") - } - chunk = chunk[snappyStreamHeaderSz:] - continue - } - case chunk_type == 0x00: - { // compressed data - if verbose { - fmt.Fprintf(os.Stderr, "chunksz is %d while total bytes avail are: %d\n", int(chunksz), len(chunk)-4) - } - - //crc := binary.LittleEndian.Uint32(chunk[headerSz:(headerSz + crc32Sz)]) - section := chunk[(headerSz + crc32Sz):(headerSz + chunksz)] - - dec, ok := snappy.Decode(nil, section) - if ok != nil { - panic("could not decode snappy stream") - } - // fmt.Printf("ok, b is %#v , %#v\n", ok, dec) - - // spit out decoded text - n, err := w.Write(dec) - if err != nil { - panic(err) - } - if n != len(dec) { - panic("could not write all bytes to stdout") - } - - // TODO: verify the crc32 rotated checksum? - - // move to next header - chunk = chunk[(headerSz + int(chunksz)):] - continue - } - case chunk_type == 0x01: - { // uncompressed data - - //crc := binary.LittleEndian.Uint32(chunk[headerSz:(headerSz + crc32Sz)]) - section := chunk[(headerSz + crc32Sz):(headerSz + chunksz)] - - n, err := w.Write(section) - if err != nil { - panic(err) - } - if n != int(chunksz-crc32Sz) { - panic("could not write all bytes to stdout") - } - - chunk = chunk[(headerSz + int(chunksz)):] - continue - } - case chunk_type == 0xfe: - fallthrough // padding, just skip it - case chunk_type >= 0x80 && chunk_type <= 0xfd: - { // Reserved skippable chunks - chunk = chunk[(headerSz + int(chunksz)):] - continue - } - - default: - panic(fmt.Sprintf("unrecognized/unsupported chunk type %#v", chunk_type)) - } - - } // end for{} - - return nil -} - -// 0xff 0x06 0x00 0x00 sNaPpY -var SnappyStreamHeaderMagic = []byte{0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59} - -const CHUNK_MAX = 65536 -const _STREAM_TO_STREAM_BLOCK_SIZE = CHUNK_MAX -const _STREAM_IDENTIFIER = `sNaPpY` -const _COMPRESSED_CHUNK = 0x00 -const _UNCOMPRESSED_CHUNK = 0x01 -const _IDENTIFIER_CHUNK = 0xff -const _RESERVED_UNSKIPPABLE0 = 0x02 // chunk ranges are [inclusive, exclusive) -const _RESERVED_UNSKIPPABLE1 = 0x80 -const _RESERVED_SKIPPABLE0 = 0x80 -const _RESERVED_SKIPPABLE1 = 0xff - -// the minimum percent of bytes compression must save to be enabled in automatic -// mode -const _COMPRESSION_THRESHOLD = .125 - -var crctab *crc32.Table - -func init() { - crctab = crc32.MakeTable(crc32.Castagnoli) // this is correct table, matches the crc32c.c code used by python -} - -func masked_crc32c(data []byte) uint32 { - - // see the framing format specification, http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - var crc uint32 = 
crc32.Checksum(data, crctab) - return (uint32((crc>>15)|(crc<<17)) + 0xa282ead8) -} - -func ReadSnappyStreamCompressedFile(filename string) ([]byte, error) { - - snappyFile, err := Open(filename) - if err != nil { - return []byte{}, err - } - - var bb bytes.Buffer - _, err = bb.ReadFrom(snappyFile) - if err == io.EOF { - err = nil - } - if err != nil { - panic(err) - } - - return bb.Bytes(), err -} diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index 12411127b..000000000 --- a/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini -.idea -/.vscode diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml deleted file mode 100644 index c8ea49ccc..000000000 --- a/vendor/github.com/go-ini/ini/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: false -language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - -script: - - go get golang.org/x/tools/cmd/cover - - go get github.com/smartystreets/goconvey - - mkdir -p $HOME/gopath/src/gopkg.in - - ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1 - - cd $HOME/gopath/src/gopkg.in/ini.v1 - - go test -v -cover -race diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index d361bbcdf..000000000 --- a/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. 
For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile deleted file mode 100644 index af27ff076..000000000 --- a/vendor/github.com/go-ini/ini/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -.PHONY: build test bench vet coverage - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -race -test.bench=. 
-test.benchmem - -vet: - go vet - -coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index ae4dfc3a5..000000000 --- a/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,46 +0,0 @@ -INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini) -=== - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -## Features - -- Load from multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -The minimum requirement of Go is **1.6**. - -To use a tagged revision: - -```sh -$ go get gopkg.in/ini.v1 -``` - -To use with latest changes: - -```sh -$ go get github.com/go-ini/ini -``` - -Please add `-u` flag to update in the future. - -## Getting Help - -- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go deleted file mode 100644 index 80afe7431..000000000 --- a/vendor/github.com/go-ini/ini/error.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "fmt" -) - -type ErrDelimiterNotFound struct { - Line string -} - -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go deleted file mode 100644 index 0ed0eafd0..000000000 --- a/vendor/github.com/go-ini/ini/file.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2017 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -// File represents a combination of a or more INI file(s) in memory. -type File struct { - options LoadOptions - dataSources []dataSource - - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - lock sync.RWMutex - - // To keep data in order. - sectionList []string - // Actual data is stored here. - sections map[string]*Section - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - if len(opts.KeyValueDelimiters) == 0 { - opts.KeyValueDelimiters = "=:" - } - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string]*Section), - sectionList: make([]string, 0, 10), - options: opts, - } -} - -// Empty returns an empty file object. -func Empty() *File { - // Ignore error here, we sure our data is good. - f, _ := Load([]byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("error creating new section: empty section name") - } else if f.options.Insensitive && name != DEFAULT_SECTION { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if inSlice(name, f.sectionList) { - return f.sections[name], nil - } - - f.sectionList = append(f.sectionList, name) - f.sections[name] = newSection(f, name) - return f.sections[name], nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - if len(name) == 0 { - name = DEFAULT_SECTION - } - if f.options.Insensitive { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sec := f.sections[name] - if sec == nil { - return nil, fmt.Errorf("section '%s' does not exist", name) - } - return sec, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// Section returns list of Section. 
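GetSection and Section differ in how a missing section is treated: GetSection reports an error, while Section silently creates an empty section on demand. A short sketch of that distinction, assuming the package is imported from github.com/go-ini/ini; the section and key names are purely illustrative:

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("[server]\nport = 50040\n"))
	if err != nil {
		panic(err)
	}

	// GetSection returns an error for a section that does not exist.
	if _, err := cfg.GetSection("missing"); err != nil {
		fmt.Println("GetSection:", err)
	}

	// Section never fails: a missing section is created empty on demand.
	_ = cfg.Section("missing")
	fmt.Println("port =", cfg.Section("server").Key("port").Value())
	fmt.Println("sections now:", cfg.SectionStrings()) // now also lists the auto-created section
}
```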
-func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name] - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section. -func (f *File) DeleteSection(name string) { - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if len(name) == 0 { - name = DEFAULT_SECTION - } - - for i, s := range f.sectionList { - if s == name { - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - delete(f.sections, name) - return - } - } -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := DefaultFormatLeft + "=" + DefaultFormatRight - - if PrettyFormat || PrettyEqual { - equalSign = " = " - } - - // Use buffer to make sure target is safe until finish encoding. - buf := bytes.NewBuffer(nil) - for i, sname := range f.sectionList { - sec := f.Section(sname) - if len(sec.Comment) > 0 { - // Support multiline comments - lines := strings.Split(sec.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + lines[i] - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if i > 0 || DefaultHeader { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modifed if they contain certain characters so - // we need to take that into account in our calculation. 
- alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KEY_LIST: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - // Support multiline comments - lines := strings.Split(key.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + strings.TrimSpace(lines[i]) - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - for _, val := range key.ValueWithShadows() { - if _, err := buf.WriteString(kname); err != nil { - return nil, err - } - - if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KEY_LIST - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return nil, err - } - } - - for _, val := range key.nestedValues { - if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. - buf, err := f.writeToBuffer(indent) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, buf.Bytes(), 0666) -} - -// SaveTo writes content to file system. 
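WriteTo re-serializes the in-memory File through writeToBuffer, aligning values around "=" while PrettyFormat is left at its default of true. A minimal round-trip sketch, with illustrative keys:

```go
package main

import (
	"os"

	ini "github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("[auth]\nstrategy=keystone\nendpoint=http://127.0.0.1/identity\n"))
	if err != nil {
		panic(err)
	}

	// Re-emit the parsed content; keys are re-aligned around "=" by default.
	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}
```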
-func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go deleted file mode 100644 index 93424f671..000000000 --- a/vendor/github.com/go-ini/ini/ini.go +++ /dev/null @@ -1,217 +0,0 @@ -// +build go1.6 - -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. -package ini - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "regexp" - "runtime" -) - -const ( - // Name for default section. You can use this constant or the string literal. - // In most of cases, an empty string is all you need to access the section. - DEFAULT_SECTION = "DEFAULT" - - // Maximum allowed depth when recursively substituing variable names. - _DEPTH_VALUES = 99 - _VERSION = "1.41.0" -) - -// Version returns current package version literal. -func Version() string { - return _VERSION -} - -var ( - // Delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows - // at package init time. - LineBreak = "\n" - - // Place custom spaces when PrettyFormat and PrettyEqual are both disabled - DefaultFormatLeft = "" - DefaultFormatRight = "" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - - // Indicate whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. - PrettyFormat = true - - // Place spaces around "=" sign even when PrettyFormat is false - PrettyEqual = false - - // Explicitly write DEFAULT section header - DefaultHeader = false - - // Indicate whether to put a line between sections - PrettySection = true -) - -func init() { - if runtime.GOOS == "windows" { - LineBreak = "\r\n" - } -} - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} - -// dataSource is an interface that returns object which can be read and closed. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -// sourceFile represents an object that contains content on the local file system. -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -// sourceData represents an object that contains content in memory. -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with Close method. 
-type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) - } -} - -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. - SkipUnrecognizableLines bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // AllowNestedValues indicates whether to allow AWS-like nested values. - // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values - AllowNestedValues bool - // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. - // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure - // Relevant quote: Values can also span multiple lines, as long as they are indented deeper - // than the first line of the value. - AllowPythonMultilineValues bool - // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. - // Docs: https://docs.python.org/2/library/configparser.html - // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. - // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. - SpaceBeforeInlineComment bool - // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format - // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" - UnescapeValueDoubleQuotes bool - // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format - // when value is NOT surrounded by any quotes. - // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. - UnescapeValueCommentSymbols bool - // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise - // conform to key/value pairs. Specify the names of those blocks here. - UnparseableSections []string - // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". 
- KeyValueDelimiters string -} - -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -// It will return error if list contains nonexistent files. -func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly same functionality as Load function -// except it ignores nonexistent files instead of returning error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) -} - -// InsensitiveLoad has exactly same functionality as Load function -// except it forces all section and key names to be lowercased. -func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// ShadowLoad has exactly same functionality as Load function -// except it allows have shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) -} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go deleted file mode 100644 index 0fee0dc7e..000000000 --- a/vendor/github.com/go-ini/ini/key.go +++ /dev/null @@ -1,752 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - nestedValues []string -} - -// newKey simply return a key object with given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. 
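Load, LooseLoad, InsensitiveLoad and ShadowLoad are all thin wrappers over LoadSources with a pre-filled LoadOptions. A hedged sketch of calling LoadSources directly with a couple of those options; the file path and the config contents are placeholders:

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	// Loose tolerates a nonexistent file; Insensitive lower-cases section and key names.
	cfg, err := ini.LoadSources(ini.LoadOptions{
		Loose:       true,
		Insensitive: true,
	}, "/etc/example/example.conf", []byte("[Database]\nEndpoint = 127.0.0.1:2379\n"))
	if err != nil {
		panic(err)
	}

	// With Insensitive set, lookups behave case-insensitively.
	fmt.Println(cfg.Section("database").Key("endpoint").String())
}
```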
-func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -func (k *Key) addNestedValue(val string) error { - if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add nested value to auto-increment or boolean key") - } - - k.nestedValues = append(k.nestedValues, val) - return nil -} - -func (k *Key) AddNestedValue(val string) error { - if !k.s.f.options.AllowNestedValues { - return errors.New("nested value is not allowed") - } - return k.addNestedValue(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - return []string{k.value} - } - vals := make([]string, len(k.shadows)+1) - vals[0] = k.value - for i := range k.shadows { - vals[i+1] = k.shadows[i].value - } - return vals -} - -// NestedValues returns nested values stored in the key. -// It is possible returned value is nil if no nested values stored in the key. -func (k *Key) NestedValues() []string { - return k.nestedValues -} - -// transformValue takes a raw value and transforms to its final string. -func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := vr[2 : len(vr)-2] - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil || k == nk { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. 
-func (k *Key) Int() (int, error) { - v, err := strconv.ParseInt(k.String(), 0, 64) - return int(v), err -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 0, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 0, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 0, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. 
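// Sketch of the Must* helpers above: they never return an error and fall back
// to (and store) the supplied default when parsing fails. Assumes imports of
// "time" and "github.com/go-ini/ini"; key names are hypothetical.
func exampleDefaults(sec *ini.Section) {
	timeout := sec.Key("timeout").MustDuration(30 * time.Second)
	retries := sec.Key("retries").MustInt(3)
	verbose := sec.Key("verbose").MustBool(false)
	_, _, _ = timeout, retries, verbose
}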
-func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. 
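// Sketch (not from the vendored file) of the candidate and range helpers: In
// falls back to the default when the value is not among the candidates, and
// the Range* helpers fall back when the value lies outside [min, max].
func exampleCandidatesAndRanges(sec *ini.Section) {
	mode := sec.Key("mode").In("standalone", []string{"standalone", "cluster"})
	ratio := sec.Key("ratio").RangeFloat64(0.5, 0.0, 1.0)
	_, _ = mode, ratio
}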
-func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - runes := []rune(str) - vals := make([]string, 0, 2) - var buf bytes.Buffer - escape := false - idx := 0 - for { - if escape { - escape = false - if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { - buf.WriteRune('\\') - } - buf.WriteRune(runes[idx]) - } else { - if runes[idx] == '\\' { - escape = true - } else if strings.HasPrefix(string(runes[idx:]), delim) { - idx += len(delim) - 1 - vals = append(vals, strings.TrimSpace(buf.String())) - buf.Reset() - } else { - buf.WriteRune(runes[idx]) - } - } - idx += 1 - if idx == len(runes) { - break - } - } - - if buf.Len() > 0 { - vals = append(vals, strings.TrimSpace(buf.String())) - } - - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. -func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. 
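// Sketch of the list accessors above: they split on a caller-supplied
// delimiter; Strings honors "\," style escapes, and Ints maps unparsable
// elements to the zero value. Key names are hypothetical.
func exampleLists(sec *ini.Section) {
	hosts := sec.Key("hosts").Strings(",")
	ports := sec.Key("ports").Ints(",")
	_, _ = hosts, ports
}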
-func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
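// Sketch contrasting the Valid* and Strict* variants above: ValidInts drops
// elements that fail to parse, while StrictInts stops at the first invalid
// element and returns its error.
func exampleStrictLists(sec *ini.Section) ([]int, error) {
	_ = sec.Key("ports").ValidInts(",")
	return sec.Key("ports").StrictInts(",")
}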
-func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseFloat64s transforms strings to float64s. -func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseFloat(str, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInts transforms strings to ints. -func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - for _, str := range strs { - valInt64, err := strconv.ParseInt(str, 0, 64) - val := int(valInt64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseInt(str, 0, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseUints transforms strings to uints. -func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 0, 0) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, uint(val)) - } - } - return vals, nil -} - -// parseUint64s transforms strings to uint64s. -func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 0, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseTimesFormat transforms strings to times in given format. -func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - for _, str := range strs { - val, err := time.Parse(format, str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. 
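// Sketch of SetValue (defined just below): it only mutates the in-memory key,
// taking the file lock when BlockMode is on; it does not write anything back
// to disk by itself. Section and key names are hypothetical.
func exampleSetValue(cfg *ini.File) {
	cfg.Section("server").Key("port").SetValue("9090")
}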
-func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index 24cf11c73..000000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,486 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "unicode" -) - -var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)") - -type tokenType int - -const ( - _TOKEN_INVALID tokenType = iota - _TOKEN_COMMENT - _TOKEN_SECTION - _TOKEN_KEY -) - -type parser struct { - buf *bufio.Reader - isEOF bool - count int - comment *bytes.Buffer -} - -func newParser(r io.Reader) *parser { - return &parser{ - buf: bufio.NewReader(r), - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - p.buf.Read(mask) - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - p.buf.Read(mask) - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(delimiters string, in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - endIdx := -1 - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], delimiters) - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, delimiters) - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. 
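// Sketch of input forms accepted by readMultilines and readContinuationLines
// above. The resulting values are an assumption based on the code, not a
// documented guarantee.
func exampleMultilineValues() error {
	src := []byte("key1 = part one \\\n       part two\nkey2 = \"\"\"first line\nsecond line\"\"\"\n")
	cfg, err := ini.Load(src)
	if err != nil {
		return err
	}
	_ = cfg.Section("").Key("key1").String() // continuation lines are joined
	_ = cfg.Section("").Key("key2").String() // triple quotes keep the inner newline
	return nil
}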
-func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, - parserBufferSize int, - ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines, spaceBeforeInlineComment bool) (string, error) { - - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - return "", nil - } - - var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } else if unescapeValueDoubleQuotes && line[0] == '"' { - valQuote = `"` - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - if unescapeValueDoubleQuotes && valQuote == `"` { - return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil - } - return line[startIdx : pos+startIdx], nil - } - - lastChar := line[len(line)-1] - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - trimmedLastChar := line[len(line)-1] - - // Check continuation lines when desired - if !ignoreContinuation && trimmedLastChar == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !ignoreInlineComment { - var i int - if spaceBeforeInlineComment { - i = strings.Index(line, " #") - if i == -1 { - i = strings.Index(line, " ;") - } - - } else { - i = strings.IndexAny(line, "#;") - } - - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - - } - - // Trim single and double quotes - if hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"') { - line = line[1 : len(line)-1] - } else if len(valQuote) == 0 && unescapeValueCommentSymbols { - if strings.Contains(line, `\;`) { - line = strings.Replace(line, `\;`, ";", -1) - } - if strings.Contains(line, `\#`) { - line = strings.Replace(line, `\#`, "#", -1) - } - } else if allowPythonMultilines && lastChar == '\n' { - parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize) - peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - - val := line - - for { - peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil { - if peekErr == io.EOF { - return val, nil - } - return "", peekErr - } - - peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) - if len(peekMatches) != 3 { - return val, nil - } - - // NOTE: Return if not a python-ini multi-line value. - currentIdentSize := len(peekMatches[1]) - if currentIdentSize <= 0 { - return val, nil - } - - // NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer. - _, err := p.readUntil('\n') - if err != nil { - return "", err - } - - val += fmt.Sprintf("\n%s", peekMatches[2]) - } - } - - return line, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. 
- name := DEFAULT_SECTION - if f.options.Insensitive { - name = strings.ToLower(DEFAULT_SECTION) - } - section, _ := f.NewSection(name) - - // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key - var isLastValueEmpty bool - var lastRegularKey *Key - - var line []byte - var inUnparseableSection bool - - // NOTE: Iterate and increase `currentPeekSize` until - // the size of the parser buffer is found. - // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. - parserBufferSize := 0 - // NOTE: Peek 1kb at a time. - currentPeekSize := 1024 - - if f.options.AllowPythonMultilineValues { - for { - peekBytes, _ := p.buf.Peek(currentPeekSize) - peekBytesLength := len(peekBytes) - - if parserBufferSize >= peekBytesLength { - break - } - - currentPeekSize *= 2 - parserBufferSize = peekBytesLength - } - } - - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - if f.options.AllowNestedValues && - isLastValueEmpty && len(line) > 0 { - if line[0] == ' ' || line[0] == '\t' { - lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) - continue - } - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. - p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - closeIdx := bytes.LastIndexByte(line, ']') - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset aotu-counter and comments - p.comment.Reset() - p.count = 1 - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue - } - - kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) - if err != nil { - // Treat as boolean key when desired, and whole line is key name. - if IsErrDelimiterNotFound(err) { - switch { - case f.options.AllowBooleanKeys: - kname, err := p.readValue(line, - parserBufferSize, - f.options.IgnoreContinuation, - f.options.IgnoreInlineComment, - f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols, - f.options.AllowPythonMultilineValues, - f.options.SpaceBeforeInlineComment) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err - } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - - case f.options.SkipUnrecognizableLines: - continue - } - } - return err - } - - // Auto increment. 
- isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], - parserBufferSize, - f.options.IgnoreContinuation, - f.options.IgnoreInlineComment, - f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols, - f.options.AllowPythonMultilineValues, - f.options.SpaceBeforeInlineComment) - if err != nil { - return err - } - isLastValueEmpty = len(value) == 0 - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - lastRegularKey = key - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index bc32c620d..000000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// SetBody updates body content only if section is raw. -func (s *Section) SetBody(body string) { - if !s.isRawSection { - return - } - s.rawBody = body -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - s.keysHash[name] = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. -func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. 
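// Sketch of building a file in memory with NewSection / NewKey /
// NewBooleanKey from the section code above. ini.Empty() is assumed to exist
// elsewhere in this package (outside this hunk).
func exampleBuildSection() (*ini.File, error) {
	cfg := ini.Empty()
	sec, err := cfg.NewSection("server")
	if err != nil {
		return nil, err
	}
	if _, err := sec.NewKey("port", "8080"); err != nil {
		return nil, err
	}
	// Boolean keys carry an implicit "true" value, matching NewBooleanKey above.
	if _, err := sec.NewBooleanKey("enable_tls"); err != nil {
		return nil, err
	}
	return cfg, nil
}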
-func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Haskey is a backwards-compatible name for HasKey. -// TODO: delete me in v2 -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - delete(s.keysHash, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + "." 
- children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]) - } - } - return children -} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go deleted file mode 100644 index a9dfed078..000000000 --- a/vendor/github.com/go-ini/ini/struct.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. - TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= ('A' - 'a') - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. 
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - var strs []string - if allowShadow { - strs = key.StringsWithShadows(delim) - } else { - strs = key.Strings(delim) - } - - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - var err error - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals, err = key.parseInts(strs, true, false) - case reflect.Int64: - vals, err = key.parseInt64s(strs, true, false) - case reflect.Uint: - vals, err = key.parseUints(strs, true, false) - case reflect.Uint64: - vals, err = key.parseUint64s(strs, true, false) - case reflect.Float64: - vals, err = key.parseFloat64s(strs, true, false) - case reflectTime: - vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - if err != nil && isStrict { - return err - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -func wrapStrictError(err error, isStrict bool) error { - if isStrict { - return err - } - return nil -} - -// setWithProperType sets proper value to field based on its type, -// but it does not return error for failing parsing, -// because we want to use default value that is already assigned to strcut. 
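// Sketch of a mapping target for setSliceWithProperType above: the delimiter
// comes from the `delim` tag, with parseDelim defaulting to ",". Field and key
// names are hypothetical.
type exampleSliceTarget struct {
	Hosts []string `ini:"hosts" delim:";"`
	Ports []int    `ini:"ports"` // split on the default ","
}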
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - switch t.Kind() { - case reflect.String: - if len(key.String()) == 0 { - return nil - } - field.SetString(key.String()) - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetBool(boolVal) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int64(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - intVal, err := key.Int64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetInt(intVal) - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && uint64(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetUint(uintVal) - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetFloat(floatVal) - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.Set(reflect.ValueOf(timeVal)) - case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { - opts := strings.SplitN(tag, ",", 3) - rawName = opts[0] - if len(opts) > 1 { - omitEmpty = opts[1] == "omitempty" - } - if len(opts) > 2 { - allowShadow = opts[2] == "allowshadow" - } - return rawName, omitEmpty, allowShadow -} - -func (s *Section) mapTo(val reflect.Value, isStrict bool) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, _, allowShadow := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - isStruct := tpField.Type.Kind() == reflect.Struct - if isAnonymous { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if isAnonymous || isStruct { - if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - continue - } - } - - if key, err := s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - } - } - return nil -} - -// MapTo maps section to given struct. 
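// Sketch of the tag options handled by parseTagOptions above: `omitempty` is
// only consulted when reflecting back out, `allowshadow` reads shadow keys,
// `ini:"-"` skips a field, and unexported fields are skipped because CanSet()
// reports false. Assumes an import of "time"; names are hypothetical.
type exampleMapTarget struct {
	Timeout  time.Duration `ini:"timeout"` // "30s" is parsed via key.Duration()
	Password string        `ini:"password,omitempty"`
	Tags     []string      `ini:"tags,omitempty,allowshadow"`
	ignored  string        // unexported, so CanSet() is false and it is skipped
}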
-func (s *Section) MapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val, false) -} - -// MapTo maps section to given struct in strict mode, -// which returns all possible error including value parsing error. -func (s *Section) StrictMapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val, true) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// MapTo maps file to given struct in strict mode, -// which returns all possible error including value parsing error. -func (f *File) StrictMapTo(v interface{}) error { - return f.Section("").StrictMapTo(v) -} - -// MapTo maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, -// which returns all possible error including value parsing error. -func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.StrictMapTo(v) -} - -// MapTo maps data sources to given struct. -func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// StrictMapTo maps data sources to given struct in strict mode, -// which returns all possible error including value parsing error. -func StrictMapTo(v, source interface{}, others ...interface{}) error { - return StrictMapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. -func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - sliceOf := field.Type().Elem().Kind() - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflectTime: - buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) - return nil -} - -// reflectWithProperType does the opposite thing as setWithProperType. 
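// Sketch of the package-level MapTo above, which loads and maps in one call;
// MapToWithMapper additionally applies a NameMapper such as AllCapsUnderscore
// so that Go field names match ALL_CAPS keys. The path argument is hypothetical.
func exampleMapTo(path string) (int, error) {
	var out struct {
		Port int `ini:"port"`
	}
	if err := ini.MapTo(&out, path); err != nil {
		return 0, err
	}
	return out.Port, nil
}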
-func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) - case reflect.Slice: - return reflectSliceWithProperType(key, field, delim) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -// CR: copied from encoding/json/encode.go with modifications of time.Time support. -// TODO: add more test coverage. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflectTime: - t, ok := v.Interface().(time.Time) - return ok && t.IsZero() - } - return false -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - opts := strings.SplitN(tag, ",", 2) - if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { - continue - } - - fieldName := s.parseFieldName(tpField.Name, opts[0]) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { - // Note: The only error here is section doesn't exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - continue - } - - // Note: Same reason as secion. - key, err := s.GetKey(fieldName) - if err != nil { - key, _ = s.NewKey(fieldName, "") - } - - // Add comment from comment tag - if len(key.Comment) == 0 { - key.Comment = tpField.Tag.Get("comment") - } - - if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - - } - return nil -} - -// ReflectFrom reflects secion from given struct. 
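// Sketch of ReflectFrom, the reverse of MapTo: it fills an *ini.File from a
// struct, honoring the `comment` and `omitempty` tags handled in reflectFrom
// above. ini.Empty() is assumed, as in the earlier sketch.
func exampleReflectFrom() (*ini.File, error) {
	cfg := ini.Empty()
	src := struct {
		Port    int    `ini:"port" comment:"listen port"`
		LogFile string `ini:"log_file,omitempty"` // empty, so it is skipped
	}{Port: 8080}
	if err := ini.ReflectFrom(cfg, &src); err != nil {
		return nil, err
	}
	return cfg, nil
}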
-func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot reflect from non-pointer struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct with name mapper. -func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. -func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7a2..000000000 --- a/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c208..000000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90da..000000000 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile deleted file mode 100644 index 0b4659b73..000000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Protocol Buffers for Go with Gadgets -# -# Copyright (c) 2013, The GoGo Authors. All rights reserved. -# http://github.com/gogo/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogo - protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto - -restore: - cp gogo.pb.golden gogo.pb.go - -preserve: - cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go deleted file mode 100644 index 081c86fa8..000000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ /dev/null @@ -1,169 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package gogoproto provides extensions for protocol buffers to achieve: - - - fast marshalling and unmarshalling. - - peace of mind by optionally generating test and benchmark code. - - more canonical Go structures. - - less typing by optionally generating extra helper code. - - goprotobuf compatibility - -More Canonical Go Structures - -A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. -You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. -Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. - - - nullable, if false, a field is generated without a pointer (see warning below). - - embed, if true, the field is generated as an embedded field. - - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 - - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. - - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. - - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - -Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. - -Let us look at: - - github.com/gogo/protobuf/test/example/example.proto - -for a quicker overview. 
- -The following message: - - package test; - - import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - - message A { - optional string Description = 1 [(gogoproto.nullable) = false]; - optional int64 Number = 2 [(gogoproto.nullable) = false]; - optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; - } - -Will generate a go struct which looks a lot like this: - - type A struct { - Description string - Number int64 - Id github_com_gogo_protobuf_test_custom.Uuid - } - -You will see there are no pointers, since all fields are non-nullable. -You will also see a custom type which marshals to a string. -Be warned it is your responsibility to test your custom types thoroughly. -You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. - -Next we will embed the message A in message B. - - message B { - optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; - repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; - } - -See below that A is embedded in B. - - type B struct { - A - G []github_com_gogo_protobuf_test_custom.Uint128 - } - -Also see the repeated custom type. - - type Uint128 [2]uint64 - -Next we will create a custom name for one of our fields. - - message C { - optional int64 size = 1 [(gogoproto.customname) = "MySize"]; - } - -See below that the field's name is MySize and not Size. - - type C struct { - MySize *int64 - } - -The is useful when having a protocol buffer message with a field name which conflicts with a generated method. -As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. -Using customname you can fix this error without changing the field name. -This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. - -Gogoprotobuf also has some more subtle changes, these could be changed back: - - - the generated package name for imports do not have the extra /filename.pb, - but are actually the imports specified in the .proto file. - -Gogoprotobuf also has lost some features which should be brought back with time: - - - Marshalling and unmarshalling with reflect and without the unsafe package, - this requires work in pointer_reflect.go - -Why does nullable break protocol buffer specifications: - -The protocol buffer specification states, somewhere, that you should be able to tell whether a -field is set or unset. With the option nullable=false this feature is lost, -since your non-nullable fields will always be set. It can be seen as a layer on top of -protocol buffers, where before and after marshalling all non-nullable fields are set -and they cannot be unset. - -Goprotobuf Compatibility: - -Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. -Gogoprotobuf generates the same code as goprotobuf if no extensions are used. -The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: - - - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. 
- - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix - - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. - - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face - - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. - - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension - - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. - - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). - -Less Typing and Peace of Mind is explained in their specific plugin folders godoc: - - - github.com/gogo/protobuf/plugin/ - -If you do not use any of these extension the code that is generated -will be the same as if goprotobuf has generated it. - -The most complete way to see examples is to look at - - github.com/gogo/protobuf/test/thetest.proto - -Gogoprototest is a seperate project, -because we want to keep gogoprotobuf independent of goprotobuf, -but we still want to test it thoroughly. - -*/ -package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go deleted file mode 100644 index e352808b9..000000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ /dev/null @@ -1,874 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: gogo.proto - -package gogoproto - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62001, - Name: "gogoproto.goproto_enum_prefix", - Tag: "varint,62001,opt,name=goproto_enum_prefix", - Filename: "gogo.proto", -} - -var E_GoprotoEnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62021, - Name: "gogoproto.goproto_enum_stringer", - Tag: "varint,62021,opt,name=goproto_enum_stringer", - Filename: "gogo.proto", -} - -var E_EnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62022, - Name: "gogoproto.enum_stringer", - Tag: "varint,62022,opt,name=enum_stringer", - Filename: "gogo.proto", -} - -var E_EnumCustomname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*string)(nil), - Field: 62023, - Name: "gogoproto.enum_customname", - Tag: "bytes,62023,opt,name=enum_customname", - Filename: "gogo.proto", -} - -var E_Enumdecl = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62024, - Name: "gogoproto.enumdecl", - Tag: "varint,62024,opt,name=enumdecl", - Filename: "gogo.proto", -} - -var E_EnumvalueCustomname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumValueOptions)(nil), - ExtensionType: (*string)(nil), - Field: 66001, - Name: "gogoproto.enumvalue_customname", - Tag: "bytes,66001,opt,name=enumvalue_customname", - Filename: "gogo.proto", -} - -var E_GoprotoGettersAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63001, - Name: "gogoproto.goproto_getters_all", - Tag: "varint,63001,opt,name=goproto_getters_all", - Filename: "gogo.proto", -} - -var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63002, - Name: "gogoproto.goproto_enum_prefix_all", - Tag: "varint,63002,opt,name=goproto_enum_prefix_all", - Filename: "gogo.proto", -} - -var E_GoprotoStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63003, - Name: "gogoproto.goproto_stringer_all", - Tag: "varint,63003,opt,name=goproto_stringer_all", - Filename: "gogo.proto", -} - -var E_VerboseEqualAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63004, - Name: "gogoproto.verbose_equal_all", - Tag: "varint,63004,opt,name=verbose_equal_all", - Filename: "gogo.proto", -} - -var E_FaceAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63005, - Name: "gogoproto.face_all", - Tag: "varint,63005,opt,name=face_all", - Filename: "gogo.proto", -} - -var E_GostringAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63006, - Name: "gogoproto.gostring_all", - Tag: "varint,63006,opt,name=gostring_all", - Filename: "gogo.proto", -} - -var E_PopulateAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63007, - Name: "gogoproto.populate_all", - Tag: "varint,63007,opt,name=populate_all", - Filename: "gogo.proto", -} - -var E_StringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - 
ExtensionType: (*bool)(nil), - Field: 63008, - Name: "gogoproto.stringer_all", - Tag: "varint,63008,opt,name=stringer_all", - Filename: "gogo.proto", -} - -var E_OnlyoneAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63009, - Name: "gogoproto.onlyone_all", - Tag: "varint,63009,opt,name=onlyone_all", - Filename: "gogo.proto", -} - -var E_EqualAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63013, - Name: "gogoproto.equal_all", - Tag: "varint,63013,opt,name=equal_all", - Filename: "gogo.proto", -} - -var E_DescriptionAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63014, - Name: "gogoproto.description_all", - Tag: "varint,63014,opt,name=description_all", - Filename: "gogo.proto", -} - -var E_TestgenAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63015, - Name: "gogoproto.testgen_all", - Tag: "varint,63015,opt,name=testgen_all", - Filename: "gogo.proto", -} - -var E_BenchgenAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63016, - Name: "gogoproto.benchgen_all", - Tag: "varint,63016,opt,name=benchgen_all", - Filename: "gogo.proto", -} - -var E_MarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63017, - Name: "gogoproto.marshaler_all", - Tag: "varint,63017,opt,name=marshaler_all", - Filename: "gogo.proto", -} - -var E_UnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63018, - Name: "gogoproto.unmarshaler_all", - Tag: "varint,63018,opt,name=unmarshaler_all", - Filename: "gogo.proto", -} - -var E_StableMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63019, - Name: "gogoproto.stable_marshaler_all", - Tag: "varint,63019,opt,name=stable_marshaler_all", - Filename: "gogo.proto", -} - -var E_SizerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63020, - Name: "gogoproto.sizer_all", - Tag: "varint,63020,opt,name=sizer_all", - Filename: "gogo.proto", -} - -var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63021, - Name: "gogoproto.goproto_enum_stringer_all", - Tag: "varint,63021,opt,name=goproto_enum_stringer_all", - Filename: "gogo.proto", -} - -var E_EnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63022, - Name: "gogoproto.enum_stringer_all", - Tag: "varint,63022,opt,name=enum_stringer_all", - Filename: "gogo.proto", -} - -var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63023, - Name: "gogoproto.unsafe_marshaler_all", - Tag: "varint,63023,opt,name=unsafe_marshaler_all", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63024, - Name: "gogoproto.unsafe_unmarshaler_all", - Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63025, - Name: "gogoproto.goproto_extensions_map_all", - Tag: "varint,63025,opt,name=goproto_extensions_map_all", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63026, - Name: "gogoproto.goproto_unrecognized_all", - Tag: "varint,63026,opt,name=goproto_unrecognized_all", - Filename: "gogo.proto", -} - -var E_GogoprotoImport = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63027, - Name: "gogoproto.gogoproto_import", - Tag: "varint,63027,opt,name=gogoproto_import", - Filename: "gogo.proto", -} - -var E_ProtosizerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63028, - Name: "gogoproto.protosizer_all", - Tag: "varint,63028,opt,name=protosizer_all", - Filename: "gogo.proto", -} - -var E_CompareAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63029, - Name: "gogoproto.compare_all", - Tag: "varint,63029,opt,name=compare_all", - Filename: "gogo.proto", -} - -var E_TypedeclAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63030, - Name: "gogoproto.typedecl_all", - Tag: "varint,63030,opt,name=typedecl_all", - Filename: "gogo.proto", -} - -var E_EnumdeclAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63031, - Name: "gogoproto.enumdecl_all", - Tag: "varint,63031,opt,name=enumdecl_all", - Filename: "gogo.proto", -} - -var E_GoprotoRegistration = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63032, - Name: "gogoproto.goproto_registration", - Tag: "varint,63032,opt,name=goproto_registration", - Filename: "gogo.proto", -} - -var E_MessagenameAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63033, - Name: "gogoproto.messagename_all", - Tag: "varint,63033,opt,name=messagename_all", - Filename: "gogo.proto", -} - -var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63034, - Name: "gogoproto.goproto_sizecache_all", - Tag: "varint,63034,opt,name=goproto_sizecache_all", - Filename: "gogo.proto", -} - -var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63035, - Name: "gogoproto.goproto_unkeyed_all", - Tag: "varint,63035,opt,name=goproto_unkeyed_all", - Filename: "gogo.proto", -} - -var E_GoprotoGetters = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64001, - Name: "gogoproto.goproto_getters", - Tag: "varint,64001,opt,name=goproto_getters", - Filename: "gogo.proto", -} - -var E_GoprotoStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64003, - Name: "gogoproto.goproto_stringer", - Tag: "varint,64003,opt,name=goproto_stringer", - Filename: "gogo.proto", -} - -var E_VerboseEqual = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64004, - Name: 
"gogoproto.verbose_equal", - Tag: "varint,64004,opt,name=verbose_equal", - Filename: "gogo.proto", -} - -var E_Face = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64005, - Name: "gogoproto.face", - Tag: "varint,64005,opt,name=face", - Filename: "gogo.proto", -} - -var E_Gostring = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64006, - Name: "gogoproto.gostring", - Tag: "varint,64006,opt,name=gostring", - Filename: "gogo.proto", -} - -var E_Populate = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64007, - Name: "gogoproto.populate", - Tag: "varint,64007,opt,name=populate", - Filename: "gogo.proto", -} - -var E_Stringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 67008, - Name: "gogoproto.stringer", - Tag: "varint,67008,opt,name=stringer", - Filename: "gogo.proto", -} - -var E_Onlyone = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64009, - Name: "gogoproto.onlyone", - Tag: "varint,64009,opt,name=onlyone", - Filename: "gogo.proto", -} - -var E_Equal = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64013, - Name: "gogoproto.equal", - Tag: "varint,64013,opt,name=equal", - Filename: "gogo.proto", -} - -var E_Description = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64014, - Name: "gogoproto.description", - Tag: "varint,64014,opt,name=description", - Filename: "gogo.proto", -} - -var E_Testgen = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64015, - Name: "gogoproto.testgen", - Tag: "varint,64015,opt,name=testgen", - Filename: "gogo.proto", -} - -var E_Benchgen = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64016, - Name: "gogoproto.benchgen", - Tag: "varint,64016,opt,name=benchgen", - Filename: "gogo.proto", -} - -var E_Marshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64017, - Name: "gogoproto.marshaler", - Tag: "varint,64017,opt,name=marshaler", - Filename: "gogo.proto", -} - -var E_Unmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64018, - Name: "gogoproto.unmarshaler", - Tag: "varint,64018,opt,name=unmarshaler", - Filename: "gogo.proto", -} - -var E_StableMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64019, - Name: "gogoproto.stable_marshaler", - Tag: "varint,64019,opt,name=stable_marshaler", - Filename: "gogo.proto", -} - -var E_Sizer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64020, - Name: "gogoproto.sizer", - Tag: "varint,64020,opt,name=sizer", - Filename: "gogo.proto", -} - -var E_UnsafeMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64023, - Name: "gogoproto.unsafe_marshaler", - Tag: "varint,64023,opt,name=unsafe_marshaler", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ - 
ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64024, - Name: "gogoproto.unsafe_unmarshaler", - Tag: "varint,64024,opt,name=unsafe_unmarshaler", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64025, - Name: "gogoproto.goproto_extensions_map", - Tag: "varint,64025,opt,name=goproto_extensions_map", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognized = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64026, - Name: "gogoproto.goproto_unrecognized", - Tag: "varint,64026,opt,name=goproto_unrecognized", - Filename: "gogo.proto", -} - -var E_Protosizer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64028, - Name: "gogoproto.protosizer", - Tag: "varint,64028,opt,name=protosizer", - Filename: "gogo.proto", -} - -var E_Compare = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64029, - Name: "gogoproto.compare", - Tag: "varint,64029,opt,name=compare", - Filename: "gogo.proto", -} - -var E_Typedecl = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64030, - Name: "gogoproto.typedecl", - Tag: "varint,64030,opt,name=typedecl", - Filename: "gogo.proto", -} - -var E_Messagename = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64033, - Name: "gogoproto.messagename", - Tag: "varint,64033,opt,name=messagename", - Filename: "gogo.proto", -} - -var E_GoprotoSizecache = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64034, - Name: "gogoproto.goproto_sizecache", - Tag: "varint,64034,opt,name=goproto_sizecache", - Filename: "gogo.proto", -} - -var E_GoprotoUnkeyed = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64035, - Name: "gogoproto.goproto_unkeyed", - Tag: "varint,64035,opt,name=goproto_unkeyed", - Filename: "gogo.proto", -} - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65001, - Name: "gogoproto.nullable", - Tag: "varint,65001,opt,name=nullable", - Filename: "gogo.proto", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65002, - Name: "gogoproto.embed", - Tag: "varint,65002,opt,name=embed", - Filename: "gogo.proto", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65003, - Name: "gogoproto.customtype", - Tag: "bytes,65003,opt,name=customtype", - Filename: "gogo.proto", -} - -var E_Customname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65004, - Name: "gogoproto.customname", - Tag: "bytes,65004,opt,name=customname", - Filename: "gogo.proto", -} - -var E_Jsontag = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65005, - Name: "gogoproto.jsontag", - Tag: "bytes,65005,opt,name=jsontag", - Filename: "gogo.proto", -} - -var E_Moretags = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - 
ExtensionType: (*string)(nil), - Field: 65006, - Name: "gogoproto.moretags", - Tag: "bytes,65006,opt,name=moretags", - Filename: "gogo.proto", -} - -var E_Casttype = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65007, - Name: "gogoproto.casttype", - Tag: "bytes,65007,opt,name=casttype", - Filename: "gogo.proto", -} - -var E_Castkey = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65008, - Name: "gogoproto.castkey", - Tag: "bytes,65008,opt,name=castkey", - Filename: "gogo.proto", -} - -var E_Castvalue = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65009, - Name: "gogoproto.castvalue", - Tag: "bytes,65009,opt,name=castvalue", - Filename: "gogo.proto", -} - -var E_Stdtime = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65010, - Name: "gogoproto.stdtime", - Tag: "varint,65010,opt,name=stdtime", - Filename: "gogo.proto", -} - -var E_Stdduration = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65011, - Name: "gogoproto.stdduration", - Tag: "varint,65011,opt,name=stdduration", - Filename: "gogo.proto", -} - -var E_Wktpointer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65012, - Name: "gogoproto.wktpointer", - Tag: "varint,65012,opt,name=wktpointer", - Filename: "gogo.proto", -} - -func init() { - proto.RegisterExtension(E_GoprotoEnumPrefix) - proto.RegisterExtension(E_GoprotoEnumStringer) - proto.RegisterExtension(E_EnumStringer) - proto.RegisterExtension(E_EnumCustomname) - proto.RegisterExtension(E_Enumdecl) - proto.RegisterExtension(E_EnumvalueCustomname) - proto.RegisterExtension(E_GoprotoGettersAll) - proto.RegisterExtension(E_GoprotoEnumPrefixAll) - proto.RegisterExtension(E_GoprotoStringerAll) - proto.RegisterExtension(E_VerboseEqualAll) - proto.RegisterExtension(E_FaceAll) - proto.RegisterExtension(E_GostringAll) - proto.RegisterExtension(E_PopulateAll) - proto.RegisterExtension(E_StringerAll) - proto.RegisterExtension(E_OnlyoneAll) - proto.RegisterExtension(E_EqualAll) - proto.RegisterExtension(E_DescriptionAll) - proto.RegisterExtension(E_TestgenAll) - proto.RegisterExtension(E_BenchgenAll) - proto.RegisterExtension(E_MarshalerAll) - proto.RegisterExtension(E_UnmarshalerAll) - proto.RegisterExtension(E_StableMarshalerAll) - proto.RegisterExtension(E_SizerAll) - proto.RegisterExtension(E_GoprotoEnumStringerAll) - proto.RegisterExtension(E_EnumStringerAll) - proto.RegisterExtension(E_UnsafeMarshalerAll) - proto.RegisterExtension(E_UnsafeUnmarshalerAll) - proto.RegisterExtension(E_GoprotoExtensionsMapAll) - proto.RegisterExtension(E_GoprotoUnrecognizedAll) - proto.RegisterExtension(E_GogoprotoImport) - proto.RegisterExtension(E_ProtosizerAll) - proto.RegisterExtension(E_CompareAll) - proto.RegisterExtension(E_TypedeclAll) - proto.RegisterExtension(E_EnumdeclAll) - proto.RegisterExtension(E_GoprotoRegistration) - proto.RegisterExtension(E_MessagenameAll) - proto.RegisterExtension(E_GoprotoSizecacheAll) - proto.RegisterExtension(E_GoprotoUnkeyedAll) - proto.RegisterExtension(E_GoprotoGetters) - proto.RegisterExtension(E_GoprotoStringer) - proto.RegisterExtension(E_VerboseEqual) - proto.RegisterExtension(E_Face) - proto.RegisterExtension(E_Gostring) - proto.RegisterExtension(E_Populate) - 
proto.RegisterExtension(E_Stringer) - proto.RegisterExtension(E_Onlyone) - proto.RegisterExtension(E_Equal) - proto.RegisterExtension(E_Description) - proto.RegisterExtension(E_Testgen) - proto.RegisterExtension(E_Benchgen) - proto.RegisterExtension(E_Marshaler) - proto.RegisterExtension(E_Unmarshaler) - proto.RegisterExtension(E_StableMarshaler) - proto.RegisterExtension(E_Sizer) - proto.RegisterExtension(E_UnsafeMarshaler) - proto.RegisterExtension(E_UnsafeUnmarshaler) - proto.RegisterExtension(E_GoprotoExtensionsMap) - proto.RegisterExtension(E_GoprotoUnrecognized) - proto.RegisterExtension(E_Protosizer) - proto.RegisterExtension(E_Compare) - proto.RegisterExtension(E_Typedecl) - proto.RegisterExtension(E_Messagename) - proto.RegisterExtension(E_GoprotoSizecache) - proto.RegisterExtension(E_GoprotoUnkeyed) - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) - proto.RegisterExtension(E_Customname) - proto.RegisterExtension(E_Jsontag) - proto.RegisterExtension(E_Moretags) - proto.RegisterExtension(E_Casttype) - proto.RegisterExtension(E_Castkey) - proto.RegisterExtension(E_Castvalue) - proto.RegisterExtension(E_Stdtime) - proto.RegisterExtension(E_Stdduration) - proto.RegisterExtension(E_Wktpointer) -} - -func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } - -var fileDescriptor_592445b5231bc2b9 = []byte{ - // 1328 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, - 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, - 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, - 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, - 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, - 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, - 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, - 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, - 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, - 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, - 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, - 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, - 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, - 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, - 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, - 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, - 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, - 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, - 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, - 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, - 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, - 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, - 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, - 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, - 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, - 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, - 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, - 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, - 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, - 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, - 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, - 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, - 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, - 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, - 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, - 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, - 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, - 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, - 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, - 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, - 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, - 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, - 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, - 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, - 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, - 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, - 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, - 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, - 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, - 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, - 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, - 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, - 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, - 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, - 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, - 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, - 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, - 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, - 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, - 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, - 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, - 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, - 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, - 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, - 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, - 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, - 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, - 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, - 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, - 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, - 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, - 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, - 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, - 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, - 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, - 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, - 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, - 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, - 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, - 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, - 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, - 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, - 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, -} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden deleted file mode 100644 index f6502e4b9..000000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by protoc-gen-go. -// source: gogo.proto -// DO NOT EDIT! - -package gogoproto - -import proto "github.com/gogo/protobuf/proto" -import json "encoding/json" -import math "math" -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - -// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
-var _ = proto.Marshal -var _ = &json.SyntaxError{} -var _ = math.Inf - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51235, - Name: "gogoproto.nullable", - Tag: "varint,51235,opt,name=nullable", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51236, - Name: "gogoproto.embed", - Tag: "varint,51236,opt,name=embed", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 51237, - Name: "gogoproto.customtype", - Tag: "bytes,51237,opt,name=customtype", -} - -func init() { - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) -} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto deleted file mode 100644 index b80c85653..000000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ /dev/null @@ -1,144 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto2"; -package gogoproto; - -import "google/protobuf/descriptor.proto"; - -option java_package = "com.google.protobuf"; -option java_outer_classname = "GoGoProtos"; -option go_package = "github.com/gogo/protobuf/gogoproto"; - -extend google.protobuf.EnumOptions { - optional bool goproto_enum_prefix = 62001; - optional bool goproto_enum_stringer = 62021; - optional bool enum_stringer = 62022; - optional string enum_customname = 62023; - optional bool enumdecl = 62024; -} - -extend google.protobuf.EnumValueOptions { - optional string enumvalue_customname = 66001; -} - -extend google.protobuf.FileOptions { - optional bool goproto_getters_all = 63001; - optional bool goproto_enum_prefix_all = 63002; - optional bool goproto_stringer_all = 63003; - optional bool verbose_equal_all = 63004; - optional bool face_all = 63005; - optional bool gostring_all = 63006; - optional bool populate_all = 63007; - optional bool stringer_all = 63008; - optional bool onlyone_all = 63009; - - optional bool equal_all = 63013; - optional bool description_all = 63014; - optional bool testgen_all = 63015; - optional bool benchgen_all = 63016; - optional bool marshaler_all = 63017; - optional bool unmarshaler_all = 63018; - optional bool stable_marshaler_all = 63019; - - optional bool sizer_all = 63020; - - optional bool goproto_enum_stringer_all = 63021; - optional bool enum_stringer_all = 63022; - - optional bool unsafe_marshaler_all = 63023; - optional bool unsafe_unmarshaler_all = 63024; - - optional bool goproto_extensions_map_all = 63025; - optional bool goproto_unrecognized_all = 63026; - optional bool gogoproto_import = 63027; - optional bool protosizer_all = 63028; - optional bool compare_all = 63029; - optional bool typedecl_all = 63030; - optional bool enumdecl_all = 63031; - - optional bool goproto_registration = 63032; - optional bool messagename_all = 63033; - - optional bool goproto_sizecache_all = 63034; - optional bool goproto_unkeyed_all = 63035; -} - -extend google.protobuf.MessageOptions { - optional bool goproto_getters = 64001; - optional bool goproto_stringer = 64003; - optional bool verbose_equal = 64004; - optional bool face = 64005; - optional bool gostring = 64006; - optional bool populate = 64007; - optional bool stringer = 67008; - optional bool onlyone = 64009; - - optional bool equal = 64013; - optional bool description = 64014; - optional bool testgen = 64015; - optional bool benchgen = 64016; - optional bool marshaler = 64017; - optional bool unmarshaler = 64018; - optional bool stable_marshaler = 64019; - - optional bool sizer = 64020; - - optional bool unsafe_marshaler = 64023; - optional bool unsafe_unmarshaler = 64024; - - optional bool goproto_extensions_map = 64025; - optional bool goproto_unrecognized = 64026; - - optional bool protosizer = 64028; - optional bool compare = 64029; - - optional bool typedecl = 64030; - - optional bool messagename = 64033; - - optional bool goproto_sizecache = 64034; - optional bool goproto_unkeyed = 64035; -} - -extend google.protobuf.FieldOptions { - optional bool nullable = 65001; - optional bool embed = 65002; - optional string customtype = 65003; - optional string customname = 65004; - optional string jsontag = 65005; - optional string moretags = 65006; - optional string casttype = 65007; - optional string castkey = 65008; - optional string castvalue = 65009; - - optional bool stdtime = 65010; - optional bool stdduration = 65011; - optional bool wktpointer = 65012; - -} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go deleted file mode 100644 index 390d4e4be..000000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ /dev/null @@ -1,415 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gogoproto - -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" -import proto "github.com/gogo/protobuf/proto" - -func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Embed, false) -} - -func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Nullable, true) -} - -func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Stdtime, false) -} - -func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Stdduration, false) -} - -func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" -} - -func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" -} - -func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" -} - -func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" -} - -func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" -} - -func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName 
== ".google.protobuf.UInt32Value" -} - -func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" -} - -func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" -} - -func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" -} - -func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { - return (IsStdTime(field) || IsStdDuration(field) || - IsStdDouble(field) || IsStdFloat(field) || - IsStdInt64(field) || IsStdUInt64(field) || - IsStdInt32(field) || IsStdUInt32(field) || - IsStdBool(field) || - IsStdString(field) || IsStdBytes(field)) -} - -func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) -} - -func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { - nullable := IsNullable(field) - if field.IsMessage() || IsCustomType(field) { - return nullable - } - if proto3 { - return false - } - return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES -} - -func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCustomType(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastType(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastKey(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastValue(field) - if len(typ) > 0 { - return true - } - return false -} - -func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) -} - -func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) -} - -func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Customtype) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastType(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Casttype) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Castkey) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Castvalue) - if err == nil && 
v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { - name := GetCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { - name := GetEnumCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { - name := GetEnumValueCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Customname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_EnumCustomname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { - if field == nil { - return nil - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Jsontag) - if err == nil && v.(*string) != nil { - return (v.(*string)) - } - } - return nil -} - -func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { - if field == nil { - return nil - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Moretags) - if err == nil && v.(*string) != nil { - return (v.(*string)) - } - } - return nil -} - -type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool - -func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) -} - -func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) -} - -func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) -} - -func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) -} - -func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) -} - -func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) -} - -func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) -} - -func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) -} - -func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) -} - -func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) -} - -func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) -} - -func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) -} - -func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) -} - -func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) -} - -func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) -} - -func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) -} - -func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) -} - -func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) -} - -func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) -} - -func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) -} - -func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) -} - -func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) -} - -func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) -} - -func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) -} - -func IsProto3(file *google_protobuf.FileDescriptorProto) bool { - return file.GetSyntax() == "proto3" -} - -func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { - return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) -} - -func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) -} - -func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { - return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) -} - -func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) -} - -func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) -} - -func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f327..000000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
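Each helper deleted above resolves a gogoproto option the same way: the per-message setting wins, otherwise the file-wide *_all setting applies, otherwise a built-in default. A minimal model of that precedence, with placeholder values standing in for real descriptor options:

package main

import "fmt"

// resolveBoolOption mirrors the message -> file -> default fallback used by
// helpers such as IsMarshaler and HasExtensionsMap above.
func resolveBoolOption(msgOpt, fileOpt *bool, def bool) bool {
	if msgOpt != nil {
		return *msgOpt
	}
	if fileOpt != nil {
		return *fileOpt
	}
	return def
}

func main() {
	on, off := true, false
	fmt.Println(resolveBoolOption(&off, &on, false)) // false: the message-level option wins
	fmt.Println(resolveBoolOption(nil, &on, false))  // true: falls back to the file-wide option
	fmt.Println(resolveBoolOption(nil, nil, true))   // true: built-in default
}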
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046d9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. 
- // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. 
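The Merge contract documented above (scalars set in src overwrite dst, repeated elements are appended, a nil destination panics) can be illustrated without any generated message type. This is only a model of the documented semantics for one hand-written struct, not the reflection-based implementation that follows:

package main

import "fmt"

// note is a stand-in for a proto2-style message: optional scalars are
// pointers, repeated fields are slices.
type note struct {
	Title *string
	Tags  []string
}

// mergeNote applies the documented Merge rules to this one type.
func mergeNote(dst, src *note) {
	if dst == nil {
		panic("merge: nil destination")
	}
	if src == nil {
		return // merging from nil is a no-op
	}
	if src.Title != nil {
		dst.Title = src.Title // set scalar in src overwrites dst
	}
	dst.Tags = append(dst.Tags, src.Tags...) // repeated elements are appended
}

func main() {
	t := "updated"
	dst := &note{Tags: []string{"a"}}
	mergeNote(dst, &note{Title: &t, Tags: []string{"b"}})
	fmt.Println(*dst.Title, dst.Tags) // updated [a b]
}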
- elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go deleted file mode 100644 index 24552483c..000000000 --- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ /dev/null @@ -1,39 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "reflect" - -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} - -var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. 
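The varint comments above describe protobuf's base-128 encoding: seven payload bits per byte, with the high bit set on every byte except the last. Go's encoding/binary package uses the same unsigned varint format, so it is a convenient way to check the byte layout by hand; this snippet is illustrative and independent of the vendored Buffer type:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 300 needs two bytes: 0xAC (low 7 bits plus continuation bit), then 0x02.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02

	x, used := binary.Uvarint(buf[:n])
	fmt.Println(x, used) // 300 2
}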
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. 
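DecodeZigzag64 and DecodeZigzag32 above undo the zigzag mapping that keeps small negative sint values small on the wire (0 maps to 0, -1 to 1, 1 to 2, -2 to 3, and so on). A standalone round-trip of that transform, without the Buffer plumbing:

package main

import "fmt"

// zigzag64 matches the sint64 wire mapping: the arithmetic right shift spreads
// the sign bit across the word and the XOR folds it into the low bit.
func zigzag64(n int64) uint64 {
	return uint64(n<<1) ^ uint64(n>>63)
}

// unzigzag64 is the inverse, the same transform the deleted DecodeZigzag64 applies.
func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, n := range []int64{-2, -1, 0, 1, 2} {
		u := zigzag64(n)
		fmt.Println(n, "->", u, "->", unzigzag64(u)) // e.g. -2 -> 3 -> -2
	}
}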
-func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09..000000000 --- a/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d90..000000000 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
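getDiscardInfo and computeDiscardInfo above build the per-type field table lazily: callers check an atomic initialized flag, and the table is filled in once under a mutex. A stripped-down sketch of that double-checked pattern, with placeholder names and a placeholder computation:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazyTable struct {
	initialized int32 // 0: not built yet, 1: fields is valid
	mu          sync.Mutex
	fields      []string
}

func (t *lazyTable) get() []string {
	if atomic.LoadInt32(&t.initialized) == 0 {
		t.mu.Lock()
		if t.initialized == 0 { // re-check under the lock
			t.fields = []string{"computed", "once"} // the expensive work goes here
			atomic.StoreInt32(&t.initialized, 1)
		}
		t.mu.Unlock()
	}
	return t.fields
}

func main() {
	var t lazyTable
	fmt.Println(t.get()) // [computed once]
	fmt.Println(t.get()) // same result, built only once
}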
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c91c..000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e1730..000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
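durationFromProto and durationProto above split a time.Duration into the Seconds/Nanos pair used by google.protobuf.Duration, and the comments note the range mismatch: the proto type spans roughly 10,000 years while time.Duration tops out around 290. A self-contained sketch of that split; the helper names are illustrative, not part of the package:

package main

import (
	"fmt"
	"time"
)

// toSecondsNanos splits a duration the same way durationProto does.
func toSecondsNanos(d time.Duration) (secs int64, nanos int32) {
	n := d.Nanoseconds()
	secs = n / 1e9
	nanos = int32(n - secs*1e9)
	return secs, nanos
}

// fromSecondsNanos rebuilds the duration; a real conversion also has to
// range-check, since the proto type can exceed what time.Duration can hold.
func fromSecondsNanos(secs int64, nanos int32) time.Duration {
	return time.Duration(secs)*time.Second + time.Duration(nanos)
}

func main() {
	s, ns := toSecondsNanos(1500 * time.Millisecond)
	fmt.Println(s, ns)                   // 1 500000000
	fmt.Println(fromSecondsNanos(s, ns)) // 1.5s
}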
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2cf..000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb173e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
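EncodeRawBytes and EncodeStringBytes above frame bytes fields and embedded messages as a varint length prefix followed by the payload. The same framing can be reproduced with the standard library, which is handy when inspecting wire data by hand; this helper is illustrative, not part of the package:

package main

import (
	"encoding/binary"
	"fmt"
)

// appendLengthDelimited writes len(b) as a varint, then the bytes themselves,
// mirroring the bytes/embedded-message wire format described above.
func appendLengthDelimited(dst, b []byte) []byte {
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(b)))
	dst = append(dst, lenBuf[:n]...)
	return append(dst, b...)
}

func main() {
	fmt.Printf("% x\n", appendLengthDelimited(nil, []byte("hi"))) // 02 68 69
}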
- -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c1..000000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
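One easy-to-miss clause in the Equal contract above is the floating-point rule: NaN compares unequal to everything, including itself, so a message whose float field holds NaN is not Equal even to an identical copy. The comparison simply inherits IEEE 754 behaviour:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan == nan) // false: NaN != NaN, which Equal inherits for float fields
}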
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 341c6f57f..000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,605 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. 
- return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. 
-func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
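The byte-slice fast path in HasExtension above scans the raw encoding one field at a time: each key varint is split into a field number (tag >> 3) and a wire type (tag & 0x7), and size computes how far to skip the payload. A stdlib-only sketch of that split, using the well-known encoding 08 96 01 for field 1 set to 150 (the variable names here are illustrative):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        buf := []byte{0x08, 0x96, 0x01} // field 1, wire type 0 (varint), value 150

        key, n := binary.Uvarint(buf) // the key varint comes first
        fieldNum := int32(key >> 3)   // upper bits: field number
        wireType := int(key & 0x7)    // low three bits: wire type
        val, _ := binary.Uvarint(buf[n:])

        fmt.Println(fieldNum, wireType, val) // 1 0 150
    }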
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. 
- value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - ClearExtension(pb, extension) - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) - return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
- -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 6f1ae120e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
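The extension registry removed just above (extensionMaps, RegisterExtension, RegisteredExtensions) is a two-level map keyed first by the extended message's struct type and then by field number, with a panic on duplicate registration. A rough, self-contained sketch of that shape; descriptor, register, registry and myMessage are illustrative stand-ins, not the package's real types:

    package main

    import (
        "fmt"
        "reflect"
    )

    // descriptor stands in for *ExtensionDesc: just enough to show the
    // registry layout (extended struct type -> field number -> descriptor).
    type descriptor struct {
        field int32
        name  string
    }

    var registry = make(map[reflect.Type]map[int32]*descriptor)

    // register mirrors the duplicate check in RegisterExtension: the first
    // registration of a (type, field) pair wins, a second one panics.
    func register(extendedNilPtr interface{}, d *descriptor) {
        t := reflect.TypeOf(extendedNilPtr).Elem() // keyed by the struct type, not the pointer
        m := registry[t]
        if m == nil {
            m = make(map[int32]*descriptor)
            registry[t] = m
        }
        if _, dup := m[d.field]; dup {
            panic(fmt.Sprintf("duplicate extension registered: %v field %d", t, d.field))
        }
        m[d.field] = d
    }

    type myMessage struct{}

    func main() {
        register((*myMessage)(nil), &descriptor{field: 100, name: "example.ext"})
        fmt.Println(registry[reflect.TypeOf(myMessage{})][100].name) // example.ext
    }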
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMapBackwards(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { - o := 0 - end := len(data) - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[end-len(e.enc):], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - end -= n - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) 
- m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index d17f80209..000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,967 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m 
*Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. 
It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. 
-func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults 
sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. 
-type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. 
- sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion1 = true - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index b3aa39190..000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,50 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index f48a75676..000000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? 
-} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad9083..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. 
-/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - 
return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c29d..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. - -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed02..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index c9e5fa020..000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,599 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. 
-type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. 
- p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. 
- propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. 
-func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd93..000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93f7..000000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index 9b1538d05..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3006 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c1e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index f520106e0..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,657 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? 
- out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - mergeInfo.merge(dst, src) - } - case isSlice: // E.g., []*pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mergeInfo.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mergeInfo.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go deleted file mode 100644 index bb2622f28..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2245 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. - err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. 
- z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - if u.bytesExtensions.IsValid() { - z = m.offset(u.bytesExtensions).toBytes() - break - } - panic("no extensions field available") - } - } - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - u.bytesExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { - u.oldExtensions = toField(&f) - continue - } else if f.Type == reflect.TypeOf(([]byte)(nil)) { - u.bytesExtensions = toField(&f) - continue - } - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." 
+ f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if fn.IsValid() && len(oneofFields) > 0 { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - } - } - - // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? 
- for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - ctype := false - isTime := false - isDuration := false - isWktPointer := false - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - if strings.HasPrefix(tag, "customtype=") { - ctype = true - } - if tag == "stdtime" { - isTime = true - } - if tag == "stdduration" { - isDuration = true - } - if tag == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) - } - if pointer { - return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalCustom(getUnmarshalInfo(t), name) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTime(getUnmarshalInfo(t), name) - } - - if isDuration { - if pointer { - if slice { - return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDuration(getUnmarshalInfo(t), name) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return 
makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint32: - if pointer { - if slice { - return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessage(getUnmarshalInfo(t), name) - } - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
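(Aside, not part of the patch: the per-field unmarshalers removed below all share one shape — check the wire type, decode a varint or fixed-width value, store it through the field pointer. As a reference point, here is a minimal, self-contained sketch of the base-128 varint and sint zigzag decoding they build on. The names are illustrative only; the vendored file's own decodeVarint, visible further down in this deletion, is a manually unrolled version of the same loop.)

package main

import "fmt"

// decodeVarint reads a base-128 varint from b and returns the value and
// the number of bytes consumed; it returns (0, 0) on truncated or
// over-long input, matching the convention used by the deleted code.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		if i == 10 {
			return 0, 0 // a valid varint is at most 10 bytes
		}
		x |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0 // ran out of bytes mid-varint
}

// zigzag64 undoes the sint64 zigzag mapping (0,-1,1,-2,... -> 0,1,2,3,...);
// it is equivalent to the int64(x>>1) ^ int64(x)<<63>>63 form used below.
func zigzag64(x uint64) int64 {
	return int64(x>>1) ^ -int64(x&1)
}

func main() {
	// 300 encodes as 0xAC 0x02; -3 zigzag-encodes to 5 as an sint64.
	if v, n := decodeVarint([]byte{0xAC, 0x02}); n != 0 {
		fmt.Println(v) // 300
	}
	fmt.Println(zigzag64(5)) // -3
}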
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - tagArray := strings.Split(f.Tag.Get("protobuf"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - for _, t := range tagArray { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - } - if t == "stdduration" { - valTags = append(valTags, t) - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7ad9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. 
Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f // gogo: changed from v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.New(sub.typ)) - m := s.Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := reflect.New(sub.typ) - c := m.Interface().(custom) - if err := c.Unmarshal(b[:x]); err != nil { - return nil, err - } - v := valToPointer(m) - f.appendRef(v, sub.typ) - return b[x:], nil - } -} - -func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - m := f.asPointerTo(sub.typ).Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { 
- return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&d)) - return b[x:], nil - } -} - -func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(d)) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&d)) - 
slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 0407ba85d..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,928 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. 
-type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. 
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
- v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } 
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa0e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index 1ce0be2fa..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "" {
- return p.errorf("unexpected EOF")
- }
- if len(props.CustomType) > 0 {
- if props.Repeated {
- t := reflect.TypeOf(v.Interface())
- if t.Kind() == reflect.Slice {
- tc := reflect.TypeOf(new(Marshaler))
- ok := t.Elem().Implements(tc.Elem())
- if ok {
- fv := v
- flen := fv.Len()
- if flen == fv.Cap() {
- nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
- reflect.Copy(nav, fv)
- fv.Set(nav)
- }
- fv.SetLen(flen + 1)
-
- // Read one.
- p.back()
- return p.readAny(fv.Index(flen), props)
- }
- }
- }
- if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
- custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
- err := custom.Unmarshal([]byte(tok.unquoted))
- if err != nil {
- return p.errorf("%v %v: %v", err, v.Type(), tok.value)
- }
- v.Set(reflect.ValueOf(custom))
- } else {
- custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
- err := custom.Unmarshal([]byte(tok.unquoted))
- if err != nil {
- return p.errorf("%v %v: %v", err, v.Type(), tok.value)
- }
- v.Set(reflect.Indirect(reflect.ValueOf(custom)))
- }
- return nil
- }
- if props.StdTime {
- fv := v
- p.back()
- props.StdTime = false
- tproto := &timestamp{}
- err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
- props.StdTime = true
- if err != nil {
- return err
- }
- tim, err := timestampFromProto(tproto)
- if err != nil {
- return err
- }
- if props.Repeated {
- t := reflect.TypeOf(v.Interface())
- if t.Kind() == reflect.Slice {
- if t.Elem().Kind() == reflect.Ptr {
- ts := fv.Interface().([]*time.Time)
- ts = append(ts, &tim)
- fv.Set(reflect.ValueOf(ts))
- return nil
- } else {
- ts := fv.Interface().([]time.Time)
- ts = append(ts, tim)
- fv.Set(reflect.ValueOf(ts))
- return nil
- }
- }
- }
- if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
- v.Set(reflect.ValueOf(&tim))
- } else {
- v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
- }
- return nil
- }
- if props.StdDuration {
- fv := v
- p.back()
- props.StdDuration = false
- dproto := &duration{}
- err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
- props.StdDuration = true
- if err != nil {
- return err
- }
- dur, err := durationFromProto(dproto)
- if err != nil {
- return err
- }
- if props.Repeated {
- t := reflect.TypeOf(v.Interface())
- if t.Kind() == reflect.Slice {
- if t.Elem().Kind() == reflect.Ptr {
- ds := fv.Interface().([]*time.Duration)
- ds = append(ds, &dur)
- fv.Set(reflect.ValueOf(ds))
- return nil
- } else {
- ds := fv.Interface().([]time.Duration)
- ds = append(ds, dur)
- fv.Set(reflect.ValueOf(ds))
- return nil
- }
- }
- }
- if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
- v.Set(reflect.ValueOf(&dur))
- } else {
- v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
- }
- return nil
- }
- switch fv := v; fv.Kind() {
- case reflect.Slice:
- at := v.Type()
- if at.Elem().Kind() == reflect.Uint8 {
- // Special case for []byte
- if tok.value[0] != '"' && tok.value[0] != '\'' {
- // Deliberately written out here, as the error after
- // this switch statement would write "invalid []byte: ...",
- // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f6542..000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *timestamp) error {
- if ts == nil {
- return errors.New("timestamp: nil Timestamp")
- }
- if ts.Seconds < minValidSeconds {
- return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
- }
- if ts.Seconds >= maxValidSeconds {
- return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
- }
- if ts.Nanos < 0 || ts.Nanos >= 1e9 {
- return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
- }
- return nil
-}
-
-// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-func timestampFromProto(ts *timestamp) (time.Time, error) {
- // Don't return the zero value on error, because corresponds to a valid
- // timestamp. Instead return whatever time.Unix gives us.
- var t time.Time
- if ts == nil {
- t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
- } else {
- t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
- }
- return t, validateTimestamp(ts)
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-func timestampProto(t time.Time) (*timestamp, error) {
- seconds := t.Unix()
- nanos := int32(t.Sub(time.Unix(seconds, 0)))
- ts := &timestamp{
- Seconds: seconds,
- Nanos: nanos,
- }
- if err := validateTimestamp(ts); err != nil {
- return nil, err
- }
- return ts, nil
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
deleted file mode 100644
index 38439fa99..000000000
--- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go deleted file mode 100644 index b175d1b64..000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers.go +++ /dev/null @@ -1,1888 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf85..000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -type float64Value struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float64Value) Reset() { *m = float64Value{} } -func (*float64Value) ProtoMessage() {} -func (*float64Value) String() string { return "float64" } - -type float32Value struct { - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float32Value) Reset() { *m = float32Value{} } -func (*float32Value) ProtoMessage() {} -func (*float32Value) String() string { return "float32" } - -type int64Value struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int64Value) Reset() { *m = int64Value{} } -func (*int64Value) ProtoMessage() {} -func (*int64Value) String() string { return "int64" } - -type uint64Value struct { - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint64Value) Reset() { *m = uint64Value{} } -func (*uint64Value) ProtoMessage() {} -func (*uint64Value) String() string { return "uint64" } - -type int32Value struct { - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int32Value) Reset() { *m = int32Value{} } -func (*int32Value) ProtoMessage() {} -func (*int32Value) String() string { return "int32" } - -type uint32Value struct { - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint32Value) Reset() { *m = uint32Value{} } -func (*uint32Value) ProtoMessage() {} -func (*uint32Value) String() string { return "uint32" } - -type boolValue struct { - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *boolValue) Reset() { *m = boolValue{} } -func (*boolValue) ProtoMessage() {} -func (*boolValue) String() string { return "bool" } - -type stringValue struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *stringValue) Reset() { *m = stringValue{} } -func (*stringValue) ProtoMessage() {} -func (*stringValue) String() string { return "string" } - -type bytesValue struct { - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *bytesValue) Reset() { *m = bytesValue{} } -func (*bytesValue) ProtoMessage() {} -func (*bytesValue) String() string { return "[]byte" } - -func init() { - RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") - RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") - RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") - RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") - RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") - RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") - RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") - RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") - RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile deleted file mode 100644 index 3496dc99d..000000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogo - go install github.com/gogo/protobuf/protoc-gen-gostring - protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto - protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go deleted file mode 100644 index a85bf1984..000000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go +++ /dev/null @@ -1,118 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package descriptor provides functions for obtaining protocol buffer -// descriptors for generated Go types. -// -// These functions cannot go in package proto because they depend on the -// generated protobuf descriptor messages, which themselves depend on proto. -package descriptor - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - - "github.com/gogo/protobuf/proto" -) - -// extractFile extracts a FileDescriptorProto from a gzip'd buffer. -func extractFile(gz []byte) (*FileDescriptorProto, error) { - r, err := gzip.NewReader(bytes.NewReader(gz)) - if err != nil { - return nil, fmt.Errorf("failed to open gzip reader: %v", err) - } - defer r.Close() - - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) - } - - fd := new(FileDescriptorProto) - if err := proto.Unmarshal(b, fd); err != nil { - return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) - } - - return fd, nil -} - -// Message is a proto.Message with a method to return its descriptor. -// -// Message types generated by the protocol compiler always satisfy -// the Message interface. -type Message interface { - proto.Message - Descriptor() ([]byte, []int) -} - -// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it -// describing the given message. -func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { - gz, path := msg.Descriptor() - fd, err := extractFile(gz) - if err != nil { - panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) - } - - md = fd.MessageType[path[0]] - for _, i := range path[1:] { - md = md.NestedType[i] - } - return fd, md -} - -// Is this field a scalar numeric type? 
-func (field *FieldDescriptorProto) IsScalar() bool { - if field.Type == nil { - return false - } - switch *field.Type { - case FieldDescriptorProto_TYPE_DOUBLE, - FieldDescriptorProto_TYPE_FLOAT, - FieldDescriptorProto_TYPE_INT64, - FieldDescriptorProto_TYPE_UINT64, - FieldDescriptorProto_TYPE_INT32, - FieldDescriptorProto_TYPE_FIXED64, - FieldDescriptorProto_TYPE_FIXED32, - FieldDescriptorProto_TYPE_BOOL, - FieldDescriptorProto_TYPE_UINT32, - FieldDescriptorProto_TYPE_ENUM, - FieldDescriptorProto_TYPE_SFIXED32, - FieldDescriptorProto_TYPE_SFIXED64, - FieldDescriptorProto_TYPE_SINT32, - FieldDescriptorProto_TYPE_SINT64: - return true - default: - return false - } -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go deleted file mode 100644 index cacfa3923..000000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ /dev/null @@ -1,2865 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: descriptor.proto - -package descriptor - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. 
- FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} - -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} - -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} - -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} - -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} - -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} - -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} - -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} - -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. 
- FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} - -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} - -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} - -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} - -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} - -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} - -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} - -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} - -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. - FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} - -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} - -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} - -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} - -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. 
-type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} - -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} - -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} - -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} - -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{0} -} -func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) -} -func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) -} -func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorSet.Merge(m, src) -} -func (m *FileDescriptorSet) XXX_Size() int { - return xxx_messageInfo_FileDescriptorSet.Size(m) -} -func (m *FileDescriptorSet) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. 
- MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{1} -} -func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) -} -func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorProto.Merge(m, src) -} -func (m *FileDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FileDescriptorProto.Size(m) -} -func (m *FileDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - 
return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. -type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{2} -} -func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) -} -func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) -} -func (m *DescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto.Merge(m, src) -} -func (m *DescriptorProto) XXX_Size() int { - return xxx_messageInfo_DescriptorProto.Size(m) -} -func (m *DescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - 
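For context on the accessors deleted in this hunk: the generated proto2 types use pointer-valued fields, and every getter guards against a nil receiver and a nil field before dereferencing, so callers can traverse a descriptor tree without explicit nil checks. The following is a minimal, purely illustrative sketch of such a traversal against the removed package; the printMessages helper and the nil argument are assumptions for the example and are not part of this change.

```go
package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// printMessages walks a FileDescriptorProto using the nil-guarded getters
// generated in the file being removed above. An unset field, or even a nil
// *FileDescriptorProto, yields zero values instead of panicking.
func printMessages(fd *descriptor.FileDescriptorProto) {
	fmt.Printf("file %q (package %q)\n", fd.GetName(), fd.GetPackage())
	for _, msg := range fd.GetMessageType() {
		fmt.Printf("  message %s\n", msg.GetName())
		for _, field := range msg.GetField() {
			// GetType returns a FieldDescriptorProto_Type whose String()
			// method maps the value back to names like "TYPE_STRING".
			fmt.Printf("    field %s (%s)\n", field.GetName(), field.GetType())
		}
	}
}

func main() {
	// Safe even with nil: every getter checks the receiver first.
	printMessages(nil)
}
```

The same nil-check pattern repeats for every message type in descriptor.pb.go, which is why the remaining hunks of this deletion are dominated by near-identical Get* bodies.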
-func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{2, 0} -} -func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) -} -func (m *DescriptorProto_ExtensionRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) -} -func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. 
-type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{2, 1} -} -func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) -} -func (m *DescriptorProto_ReservedRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) -} -func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{3} -} - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} - -func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) -} -func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) -} -func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) -} -func (m *ExtensionRangeOptions) XXX_Size() int { - return xxx_messageInfo_ExtensionRangeOptions.Size(m) -} -func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. 
This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{4} -} -func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) -} -func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldDescriptorProto.Merge(m, src) -} -func (m *FieldDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FieldDescriptorProto.Size(m) -} -func (m *FieldDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo - -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{5} -} -func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) -} -func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) -} -func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofDescriptorProto.Merge(m, src) -} -func (m *OneofDescriptorProto) XXX_Size() int { - return xxx_messageInfo_OneofDescriptorProto.Size(m) -} -func (m *OneofDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{6} -} -func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) -} -func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto.Merge(m, src) -} -func (m *EnumDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto.Size(m) -} -func (m *EnumDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. 
-type EnumDescriptorProto_EnumReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } -func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} -func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{6, 0} -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo - -func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -// Describes a value within an enum. 
-type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{7} -} -func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) -} -func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) -} -func (m *EnumValueDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumValueDescriptorProto.Size(m) -} -func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. 
-type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{8} -} -func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) -} -func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) -} -func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) -} -func (m *ServiceDescriptorProto) XXX_Size() int { - return xxx_messageInfo_ServiceDescriptorProto.Size(m) -} -func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo - -func (m *ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{9} -} -func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) -} -func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) -} -func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodDescriptorProto.Merge(m, src) -} -func (m *MethodDescriptorProto) XXX_Size() int { - return xxx_messageInfo_MethodDescriptorProto.Size(m) -} -func (m *MethodDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be used - // for determining the namespace. - PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{10} -} - -var extRange_FileOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} - -func (m *FileOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileOptions.Unmarshal(m, b) -} -func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) -} -func (m *FileOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOptions.Merge(m, src) -} -func (m *FileOptions) XXX_Size() int { - return xxx_messageInfo_FileOptions.Size(m) -} -func (m *FileOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FileOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FileOptions proto.InternalMessageInfo - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Do not use. 
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetPhpMetadataNamespace() string { - if m != nil && m.PhpMetadataNamespace != nil { - return *m.PhpMetadataNamespace - } - return "" -} - -func (m *FileOptions) GetRubyPackage() string { - if m != nil && m.RubyPackage != nil { - return *m.RubyPackage - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. 
they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{11} -} - -var extRange_MessageOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} - -func (m *MessageOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageOptions.Unmarshal(m, b) -} -func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) -} -func (m *MessageOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageOptions.Merge(m, src) -} -func (m *MessageOptions) XXX_Size() int { - return xxx_messageInfo_MessageOptions.Size(m) -} -func (m *MessageOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MessageOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageOptions proto.InternalMessageInfo - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. 
The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{12} -} - -var extRange_FieldOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} - -func (m *FieldOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldOptions.Unmarshal(m, b) -} -func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) -} -func (m *FieldOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldOptions.Merge(m, src) -} -func (m *FieldOptions) XXX_Size() int { - return xxx_messageInfo_FieldOptions.Size(m) -} -func (m *FieldOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FieldOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldOptions proto.InternalMessageInfo - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{13} -} - -var extRange_OneofOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofOptions.Unmarshal(m, b) -} -func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) -} -func (m *OneofOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofOptions.Merge(m, src) -} -func (m *OneofOptions) XXX_Size() int { - return xxx_messageInfo_OneofOptions.Size(m) -} -func (m *OneofOptions) XXX_DiscardUnknown() { - xxx_messageInfo_OneofOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofOptions proto.InternalMessageInfo - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{14} -} - -var extRange_EnumOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} - -func (m *EnumOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumOptions.Unmarshal(m, b) -} -func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) -} -func (m *EnumOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumOptions.Merge(m, src) -} -func (m *EnumOptions) XXX_Size() int { - return xxx_messageInfo_EnumOptions.Size(m) -} -func (m *EnumOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumOptions proto.InternalMessageInfo - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{15} -} - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} - -func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) -} -func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) -} -func (m *EnumValueOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueOptions.Merge(m, src) -} -func (m *EnumValueOptions) XXX_Size() int { - return xxx_messageInfo_EnumValueOptions.Size(m) -} -func (m *EnumValueOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{16} -} - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} - -func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) -} -func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) -} -func (m *ServiceOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceOptions.Merge(m, src) -} -func (m *ServiceOptions) XXX_Size() int { - return xxx_messageInfo_ServiceOptions.Size(m) -} -func (m *ServiceOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{17} -} - -var extRange_MethodOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} - -func (m *MethodOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodOptions.Unmarshal(m, b) -} -func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) -} -func (m *MethodOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodOptions.Merge(m, src) -} -func (m *MethodOptions) XXX_Size() int { - return xxx_messageInfo_MethodOptions.Size(m) -} -func (m *MethodOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MethodOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodOptions proto.InternalMessageInfo - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{18} -} -func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) -} -func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption.Merge(m, src) -} -func (m *UninterpretedOption) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption.Size(m) -} -func (m *UninterpretedOption) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". 
-type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{18, 0} -} -func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) -} -func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) -} -func (m *UninterpretedOption_NamePart) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) -} -func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. 
For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{19} -} -func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) -} -func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo.Merge(m, src) -} -func (m *SourceCodeInfo) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo.Size(m) -} -func (m *SourceCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. 
- Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{19, 0} -} -func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) -} -func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) -} -func (m *SourceCodeInfo_Location) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo_Location.Size(m) -} -func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{20} -} -func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) -} -func (m *GeneratedCodeInfo) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo.Size(m) -} -func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo - -func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{20, 0} -} -func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) -} -func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo - -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} - -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} - -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func init() { - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") -} - -func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } - -var fileDescriptor_308767df5ffe18af = []byte{ - // 2522 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, - 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, - 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, - 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, - 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, - 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, - 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, - 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, - 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, - 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, - 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, - 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, - 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, - 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, - 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, - 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, - 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, - 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, - 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, - 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, - 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, - 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, - 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, - 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, - 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, - 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, - 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, - 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, - 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, - 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, - 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, - 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, - 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, - 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, - 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, - 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, - 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, - 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, - 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, - 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, - 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, - 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, - 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, - 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, - 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, - 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, - 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, - 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, - 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, - 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, - 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, - 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, - 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, - 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, - 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, - 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, - 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, - 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, - 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, - 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, - 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, - 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, - 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, - 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, - 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, - 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, - 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, - 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, - 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, - 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, - 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, - 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, - 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, - 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, - 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, - 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, - 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, - 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, - 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, - 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, - 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, - 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, - 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, - 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, - 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, - 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, - 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, - 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, - 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, - 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, - 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, - 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, - 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, - 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, - 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, - 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, - 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, - 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, - 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, - 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, - 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, - 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, - 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, - 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, - 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, - 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, - 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, - 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, - 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, - 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, - 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, - 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, - 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, - 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, - 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, - 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, - 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, - 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, - 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, - 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, - 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, - 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, - 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, - 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, - 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, - 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, - 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, - 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, - 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, - 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, - 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, - 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, - 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, - 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, - 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, - 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, - 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, - 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, - 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, - 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, - 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, - 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, - 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, - 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, - 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, - 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, - 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, - 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, - 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, - 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, - 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, - 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, - 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, - 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, - 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, - 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, - 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, - 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go deleted file mode 100644 index 165b2110d..000000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ /dev/null @@ -1,752 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: descriptor.proto - -package descriptor - -import ( - fmt "fmt" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - math "math" - reflect "reflect" - sort "sort" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (this *FileDescriptorSet) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.FileDescriptorSet{") - if this.File != nil { - s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 16) - s = append(s, "&descriptor.FileDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Package != nil { - s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") - } - if this.Dependency != nil { - s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") - } - if this.PublicDependency != nil { - s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") - } - if this.WeakDependency != nil { - s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") - } - if this.MessageType != nil { - s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") - } - if this.EnumType != nil { - s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") - } - if this.Service != nil { - s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") - } - if this.Extension != nil { - s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.SourceCodeInfo != nil { - s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") - } - if this.Syntax != nil { - s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&descriptor.DescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Field != nil { - s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") - } - if this.Extension != nil { - s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") - } - if this.NestedType != nil { - s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") - } - if this.EnumType != nil { - s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") - } - if this.ExtensionRange != nil { - s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") - } - if this.OneofDecl != nil { - s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ReservedRange != 
nil { - s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") - } - if this.ReservedName != nil { - s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto_ExtensionRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto_ReservedRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.DescriptorProto_ReservedRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ExtensionRangeOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.ExtensionRangeOptions{") - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FieldDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&descriptor.FieldDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Number != nil { - s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") - } - if this.Label != nil { - s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") - } - if this.Type != nil { - s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") - } - if this.TypeName != nil { - s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") - } - if this.Extendee != nil { - s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") - } - if this.DefaultValue != nil { - s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") - } - if this.OneofIndex != nil { - s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") - } - if this.JsonName != nil { - s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *OneofDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.OneofDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.EnumDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Value != nil { - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ReservedRange != nil { - s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") - } - if this.ReservedName != nil { - s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumValueDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.EnumValueDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Number != nil { - s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ServiceDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.ServiceDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Method != nil { - s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = 
append(s, "}") - return strings.Join(s, "") -} -func (this *MethodDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&descriptor.MethodDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.InputType != nil { - s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") - } - if this.OutputType != nil { - s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ClientStreaming != nil { - s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") - } - if this.ServerStreaming != nil { - s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 25) - s = append(s, "&descriptor.FileOptions{") - if this.JavaPackage != nil { - s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") - } - if this.JavaOuterClassname != nil { - s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") - } - if this.JavaMultipleFiles != nil { - s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") - } - if this.JavaGenerateEqualsAndHash != nil { - s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") - } - if this.JavaStringCheckUtf8 != nil { - s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") - } - if this.OptimizeFor != nil { - s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") - } - if this.GoPackage != nil { - s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") - } - if this.CcGenericServices != nil { - s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") - } - if this.JavaGenericServices != nil { - s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") - } - if this.PyGenericServices != nil { - s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") - } - if this.PhpGenericServices != nil { - s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.CcEnableArenas != nil { - s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") - } - if this.ObjcClassPrefix != nil { - s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") - } - if this.CsharpNamespace != nil { - s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") - } - if this.SwiftPrefix != nil { - s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") - } - if 
this.PhpClassPrefix != nil { - s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") - } - if this.PhpNamespace != nil { - s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") - } - if this.PhpMetadataNamespace != nil { - s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") - } - if this.RubyPackage != nil { - s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MessageOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.MessageOptions{") - if this.MessageSetWireFormat != nil { - s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") - } - if this.NoStandardDescriptorAccessor != nil { - s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.MapEntry != nil { - s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FieldOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 11) - s = append(s, "&descriptor.FieldOptions{") - if this.Ctype != nil { - s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") - } - if this.Packed != nil { - s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") - } - if this.Jstype != nil { - s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") - } - if this.Lazy != nil { - s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.Weak != nil { - s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *OneofOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.OneofOptions{") - if this.UninterpretedOption != nil { - s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.EnumOptions{") - if this.AllowAlias != nil { - s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumValueOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.EnumValueOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ServiceOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.ServiceOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MethodOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.MethodOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.IdempotencyLevel != nil { - s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UninterpretedOption) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 11) - s = append(s, "&descriptor.UninterpretedOption{") - if this.Name 
!= nil { - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - } - if this.IdentifierValue != nil { - s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") - } - if this.PositiveIntValue != nil { - s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") - } - if this.NegativeIntValue != nil { - s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") - } - if this.DoubleValue != nil { - s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") - } - if this.StringValue != nil { - s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") - } - if this.AggregateValue != nil { - s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UninterpretedOption_NamePart) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.UninterpretedOption_NamePart{") - if this.NamePart != nil { - s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") - } - if this.IsExtension != nil { - s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SourceCodeInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.SourceCodeInfo{") - if this.Location != nil { - s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SourceCodeInfo_Location) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.SourceCodeInfo_Location{") - if this.Path != nil { - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - } - if this.Span != nil { - s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") - } - if this.LeadingComments != nil { - s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") - } - if this.TrailingComments != nil { - s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") - } - if this.LeadingDetachedComments != nil { - s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GeneratedCodeInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.GeneratedCodeInfo{") - if this.Annotation != nil { - s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func 
(this *GeneratedCodeInfo_Annotation) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") - if this.Path != nil { - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - } - if this.SourceFile != nil { - s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") - } - if this.Begin != nil { - s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringDescriptor(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { - e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) - if e == nil { - return "nil" - } - s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" - keys := make([]int, 0, len(e)) - for k := range e { - keys = append(keys, int(k)) - } - sort.Ints(keys) - ss := []string{} - for _, k := range keys { - ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) - } - s += strings.Join(ss, ",") + "})" - return s -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go deleted file mode 100644 index e0846a357..000000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go +++ /dev/null @@ -1,390 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package descriptor - -import ( - "strings" -) - -func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { - if !msg.GetOptions().GetMapEntry() { - return nil, nil - } - return msg.GetField()[0], msg.GetField()[1] -} - -func dotToUnderscore(r rune) rune { - if r == '.' { - return '_' - } - return r -} - -func (field *FieldDescriptorProto) WireType() (wire int) { - switch *field.Type { - case FieldDescriptorProto_TYPE_DOUBLE: - return 1 - case FieldDescriptorProto_TYPE_FLOAT: - return 5 - case FieldDescriptorProto_TYPE_INT64: - return 0 - case FieldDescriptorProto_TYPE_UINT64: - return 0 - case FieldDescriptorProto_TYPE_INT32: - return 0 - case FieldDescriptorProto_TYPE_UINT32: - return 0 - case FieldDescriptorProto_TYPE_FIXED64: - return 1 - case FieldDescriptorProto_TYPE_FIXED32: - return 5 - case FieldDescriptorProto_TYPE_BOOL: - return 0 - case FieldDescriptorProto_TYPE_STRING: - return 2 - case FieldDescriptorProto_TYPE_GROUP: - return 2 - case FieldDescriptorProto_TYPE_MESSAGE: - return 2 - case FieldDescriptorProto_TYPE_BYTES: - return 2 - case FieldDescriptorProto_TYPE_ENUM: - return 0 - case FieldDescriptorProto_TYPE_SFIXED32: - return 5 - case FieldDescriptorProto_TYPE_SFIXED64: - return 1 - case FieldDescriptorProto_TYPE_SINT32: - return 0 - case FieldDescriptorProto_TYPE_SINT64: - return 0 - } - panic("unreachable") -} - -func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { - packed := field.IsPacked() - wireType := field.WireType() - fieldNumber := field.GetNumber() - if packed { - wireType = 2 - } - x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) - return x -} - -func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { - packed := field.IsPacked3() - wireType := field.WireType() - fieldNumber := field.GetNumber() - if packed { - wireType = 2 - } - x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) - return x -} - -func (field *FieldDescriptorProto) GetKey() []byte { - x := field.GetKeyUint64() - i := 0 - keybuf := make([]byte, 0) - for i = 0; x > 127; i++ { - keybuf = append(keybuf, 0x80|uint8(x&0x7F)) - x >>= 7 - } - keybuf = append(keybuf, uint8(x)) - return keybuf -} - -func (field *FieldDescriptorProto) GetKey3() []byte { - x := field.GetKey3Uint64() - i := 0 - keybuf := make([]byte, 0) - for i = 0; x > 127; i++ { - keybuf = append(keybuf, 0x80|uint8(x&0x7F)) - x >>= 7 - } - keybuf = append(keybuf, uint8(x)) - return keybuf -} - -func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { - msg := desc.GetMessage(packageName, messageName) - if msg == nil { - return nil - } - for _, field := range msg.GetField() { - if field.GetName() == fieldName { - return field - } - } - return nil -} - -func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return msg - } - nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) - if nes != nil { - return nes - } - } - return nil -} - -func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return nes - } - res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) - if res != nil { - return res - } - } - return nil -} - -func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { - for _, file := range 
desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return msg - } - } - for _, msg := range file.GetMessageType() { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return nes - } - if msg.GetName()+"."+nes.GetName() == typeName { - return nes - } - } - } - } - return nil -} - -func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { - for _, file := range desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - } - for _, msg := range file.GetMessageType() { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - if msg.GetName()+"."+nes.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - } - } - } - return false -} - -func (msg *DescriptorProto) IsExtendable() bool { - return len(msg.GetExtensionRange()) > 0 -} - -func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", nil - } - if !parent.IsExtendable() { - return "", nil - } - extendee := "." + packageName + "." + typeName - for _, file := range desc.GetFile() { - for _, ext := range file.GetExtension() { - if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { - if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { - continue - } - } else { - if ext.GetExtendee() != extendee { - continue - } - } - if ext.GetName() == fieldName { - return file.GetPackage(), ext - } - } - } - return "", nil -} - -func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", nil - } - if !parent.IsExtendable() { - return "", nil - } - extendee := "." + packageName + "." 
+ typeName - for _, file := range desc.GetFile() { - for _, ext := range file.GetExtension() { - if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { - if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { - continue - } - } else { - if ext.GetExtendee() != extendee { - continue - } - } - if ext.GetNumber() == fieldNum { - return file.GetPackage(), ext - } - } - } - return "", nil -} - -func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", "" - } - field := parent.GetFieldDescriptor(fieldName) - if field == nil { - var extPackageName string - extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) - if field == nil { - return "", "" - } - packageName = extPackageName - } - typeNames := strings.Split(field.GetTypeName(), ".") - if len(typeNames) == 1 { - msg := desc.GetMessage(packageName, typeName) - if msg == nil { - return "", "" - } - return packageName, msg.GetName() - } - if len(typeNames) > 2 { - for i := 1; i < len(typeNames)-1; i++ { - packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") - typeName = strings.Join(typeNames[len(typeNames)-i:], ".") - msg := desc.GetMessage(packageName, typeName) - if msg != nil { - typeNames := strings.Split(msg.GetName(), ".") - if len(typeNames) == 1 { - return packageName, msg.GetName() - } - return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] - } - } - } - return "", "" -} - -func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { - for _, field := range msg.GetField() { - if field.GetName() == fieldName { - return field - } - } - return nil -} - -func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { - for _, file := range desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, enum := range file.GetEnumType() { - if enum.GetName() == typeName { - return enum - } - } - } - return nil -} - -func (f *FieldDescriptorProto) IsEnum() bool { - return *f.Type == FieldDescriptorProto_TYPE_ENUM -} - -func (f *FieldDescriptorProto) IsMessage() bool { - return *f.Type == FieldDescriptorProto_TYPE_MESSAGE -} - -func (f *FieldDescriptorProto) IsBytes() bool { - return *f.Type == FieldDescriptorProto_TYPE_BYTES -} - -func (f *FieldDescriptorProto) IsRepeated() bool { - return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED -} - -func (f *FieldDescriptorProto) IsString() bool { - return *f.Type == FieldDescriptorProto_TYPE_STRING -} - -func (f *FieldDescriptorProto) IsBool() bool { - return *f.Type == FieldDescriptorProto_TYPE_BOOL -} - -func (f *FieldDescriptorProto) IsRequired() bool { - return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED -} - -func (f *FieldDescriptorProto) IsPacked() bool { - return f.Options != nil && f.GetOptions().GetPacked() -} - -func (f *FieldDescriptorProto) IsPacked3() bool { - if f.IsRepeated() && f.IsScalar() { - if f.Options == nil || f.GetOptions().Packed == nil { - return true - } - return f.Options != nil && f.GetOptions().GetPacked() - } - return false -} - -func (m *DescriptorProto) HasExtension() bool { - return len(m.ExtensionRange) > 0 -} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go 
b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go deleted file mode 100644 index ceadde6a5..000000000 --- a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go +++ /dev/null @@ -1,101 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package sortkeys - -import ( - "sort" -) - -func Strings(l []string) { - sort.Strings(l) -} - -func Float64s(l []float64) { - sort.Float64s(l) -} - -func Float32s(l []float32) { - sort.Sort(Float32Slice(l)) -} - -func Int64s(l []int64) { - sort.Sort(Int64Slice(l)) -} - -func Int32s(l []int32) { - sort.Sort(Int32Slice(l)) -} - -func Uint64s(l []uint64) { - sort.Sort(Uint64Slice(l)) -} - -func Uint32s(l []uint32) { - sort.Sort(Uint32Slice(l)) -} - -func Bools(l []bool) { - sort.Sort(BoolSlice(l)) -} - -type BoolSlice []bool - -func (p BoolSlice) Len() int { return len(p) } -func (p BoolSlice) Less(i, j int) bool { return p[j] } -func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int32Slice []int32 - -func (p Int32Slice) Len() int { return len(p) } -func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint32Slice []uint32 - -func (p Uint32Slice) Len() int { return len(p) } -func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Float32Slice []float32 - -func (p Float32Slice) Len() int { return len(p) } -func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE deleted file mode 100644 index 
37ec93a14..000000000 --- a/vendor/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. 
- -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README deleted file mode 100644 index 387b4eb68..000000000 --- a/vendor/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go deleted file mode 100644 index 54bd7afdc..000000000 --- a/vendor/github.com/golang/glog/glog.go +++ /dev/null @@ -1,1180 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. 
-// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? 
- if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. 
It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. 
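Both Set methods above are normally invoked by the flag package when -vmodule or -log_backtrace_at appear on the command line, but the same parsing can also be driven programmatically through flag.Set. A small sketch under that assumption; the pattern "volume*=3" and the location "volume.go:87" are invented examples of the documented syntax, not values used by this repository:

    package main

    import (
        "flag"

        "github.com/golang/glog"
    )

    func main() {
        flag.Parse()
        defer glog.Flush()

        // Same effect as -vmodule=volume*=3: files whose basenames start with
        // "volume" pass V checks up to level 3.
        if err := flag.Set("vmodule", "volume*=3"); err != nil {
            glog.Errorf("bad vmodule value: %v", err)
        }

        // Same effect as -log_backtrace_at=volume.go:87: whenever a log
        // statement at that file and line fires, a stack trace is emitted.
        if err := flag.Set("log_backtrace_at", "volume.go:87"); err != nil {
            glog.Errorf("bad trace location: %v", err)
        }
    }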
-type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. 
- // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. 
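The header layout documented above can be reproduced with a plain Printf. A short sketch; the date, time, PID, file, line, and message values are invented purely to show the field widths:

    package main

    import "fmt"

    func main() {
        // Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg
        fmt.Printf("%c%02d%02d %02d:%02d:%02d.%06d %7d %s:%d] %s\n",
            'I', 5, 25, 14, 3, 7, 123456, 4123, "volume.go", 87, "created volume")
        // Prints: I0525 14:03:07.123456    4123 volume.go:87] created volume
    }

The 7-character, space-padded field is the process ID in this implementation (the TODO above notes it should really be the thread ID).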
-func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. 
- for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. -type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. 
- var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. -func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". 
- if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. 
-func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). 
-func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) -} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go deleted file mode 100644 index 65075d281..000000000 --- a/vendor/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,124 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "errors" - "flag" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. 
-var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f646931a..000000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249f7..000000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. 
- -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. 
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
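As a concrete check of the varint wire format described above: 300 is 0b1_0010_1100; taking 7-bit groups from the low end gives 0101100 and 0000010, and setting the continuation bit on every byte except the last yields 0xAC 0x02. A minimal sketch using the package-level helper defined above (it assumes github.com/golang/protobuf/proto is available as a module dependency):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        // 0xAC = 1_0101100 (low 7 bits of 300, continuation bit set)
        // 0x02 = 0_0000010 (remaining bits, no continuation)
        x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
        fmt.Println(x, n) // 300 2
    }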
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
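The zigzag transform used by DecodeZigzag64 and DecodeZigzag32 above interleaves signed values so that small magnitudes stay small on the wire: 0, -1, 1, -2, 2, ... encode to 0, 1, 2, 3, 4, ... A quick worked check of the decode formula, again as a sketch that assumes github.com/golang/protobuf/proto is available as a dependency:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        // The varint 0x05 decodes to 5. Un-zigzagging: (5 >> 1) XOR'd with
        // all-ones (because 5 is odd) gives ^uint64(2), which is -3 as an int64.
        p := proto.NewBuffer([]byte{0x05})
        x, err := p.DecodeZigzag64()
        fmt.Println(int64(x), err) // -3 <nil>
    }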
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. 
This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09..000000000 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index dea2617ce..000000000 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2cf..000000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. 
-// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41b3..000000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. - if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. 
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index fa88add30..000000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,607 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. 
-// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte -} - -// SetRawExtension is for testing only. 
-func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. 
-// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} - -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - } - return v -} - -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - } - return v -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index fdd328bb7..000000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - 
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
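The doc comment above walks through a protoc-generated Test message; the Message interface and the package-level Marshal/Unmarshal it describes can also be exercised with a hand-written legacy message. A minimal sketch, assuming the legacy struct-tag behaviour implemented in this removed file (the Note type and its field numbers are hypothetical, not part of this repository):

```go
// Sketch only: a hand-written legacy message standing in for protoc output.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
)

// Note is a hypothetical message; real code would use protoc-generated structs.
type Note struct {
	Label *string `protobuf:"bytes,1,opt,name=label" json:"label,omitempty"`
	Count *int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
}

// These three methods satisfy the legacy proto.Message interface shown above.
func (m *Note) Reset()         { *m = Note{} }
func (m *Note) String() string { return proto.CompactTextString(m) }
func (*Note) ProtoMessage()    {}

func main() {
	in := &Note{
		Label: proto.String("hello"), // optional scalars are pointers; the helpers allocate them
		Count: proto.Int32(17),
	}
	data, err := proto.Marshal(in)
	if err != nil {
		log.Fatal("marshaling error: ", err)
	}
	out := &Note{}
	if err := proto.Unmarshal(data, out); err != nil {
		log.Fatal("unmarshaling error: ", err)
	}
	fmt.Println(out) // label and count round-trip through the wire format
}
```

The same exported names should still resolve after this PR, since the newer protobuf release keeps them as deprecated wrappers.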
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
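SetDeterministic and the Reset/SetBuf/Bytes methods above exist so a single Buffer can be reused across marshal calls. A rough sketch of that pattern, assuming the legacy Buffer.Marshal method and a hypothetical hand-written Ping message:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
)

// Ping is a hypothetical legacy message used only for illustration.
type Ping struct {
	Seq *int32 `protobuf:"varint,1,opt,name=seq" json:"seq,omitempty"`
}

func (m *Ping) Reset()         { *m = Ping{} }
func (m *Ping) String() string { return proto.CompactTextString(m) }
func (*Ping) ProtoMessage()    {}

func main() {
	buf := proto.NewBuffer(nil)
	buf.SetDeterministic(true) // per the comment above, this only affects map fields (sorted keys)

	for i := int32(0); i < 3; i++ {
		buf.Reset() // reuse the same backing storage for each message
		if err := buf.Marshal(&Ping{Seq: proto.Int32(i)}); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("message %d encodes to % x\n", i, buf.Bytes())
	}
}
```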
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
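EnumName and UnmarshalJSONEnum operate on the plain name/value maps that generated code registers (FOO_name / FOO_value in the doc comment above), so they can be tried with hand-made maps. A small sketch with a hypothetical Color enum:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
)

// Hypothetical enum maps in the shape protoc generates.
var (
	colorName  = map[int32]string{0: "RED", 1: "BLUE"}
	colorValue = map[string]int32{"RED": 0, "BLUE": 1}
)

func main() {
	fmt.Println(proto.EnumName(colorName, 1)) // "BLUE"
	fmt.Println(proto.EnumName(colorName, 7)) // unknown values fall back to the decimal string "7"

	// UnmarshalJSONEnum accepts either the symbolic or the numeric JSON form.
	for _, in := range [][]byte{[]byte(`"RED"`), []byte(`1`)} {
		v, err := proto.UnmarshalJSONEnum(colorValue, in, "Color")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(v)
	}
}
```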
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
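SetDefaults above fills unset scalar fields that declare a def= value in their struct tag. A sketch with a hypothetical message whose tag mirrors the def=77 example from the removed doc comment, assuming the behaviour of the code being deleted here:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Widget is hypothetical; the def=77 tag is the part under illustration.
type Widget struct {
	Kind *int32 `protobuf:"varint,1,opt,name=kind,def=77" json:"kind,omitempty"`
}

func (m *Widget) Reset()         { *m = Widget{} }
func (m *Widget) String() string { return proto.CompactTextString(m) }
func (*Widget) ProtoMessage()    {}

func main() {
	w := &Widget{}
	proto.SetDefaults(w) // unset fields with a declared default get allocated and filled
	fmt.Println(*w.Kind) // 77
}
```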
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a75676..000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa9194a..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. 
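unmarshalMessageSet above rebuilds each extension's wire entry as (field number << 3 | wire type) followed by a length prefix, using the exported EncodeVarint/DecodeVarint helpers. A self-contained sketch of that arithmetic (field number 5 and the "hi" payload are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	const fieldNum = 5
	payload := []byte("hi")

	// Key varint: field number in the high bits, wire type in the low three (2 == length-delimited).
	b := proto.EncodeVarint(uint64(fieldNum)<<3 | 2)
	b = append(b, proto.EncodeVarint(uint64(len(payload)))...) // length prefix
	b = append(b, payload...)                                  // raw bytes

	key, n := proto.DecodeVarint(b)
	fmt.Printf("frame % x -> key=%d (field %d, wire %d), key varint is %d byte(s)\n",
		b, key, key>>3, key&7, n)
}
```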
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! 
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. 
-func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe071..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index a4b8c0cd3..000000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,544 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. 
-type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
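Properties.Parse and Properties.String above round-trip the protobuf struct-tag syntax. A short sketch using a tag in the style of the Label field from the removed doc comment:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Parse the struct-tag syntax documented above.
	p := new(proto.Properties)
	p.Parse("bytes,1,req,name=label,json=label")

	fmt.Println(p.Wire, p.Tag, p.Required, p.OrigName, p.JSONName)
	fmt.Println(p.String()) // re-renders the tag in the same comma-separated style
}
```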
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. 
- sort.Sort(prop) - - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. 
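GetProperties above takes a read lock for the common cache-hit path and only falls back to the write lock on a miss, with getPropertiesLocked re-checking and pre-inserting the entry so recursive message types terminate. A stripped-down sketch of that read-mostly cache pattern (generic names, not the vendored API):

package main

import (
	"fmt"
	"sync"
)

var (
	cacheMu sync.RWMutex
	cache   = make(map[string]int)
)

// lookup tries a cheap RLock first, then falls back to the write lock
// and re-checks before computing and storing the value.
func lookup(key string) int {
	cacheMu.RLock()
	v, ok := cache[key]
	cacheMu.RUnlock()
	if ok {
		return v
	}

	cacheMu.Lock()
	defer cacheMu.Unlock()
	if v, ok := cache[key]; ok { // another goroutine may have filled it
		return v
	}
	v = len(key) // stand-in for the expensive computation
	cache[key] = v
	return v
}

func main() {
	fmt.Println(lookup("pkg.Message"))
	fmt.Println(lookup("pkg.Message")) // served from the cache
}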
-func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa95..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
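Size and Marshal both guard against a typed nil: a nil *SomeMessage stored in the Message interface makes the interface value itself non-nil, so a plain msg == nil check would miss it. A self-contained illustration of that Go behavior (the types are hypothetical):

package main

import "fmt"

type message interface{ name() string }

type someMessage struct{}

func (*someMessage) name() string { return "someMessage" }

func main() {
	var p *someMessage // typed nil pointer
	var m message = p  // interface now holds (type=*someMessage, value=nil)

	fmt.Println(p == nil)                // true
	fmt.Println(m == nil)                // false: the interface itself is not nil
	fmt.Println(m.(*someMessage) == nil) // true: the pointer inside it is nil
}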
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. 
- if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
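setTag builds the key that precedes every field on the wire: the field number shifted left three bits, OR-ed with the wire type returned by wiretype. A quick worked example using plain integers (no dependency on the vendored helpers):

package main

import "fmt"

// Wire types from the protobuf encoding spec.
const (
	wireVarint = 0
	wireBytes  = 2
)

func main() {
	// Field number 49 with length-delimited ("bytes") encoding,
	// matching the "bytes,49,opt,..." tag format used above.
	key := uint64(49)<<3 | wireBytes
	fmt.Printf("key = %d (0x%X)\n", key, key) // key = 394 (0x18A)

	// Field number 1 encoded as a varint, e.g. an int32 field.
	fmt.Println(uint64(1)<<3 | wireVarint) // 8, i.e. the familiar 0x08 key byte
}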
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return 
sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. 
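The encoder is table-driven: typeMarshaler resolves one sizer/marshaler pair per field up front, and the hot path only calls the stored function values. A toy version of that dispatch, with made-up field kinds, meant to show the shape of the technique rather than the real implementation:

package main

import "fmt"

type sizer func(v interface{}) int
type marshaler func(b []byte, v interface{}) []byte

// pick resolves the pair once, the way computeMarshalFieldInfo caches the
// result of typeMarshaler; callers never switch on the kind again.
func pick(kind string) (sizer, marshaler) {
	switch kind {
	case "bool":
		return func(interface{}) int { return 1 },
			func(b []byte, v interface{}) []byte {
				if v.(bool) {
					return append(b, 1)
				}
				return append(b, 0)
			}
	case "fixed32":
		return func(interface{}) int { return 4 },
			func(b []byte, v interface{}) []byte {
				x := v.(uint32)
				return append(b, byte(x), byte(x>>8), byte(x>>16), byte(x>>24))
			}
	}
	panic("unknown kind " + kind)
}

func main() {
	size, marshal := pick("fixed32")
	fmt.Println(size(uint32(7)))                 // 4
	fmt.Printf("% X\n", marshal(nil, uint32(7))) // 07 00 00 00
}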
- -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func 
sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. - switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} 
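appendVarint above unrolls the standard base-128 varint: seven payload bits per byte, with the high bit set on every byte except the last. A short loop form of the same scheme, checked against encoding/binary from the standard library (which emits identical bytes):

package main

import (
	"encoding/binary"
	"fmt"
)

// putVarint is the loop form of the unrolled appendVarint above.
func putVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	fmt.Printf("% X\n", putVarint(nil, 300)) // AC 02

	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% X\n", buf[:n]) // AC 02 — same encoding
}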
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = 
appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. 
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. -func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). 
- // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. -func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. 
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) 
- continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) 
-} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def6a..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. 
- merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc52..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. 
- u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. 
- for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
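// --- Illustrative aside, not part of this diff ---------------------------
// The *Slice decoders above accept a repeated varint field either packed
// (wire type 2: one length-delimited blob of varints) or unpacked (one
// varint per tag). A sketch of decoding a packed payload, assuming the
// length prefix has already been consumed; decodePackedUvarints is a
// hypothetical helper built on the standard library:
package main

import (
	"encoding/binary"
	"fmt"
)

func decodePackedUvarints(b []byte) ([]uint64, error) {
	var out []uint64
	for len(b) > 0 {
		x, n := binary.Uvarint(b)
		if n <= 0 {
			return nil, fmt.Errorf("truncated or malformed varint")
		}
		out = append(out, x)
		b = b[n:]
	}
	return out, nil
}

func main() {
	// 1, 150, 3 encoded as varints: 0x01, 0x96 0x01, 0x03.
	payload := []byte{0x01, 0x96, 0x01, 0x03}
	vals, err := decodePackedUvarints(payload)
	fmt.Println(vals, err) // [1 150 3] <nil>
}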
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
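// --- Illustrative aside, not part of this diff ---------------------------
// The fixed64/fixed32 decoders above assemble the value byte-by-byte in
// little-endian order; the shift chains are equivalent to encoding/binary's
// LittleEndian helpers, as this sketch checks:
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}

	// Manual assembly, as in the deleted unmarshalFixed64Value.
	manual := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56

	// Same result via the standard library.
	lib := binary.LittleEndian.Uint64(b)

	fmt.Println(manual == lib, manual) // true 578437695752307201
}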
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
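// --- Illustrative aside, not part of this diff ---------------------------
// Protobuf doubles and floats are fixed64/fixed32 fields carrying IEEE-754
// bits; the decoders above combine little-endian byte assembly with
// math.Float64frombits / Float32frombits. A small round-trip sketch:
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	// Lay out 3.14159 the way a protobuf double appears on the wire.
	var wire [8]byte
	binary.LittleEndian.PutUint64(wire[:], math.Float64bits(3.14159))

	// Decode it back, mirroring unmarshalFloat64Value above.
	v := math.Float64frombits(binary.LittleEndian.Uint64(wire[:]))
	fmt.Println(v) // 3.14159
}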
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
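// --- Illustrative aside, not part of this diff ---------------------------
// The bytes decoder above copies the length-delimited payload out of the
// input buffer so the field does not alias memory the decoder may reuse;
// appending to a zero-length (but non-nil) array slice yields a non-nil
// copy even for empty payloads. A stand-alone sketch of that trick:
package main

import "fmt"

var emptyBuf [0]byte

func copyBytes(src []byte) []byte {
	return append(emptyBuf[:], src...)
}

func main() {
	in := []byte{0xde, 0xad, 0xbe, 0xef}
	out := copyBytes(in)
	in[0] = 0x00 // mutating the source does not affect the copy
	fmt.Printf("%x, non-nil empty copy: %v\n", out, copyBytes(nil) != nil)
}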
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
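// --- Illustrative aside, not part of this diff ---------------------------
// makeUnmarshalMap above decodes each map entry as a tiny submessage whose
// field 1 holds the key and field 2 holds the value. This sketch hand-encodes
// one entry of a hypothetical map<int32,int32> to show that layout:
package main

import (
	"encoding/binary"
	"fmt"
)

func appendUvarint(b []byte, x uint64) []byte {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], x)
	return append(b, tmp[:n]...)
}

// appendTag writes a field tag: (fieldNumber << 3) | wireType.
func appendTag(b []byte, fieldNum, wireType uint64) []byte {
	return appendUvarint(b, fieldNum<<3|wireType)
}

func main() {
	var entry []byte
	entry = appendTag(entry, 1, 0) // key, varint wire type
	entry = appendUvarint(entry, 7)
	entry = appendTag(entry, 2, 0) // value, varint wire type
	entry = appendUvarint(entry, 13)
	fmt.Printf("% x\n", entry) // 08 07 10 0d
}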
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee725b..000000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
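// --- Illustrative aside, not part of this diff ---------------------------
// encodeVarint/decodeVarint above implement base-128 varints: 7 payload bits
// per byte with the high bit as a continuation flag; decodeVarint is simply
// that loop unrolled for speed. The standard library speaks the same format:
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02

	x, m := binary.Uvarint(buf[:n])
	fmt.Println(x, m) // 300 2
}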
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
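// --- Illustrative aside, not part of this diff ---------------------------
// writeProto3Any above expands a google.protobuf.Any by taking the last path
// segment of its type URL as the registered message name and looking that up
// with MessageType; the URL handling itself is plain string work, roughly:
package main

import (
	"fmt"
	"strings"
)

func messageNameFromTypeURL(u string) string {
	parts := strings.Split(u, "/")
	return parts[len(parts)-1]
}

func main() {
	fmt.Println(messageNameFromTypeURL("type.googleapis.com/google.protobuf.Duration"))
	// Output: google.protobuf.Duration
}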
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
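// --- Illustrative aside, not part of this diff ---------------------------
// writeString above applies the text-format quoting rules: a few C-style
// escapes, octal escapes for any other non-printable byte, and it walks
// bytes rather than runes. An abbreviated stand-alone sketch
// (escapeTextString is hypothetical; \r and \t fall through to the octal
// branch here, unlike the full implementation):
package main

import "fmt"

func isPrint(c byte) bool { return c >= 0x20 && c < 0x7f }

func escapeTextString(s string) string {
	out := []byte{'"'}
	for i := 0; i < len(s); i++ { // bytes, not runes
		switch c := s[i]; c {
		case '\n':
			out = append(out, `\n`...)
		case '"':
			out = append(out, `\"`...)
		case '\\':
			out = append(out, `\\`...)
		default:
			if isPrint(c) {
				out = append(out, c)
			} else {
				out = append(out, fmt.Sprintf(`\%03o`, c)...)
			}
		}
	}
	return string(append(out, '"'))
}

func main() {
	fmt.Println(escapeTextString("a\"b\nc\x01")) // "a\"b\nc\001"
}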
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3af2..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func 
isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
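// --- Illustrative aside, not part of this diff ---------------------------
// unquoteC/unescape above undo those escapes, parsing octal (\141) and
// hex/Unicode (\x61, \u0061) digit runs with strconv.ParseUint, much as this
// quick check shows:
package main

import (
	"fmt"
	"strconv"
)

func main() {
	oct, _ := strconv.ParseUint("141", 8, 8)    // three octal digits, one byte
	hex, _ := strconv.ParseUint("61", 16, 8)    // two hex digits, one byte
	uni, _ := strconv.ParseUint("0061", 16, 64) // \u escape, a code point
	fmt.Printf("%c %c %c\n", byte(oct), byte(hex), rune(uni)) // a a a
}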
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. 
- if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. 
- switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index 70276e8f5..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,141 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - -import ( - "fmt" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" -) - -const googleApis = "type.googleapis.com/" - -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return any.TypeUrl[slash+1:], nil -} - -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) - if err != nil { - return nil, err - } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message -} - -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) - if err != nil { - return nil, err - } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) - } - return reflect.New(t.Elem()).Interface().(proto.Message), nil -} - -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. -// -// pb can be a proto.Message, or a *DynamicAny. 
-func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { - var err error - d.Message, err = Empty(any) - if err != nil { - return err - } - } - return UnmarshalAny(any, d.Message) - } - - aname, err := AnyMessageName(any) - if err != nil { - return err - } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) - } - return proto.Unmarshal(any.Value, pb) -} - -// Is returns true if any value contains a given message type. -func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. - if any == nil { - return false - } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 78ee52334..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,200 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -package any - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. 
Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. 
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} -} - -func (*Any) XXX_WellKnownType() string { return "Any" } - -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) -} -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) -} -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) -} - -var xxx_messageInfo_Any proto.InternalMessageInfo - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" -} - -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } - -var fileDescriptor_b53526c13ae22eb4 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index 493294255..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,154 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. 
Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index c0d595da7..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package ptypes contains code for interacting with well-known types. -*/ -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 26d1ca2fb..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,102 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" - - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). 
-func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index 0d681ee21..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -package duration - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
-// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} -} - -func (*Duration) XXX_WellKnownType() string { return "Duration" } - -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) -} -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) -} - -var xxx_messageInfo_Duration proto.InternalMessageInfo - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } - -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce41a..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8da0df01a..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,132 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" - - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. 
-// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - ts := &tspb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index 31cd846de..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -package timestamp - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. 
In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} -} - -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } - -var fileDescriptor_292007bbfe81227e = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 
0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index eafb3fa03..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
-// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d9b..000000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. 
-testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index bcfa19520..000000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae3160..000000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10f4..000000000 --- a/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea12879a..000000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 72efb0353..000000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. 
It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4], true) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
- n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.decoded[:n], false) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err - } - } -} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go deleted file mode 100644 index fcd192b84..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f65e..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. 
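As a quick orientation for the snappy block-format API whose documentation is removed here (Decode, DecodedLen) and further below (Encode), a minimal round-trip sketch; it is illustrative only and uses only the exported functions shown in the deleted sources:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/golang/snappy"
    )

    func main() {
        src := bytes.Repeat([]byte("soda "), 100)

        // Passing a nil dst is valid; Encode allocates when dst is too small.
        compressed := snappy.Encode(nil, src)

        // DecodedLen reads only the varint length header at the front of the block.
        n, err := snappy.DecodedLen(compressed)
        if err != nil {
            panic(err)
        }

        decompressed, err := snappy.Decode(make([]byte, n), compressed)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(src), len(compressed), bytes.Equal(src, decompressed))
    }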
- - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
- ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. 
In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 8c9f2049b..000000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. 
Unlike - // the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] - } - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 8d393e904..000000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. 
-// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. 
This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go deleted file mode 100644 index 150d91bc8..000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. 
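For the framing-format (streaming) API deleted above (NewBufferedWriter, NewReader), a minimal round-trip sketch, again for illustration only:

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/golang/snappy"
    )

    func main() {
        var framed bytes.Buffer

        // NewBufferedWriter buffers input; Close flushes the final chunk to the
        // underlying io.Writer, so it must not be skipped.
        w := snappy.NewBufferedWriter(&framed)
        if _, err := w.Write(bytes.Repeat([]byte("soda "), 100)); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil {
            panic(err)
        }

        // NewReader decompresses the framed stream chunk by chunk.
        out, err := io.ReadAll(snappy.NewReader(&framed))
        if err != nil {
            panic(err)
        }
        fmt.Println(len(out)) // 500
    }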
-// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe..000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. 
-TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . 
len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. - MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. 
- // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. 
- MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index dbcae905e..000000000 --- a/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. 
- dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod deleted file mode 100644 index f6406bb2c..000000000 --- a/vendor/github.com/golang/snappy/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/golang/snappy diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index ece692ea4..000000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. 
-package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml deleted file mode 100644 index f8684d99f..000000000 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - -script: - - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md deleted file mode 100644 index 51cf5cd1a..000000000 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Go makes it very simple to ensure properly formatted code, so always run - `go fmt` on your code before committing it. You should also run - [golint][] over your code. As noted in the [golint readme][], it's not - strictly necessary that your code be completely "lint-free", but this will - help you find common style issues. - - 1. Any significant changes should almost always be accompanied by tests. The - project already has good test coverage, so look at some of the existing - tests if you're unsure how to go about it. [gocov][] and [gocov-html][] - are invaluable tools for seeing which parts of your code aren't being - exercised by your tests. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. 
- -[forking]: https://help.github.com/articles/fork-a-repo -[golint]: https://github.com/golang/lint -[golint readme]: https://github.com/golang/lint/blob/master/README -[gocov]: https://github.com/axw/gocov -[gocov-html]: https://github.com/matm/gocov-html -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/google/gofuzz/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md deleted file mode 100644 index 64869af34..000000000 --- a/vendor/github.com/google/gofuzz/README.md +++ /dev/null @@ -1,71 +0,0 @@ -gofuzz -====== - -gofuzz is a library for populating go objects with random values. - -[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz) -[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) - -This is useful for testing: - -* Do your project's objects really serialize/unserialize correctly in all cases? -* Is there an incorrectly formatted object that will cause your project to panic? - -Import with ```import "github.com/google/gofuzz"``` - -You can use it on single variables: -```go -f := fuzz.New() -var myInt int -f.Fuzz(&myInt) // myInt gets a random value. -``` - -You can use it on maps: -```go -f := fuzz.New().NilChance(0).NumElements(1, 1) -var myMap map[ComplexKeyType]string -f.Fuzz(&myMap) // myMap will have exactly one element. -``` - -Customize the chance of getting a nil pointer: -```go -f := fuzz.New().NilChance(.5) -var fancyStruct struct { - A, B, C, D *string -} -f.Fuzz(&fancyStruct) // About half the pointers should be set. -``` - -You can even customize the randomization completely if needed: -```go -type MyEnum string -const ( - A MyEnum = "A" - B MyEnum = "B" -) -type MyInfo struct { - Type MyEnum - AInfo *string - BInfo *string -} - -f := fuzz.New().NilChance(0).Funcs( - func(e *MyInfo, c fuzz.Continue) { - switch c.Intn(2) { - case 0: - e.Type = A - c.Fuzz(&e.AInfo) - case 1: - e.Type = B - c.Fuzz(&e.BInfo) - } - }, -) - -var myObject MyInfo -f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. -``` - -See more examples in ```example_test.go```. - -Happy testing! diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go deleted file mode 100644 index 9f9956d4a..000000000 --- a/vendor/github.com/google/gofuzz/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fuzz is a library for populating go objects with random values. -package fuzz diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go deleted file mode 100644 index 1dfa80a6f..000000000 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ /dev/null @@ -1,487 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz - -import ( - "fmt" - "math/rand" - "reflect" - "time" -) - -// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. -type fuzzFuncMap map[reflect.Type]reflect.Value - -// Fuzzer knows how to fill any object with random fields. -type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int -} - -// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, -// RandSource, NilChance, or NumElements in any order. -func New() *Fuzzer { - return NewWithSeed(time.Now().UnixNano()) -} - -func NewWithSeed(seed int64) *Fuzzer { - f := &Fuzzer{ - defaultFuzzFuncs: fuzzFuncMap{ - reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), - }, - - fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(seed)), - nilChance: .2, - minElements: 1, - maxElements: 10, - maxDepth: 100, - } - return f -} - -// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. -// -// Each entry in fuzzFuncs must be a function taking two parameters. -// The first parameter must be a pointer or map. It is the variable that -// function will fill with random data. The second parameter must be a -// fuzz.Continue, which will provide a source of randomness and a way -// to automatically continue fuzzing smaller pieces of the first parameter. -// -// These functions are called sensibly, e.g., if you wanted custom string -// fuzzing, the function `func(s *string, c fuzz.Continue)` would get -// called and passed the address of strings. Maps and pointers will always -// be made/new'd for you, ignoring the NilChange option. For slices, it -// doesn't make much sense to pre-create them--Fuzzer doesn't know how -// long you want your slice--so take a pointer to a slice, and make it -// yourself. (If you don't want your map/pointer type pre-made, take a -// pointer to it, and make it yourself.) See the examples for a range of -// custom functions. 
-func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { - for i := range fuzzFuncs { - v := reflect.ValueOf(fuzzFuncs[i]) - if v.Kind() != reflect.Func { - panic("Need only funcs!") - } - t := v.Type() - if t.NumIn() != 2 || t.NumOut() != 0 { - panic("Need 2 in and 0 out params!") - } - argT := t.In(0) - switch argT.Kind() { - case reflect.Ptr, reflect.Map: - default: - panic("fuzzFunc must take pointer or map type") - } - if t.In(1) != reflect.TypeOf(Continue{}) { - panic("fuzzFunc's second parameter must be type fuzz.Continue") - } - f.fuzzFuncs[argT] = v - } - return f -} - -// RandSource causes f to get values from the given source of randomness. -// Use if you want deterministic fuzzing. -func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { - f.r = rand.New(s) - return f -} - -// NilChance sets the probability of creating a nil pointer, map, or slice to -// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. -func (f *Fuzzer) NilChance(p float64) *Fuzzer { - if p < 0 || p > 1 { - panic("p should be between 0 and 1, inclusive.") - } - f.nilChance = p - return f -} - -// NumElements sets the minimum and maximum number of elements that will be -// added to a non-nil map or slice. -func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { - if atLeast > atMost { - panic("atLeast must be <= atMost") - } - if atLeast < 0 { - panic("atLeast must be >= 0") - } - f.minElements = atLeast - f.maxElements = atMost - return f -} - -func (f *Fuzzer) genElementCount() int { - if f.minElements == f.maxElements { - return f.minElements - } - return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) -} - -func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance -} - -// MaxDepth sets the maximum number of recursive fuzz calls that will be made -// before stopping. This includes struct members, pointers, and map and slice -// elements. -func (f *Fuzzer) MaxDepth(d int) *Fuzzer { - f.maxDepth = d - return f -} - -// Fuzz recursively fills all of obj's fields with something random. First -// this tries to find a custom fuzz function (see Funcs). If there is no -// custom function this tests whether the object implements fuzz.Interface and, -// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if -// there is a default fuzz function provided by this package. If all of that -// fails, this will generate random values for all primitive fields and then -// recurse for all non-primitives. -// -// This is safe for cyclic or tree-like structs, up to a limit. Use the -// MaxDepth method to adjust how deep you need it to recurse. -// -// obj must be a pointer. Only exported (public) fields can be set (thanks, -// golang :/ ) Intended for tests, so will panic on bad input or unimplemented -// fields. -func (f *Fuzzer) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, 0) -} - -// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -// Not safe for cyclic or tree-like structs! -// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. 
-func (f *Fuzzer) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, flagNoCustomFuzz) -} - -const ( - // Do not try to find a custom fuzz function. Does not apply recursively. - flagNoCustomFuzz uint64 = 1 << iota -) - -func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { - fc := &fuzzerContext{fuzzer: f} - fc.doFuzz(v, flags) -} - -// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer -// be thread-safe. -type fuzzerContext struct { - fuzzer *Fuzzer - curDepth int -} - -func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { - if fc.curDepth >= fc.fuzzer.maxDepth { - return - } - fc.curDepth++ - defer func() { fc.curDepth-- }() - - if !v.CanSet() { - return - } - - if flags&flagNoCustomFuzz == 0 { - // Check for both pointer and non-pointer custom functions. - if v.CanAddr() && fc.tryCustom(v.Addr()) { - return - } - if fc.tryCustom(v) { - return - } - } - - if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, fc.fuzzer.r) - return - } - switch v.Kind() { - case reflect.Map: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.MakeMap(v.Type())) - n := fc.fuzzer.genElementCount() - for i := 0; i < n; i++ { - key := reflect.New(v.Type().Key()).Elem() - fc.doFuzz(key, 0) - val := reflect.New(v.Type().Elem()).Elem() - fc.doFuzz(val, 0) - v.SetMapIndex(key, val) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Ptr: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.New(v.Type().Elem())) - fc.doFuzz(v.Elem(), 0) - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Slice: - if fc.fuzzer.genShouldFill() { - n := fc.fuzzer.genElementCount() - v.Set(reflect.MakeSlice(v.Type(), n, n)) - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Array: - if fc.fuzzer.genShouldFill() { - n := v.Len() - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - fc.doFuzz(v.Field(i), 0) - } - case reflect.Chan: - fallthrough - case reflect.Func: - fallthrough - case reflect.Interface: - fallthrough - default: - panic(fmt.Sprintf("Can't handle %#v", v.Interface())) - } -} - -// tryCustom searches for custom handlers, and returns true iff it finds a match -// and successfully randomizes v. -func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { - // First: see if we have a fuzz function for it. - doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] - if !ok { - // Second: see if it can fuzz itself. - if v.CanInterface() { - intf := v.Interface() - if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) - return true - } - } - // Finally: see if there is a default fuzz function. - doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] - if !ok { - return false - } - } - - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.New(v.Type().Elem())) - } - case reflect.Map: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.MakeMap(v.Type())) - } - default: - return false - } - - doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - fc: fc, - Rand: fc.fuzzer.r, - })}) - return true -} - -// Interface represents an object that knows how to fuzz itself. Any time we -// find a type that implements this interface we will delegate the act of -// fuzzing itself. 
-type Interface interface { - Fuzz(c Continue) -} - -// Continue can be passed to custom fuzzing functions to allow them to use -// the correct source of randomness and to continue fuzzing their members. -type Continue struct { - fc *fuzzerContext - - // For convenience, Continue implements rand.Rand via embedding. - // Use this for generating any randomness if you want your fuzzing - // to be repeatable for a given seed. - *rand.Rand -} - -// Fuzz continues fuzzing obj. obj must be a pointer. -func (c Continue) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, 0) -} - -// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -func (c Continue) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, flagNoCustomFuzz) -} - -// RandString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func (c Continue) RandString() string { - return randString(c.Rand) -} - -// RandUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func (c Continue) RandUint64() uint64 { - return randUint64(c.Rand) -} - -// RandBool returns true or false randomly. -func (c Continue) RandBool() bool { - return randBool(c.Rand) -} - -func fuzzInt(v reflect.Value, r *rand.Rand) { - v.SetInt(int64(randUint64(r))) -} - -func fuzzUint(v reflect.Value, r *rand.Rand) { - v.SetUint(randUint64(r)) -} - -func fuzzTime(t *time.Time, c Continue) { - var sec, nsec int64 - // Allow for about 1000 years of random time values, which keeps things - // like JSON parsing reasonably happy. - sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) - c.Fuzz(&nsec) - *t = time.Unix(sec, nsec) -} - -var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ - reflect.Bool: func(v reflect.Value, r *rand.Rand) { - v.SetBool(randBool(r)) - }, - reflect.Int: fuzzInt, - reflect.Int8: fuzzInt, - reflect.Int16: fuzzInt, - reflect.Int32: fuzzInt, - reflect.Int64: fuzzInt, - reflect.Uint: fuzzUint, - reflect.Uint8: fuzzUint, - reflect.Uint16: fuzzUint, - reflect.Uint32: fuzzUint, - reflect.Uint64: fuzzUint, - reflect.Uintptr: fuzzUint, - reflect.Float32: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(float64(r.Float32())) - }, - reflect.Float64: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(r.Float64()) - }, - reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, - reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, - reflect.String: func(v reflect.Value, r *rand.Rand) { - v.SetString(randString(r)) - }, - reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, -} - -// randBool returns true or false randomly. -func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false -} - -type charRange struct { - first, last rune -} - -// choose returns a random unicode character from the given range, using the -// given randomness source. 
-func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) -} - -var unicodeRanges = []charRange{ - {' ', '~'}, // ASCII characters - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -// randString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) -} - -// randUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func randUint64(r *rand.Rand) uint64 { - return uint64(r.Uint32())<<32 | uint64(r.Uint32()) -} diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod deleted file mode 100644 index 8ec4fe9e9..000000000 --- a/vendor/github.com/google/gofuzz/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/google/gofuzz - -go 1.12 diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60b..000000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f1..000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f6b..000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc68268d..000000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 9d92c11f1..000000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). - -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9d3..000000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. 
Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9af..000000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. -package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod deleted file mode 100644 index fc84cd79d..000000000 --- a/vendor/github.com/google/uuid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b17461631..000000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) - h.Write(data) - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. 
It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 7f9e0c6c0..000000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err == nil { - *uuid = id - } - return err -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b06..000000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. 
-func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78edc9..000000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcddbd..000000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index f326b54db..000000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. 
-func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06cdc..000000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. 
(section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c7378..000000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. 
-func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 524404cc5..000000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
-func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. 
-// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 199a1ac65..000000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID[:]) - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 84af91c9f..000000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. 
-func NewRandom() (UUID, error) { - var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/.gitignore b/vendor/github.com/gophercloud/gophercloud/.gitignore deleted file mode 100644 index dd91ed205..000000000 --- a/vendor/github.com/gophercloud/gophercloud/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -**/*.swp -.idea -.vscode diff --git a/vendor/github.com/gophercloud/gophercloud/.travis.yml b/vendor/github.com/gophercloud/gophercloud/.travis.yml deleted file mode 100644 index 9153a00fc..000000000 --- a/vendor/github.com/gophercloud/gophercloud/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: go -sudo: false -install: -- GO111MODULE=off go get golang.org/x/crypto/ssh -- GO111MODULE=off go get -v -tags 'fixtures acceptance' ./... -- GO111MODULE=off go get github.com/wadey/gocovmerge -- GO111MODULE=off go get github.com/mattn/goveralls -- GO111MODULE=off go get golang.org/x/tools/cmd/goimports -go: -- "1.10" -- "1.11" -- "1.12" -- "tip" -env: - global: - - secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ=" - - GO111MODULE=on -before_script: -- go vet ./... 
-script: -- ./script/coverage -- ./script/unittest -- ./script/format -after_success: -- $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml deleted file mode 100644 index 135e3b203..000000000 --- a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml +++ /dev/null @@ -1,114 +0,0 @@ -- job: - name: gophercloud-unittest - parent: golang-test - description: | - Run gophercloud unit test - run: .zuul/playbooks/gophercloud-unittest/run.yaml - nodeset: ubuntu-xenial-ut - -- job: - name: gophercloud-acceptance-test - parent: golang-test - description: | - Run gophercloud acceptance test on master branch - run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml - -- job: - name: gophercloud-acceptance-test-ironic - parent: golang-test - description: | - Run gophercloud ironic acceptance test on master branch - run: .zuul/playbooks/gophercloud-acceptance-test-ironic/run.yaml - -- job: - name: gophercloud-acceptance-test-stein - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on stein branch - vars: - global_env: - OS_BRANCH: stable/stein - -- job: - name: gophercloud-acceptance-test-rocky - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on rocky branch - vars: - global_env: - OS_BRANCH: stable/rocky - -- job: - name: gophercloud-acceptance-test-queens - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on queens branch - vars: - global_env: - OS_BRANCH: stable/queens - -- job: - name: gophercloud-acceptance-test-pike - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on pike branch - vars: - global_env: - OS_BRANCH: stable/pike - -- job: - name: gophercloud-acceptance-test-ocata - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on ocata branch - vars: - global_env: - OS_BRANCH: stable/ocata - -- job: - name: gophercloud-acceptance-test-newton - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on newton branch - vars: - global_env: - OS_BRANCH: stable/newton - -- job: - name: gophercloud-acceptance-test-mitaka - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on mitaka branch - vars: - global_env: - OS_BRANCH: stable/mitaka - nodeset: ubuntu-trusty - -- project: - name: gophercloud/gophercloud - check: - jobs: - - gophercloud-unittest - - gophercloud-acceptance-test - - gophercloud-acceptance-test-ironic - recheck-mitaka: - jobs: - - gophercloud-acceptance-test-mitaka - recheck-newton: - jobs: - - gophercloud-acceptance-test-newton - recheck-ocata: - jobs: - - gophercloud-acceptance-test-ocata - recheck-pike: - jobs: - - gophercloud-acceptance-test-pike - recheck-queens: - jobs: - - gophercloud-acceptance-test-queens - recheck-rocky: - jobs: - - gophercloud-acceptance-test-rocky - recheck-stein: - jobs: - - gophercloud-acceptance-test-stein diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md deleted file mode 100644 index 4a3752ab9..000000000 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ /dev/null @@ -1,5 +0,0 @@ -## 0.2.0 (Unreleased) - -## 0.1.0 (May 27, 2019) - -Initial tagged release. 
diff --git a/vendor/github.com/gophercloud/gophercloud/LICENSE b/vendor/github.com/gophercloud/gophercloud/LICENSE deleted file mode 100644 index fbbbc9e4c..000000000 --- a/vendor/github.com/gophercloud/gophercloud/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Copyright 2012-2013 Rackspace, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use -this file except in compliance with the License. You may obtain a copy of the -License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed -under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. - ------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md deleted file mode 100644 index ad29041d9..000000000 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ /dev/null @@ -1,159 +0,0 @@ -# Gophercloud: an OpenStack SDK for Go -[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud) -[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) - -Gophercloud is an OpenStack Go SDK. - -## Useful links - -* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud) -* [Effective Go](https://golang.org/doc/effective_go.html) - -## How to install - -Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) -is pointing to an appropriate directory where you want to install Gophercloud: - -```bash -mkdir $HOME/go -export GOPATH=$HOME/go -``` - -To protect yourself against changes in your dependencies, we highly recommend choosing a -[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for -your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install -Gophercloud as a dependency like so: - -```bash -go get github.com/gophercloud/gophercloud - -# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud" - -godep save ./... -``` - -This will install all the source files you need into a `Godeps/_workspace` directory, which is -referenceable from your own source files when you use the `godep go` command. - -## Getting started - -### Credentials - -Because you'll be hitting an API, you will need to retrieve your OpenStack -credentials and either store them as environment variables or in your local Go -files. The first method is recommended because it decouples credential -information from source code, allowing you to push the latter to your version -control system without any security risk. - -You will need to retrieve the following: - -* username -* password -* a valid Keystone identity URL - -For users that have the OpenStack dashboard installed, there's a shortcut. If -you visit the `project/access_and_security` path in Horizon and click on the -"Download OpenStack RC File" button at the top right hand corner, you will -download a bash file that exports all of your access details to environment -variables. To execute the file, run `source admin-openrc.sh` and you will be -prompted for your password. - -### Authentication - -Once you have access to your credentials, you can begin plugging them into -Gophercloud. The next step is authentication, and this is handled by a base -"Provider" struct. 
To get one, you can either pass in your credentials -explicitly, or tell Gophercloud to use environment variables: - -```go -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/utils" -) - -// Option 1: Pass in the values yourself -opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", -} - -// Option 2: Use a utility function to retrieve all your environment variables -opts, err := openstack.AuthOptionsFromEnv() -``` - -Once you have the `opts` variable, you can pass it in and get back a -`ProviderClient` struct: - -```go -provider, err := openstack.AuthenticatedClient(opts) -``` - -The `ProviderClient` is the top-level client that all of your OpenStack services -derive from. The provider contains all of the authentication details that allow -your Go code to access the API - such as the base URL and token ID. - -### Provision a server - -Once we have a base Provider, we inject it as a dependency into each OpenStack -service. In order to work with the Compute API, we need a Compute service -client; which can be created like so: - -```go -client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), -}) -``` - -We then use this `client` for any Compute API operation we want. In our case, -we want to provision a new server - so we invoke the `Create` method and pass -in the flavor ID (hardware specification) and image ID (operating system) we're -interested in: - -```go -import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - -server, err := servers.Create(client, servers.CreateOpts{ - Name: "My new server!", - FlavorRef: "flavor_id", - ImageRef: "image_id", -}).Extract() -``` - -The above code sample creates a new server with the parameters, and embodies the -new resource in the `server` variable (a -[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct). - -## Advanced Usage - -Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gophercloud works. - -## Backwards-Compatibility Guarantees - -None. Vendor it and write tests covering the parts you use. - -## Contributing - -See the [contributing guide](./.github/CONTRIBUTING.md). - -## Help and feedback - -If you're struggling with something or have spotted a potential bug, feel free -to submit an issue to our [bug tracker](https://github.com/gophercloud/gophercloud/issues). - -## Thank You - -We'd like to extend special thanks and appreciation to the following: - -### OpenLab - - - -OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. - -### VEXXHOST - - - -VEXXHOST is providing their services to assist with the development and testing of Gophercloud. diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go deleted file mode 100644 index 5ffa8d1e0..000000000 --- a/vendor/github.com/gophercloud/gophercloud/auth_options.go +++ /dev/null @@ -1,437 +0,0 @@ -package gophercloud - -/* -AuthOptions stores information needed to authenticate to an OpenStack Cloud. -You can populate one manually, or use a provider's AuthOptionsFromEnv() function -to read relevant information from the standard environment variables. 
Pass one -to a provider's AuthenticatedClient function to authenticate and obtain a -ProviderClient representing an active session on that provider. - -Its fields are the union of those recognized by each identity implementation and -provider. - -An example of manually providing authentication information: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -An example of using AuthOptionsFromEnv(), where the environment variables can -be read from a file, such as a standard openrc file: - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) -*/ -type AuthOptions struct { - // IdentityEndpoint specifies the HTTP endpoint that is required to work with - // the Identity API of the appropriate version. While it's ultimately needed by - // all of the identity services, it will often be populated by a provider-level - // function. - // - // The IdentityEndpoint is typically referred to as the "auth_url" or - // "OS_AUTH_URL" in the information provided by the cloud operator. - IdentityEndpoint string `json:"-"` - - // Username is required if using Identity V2 API. Consult with your provider's - // control panel to discover your account's username. In Identity V3, either - // UserID or a combination of Username and DomainID or DomainName are needed. - Username string `json:"username,omitempty"` - UserID string `json:"-"` - - Password string `json:"password,omitempty"` - - // At most one of DomainID and DomainName must be provided if using Username - // with Identity V3. Otherwise, either are optional. - DomainID string `json:"-"` - DomainName string `json:"name,omitempty"` - - // The TenantID and TenantName fields are optional for the Identity V2 API. - // The same fields are known as project_id and project_name in the Identity - // V3 API, but are collected as TenantID and TenantName here in both cases. - // Some providers allow you to specify a TenantName instead of the TenantId. - // Some require both. Your provider's authentication policies will determine - // how these fields influence authentication. - // If DomainID or DomainName are provided, they will also apply to TenantName. - // It is not currently possible to authenticate with Username and a Domain - // and scope to a Project in a different Domain by using TenantName. To - // accomplish that, the ProjectID will need to be provided as the TenantID - // option. - TenantID string `json:"tenantId,omitempty"` - TenantName string `json:"tenantName,omitempty"` - - // AllowReauth should be set to true if you grant permission for Gophercloud to - // cache your credentials in memory, and to allow Gophercloud to attempt to - // re-authenticate automatically if/when your token expires. If you set it to - // false, it will not cache these settings, but re-authentication will not be - // possible. This setting defaults to false. - // - // NOTE: The reauth function will try to re-authenticate endlessly if left - // unchecked. The way to limit the number of attempts is to provide a custom - // HTTP client to the provider client and provide a transport that implements - // the RoundTripper interface and stores the number of failed retries. 
For an - // example of this, see here: - // https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 - AllowReauth bool `json:"-"` - - // TokenID allows users to authenticate (possibly as another user) with an - // authentication token ID. - TokenID string `json:"-"` - - // Scope determines the scoping of the authentication request. - Scope *AuthScope `json:"-"` - - // Authentication through Application Credentials requires supplying name, project and secret - // For project we can use TenantID - ApplicationCredentialID string `json:"-"` - ApplicationCredentialName string `json:"-"` - ApplicationCredentialSecret string `json:"-"` -} - -// AuthScope allows a created token to be limited to a specific domain or project. -type AuthScope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string -} - -// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder -// interface in the v2 tokens package -func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { - // Populate the request map. - authMap := make(map[string]interface{}) - - if opts.Username != "" { - if opts.Password != "" { - authMap["passwordCredentials"] = map[string]interface{}{ - "username": opts.Username, - "password": opts.Password, - } - } else { - return nil, ErrMissingInput{Argument: "Password"} - } - } else if opts.TokenID != "" { - authMap["token"] = map[string]interface{}{ - "id": opts.TokenID, - } - } else { - return nil, ErrMissingInput{Argument: "Username"} - } - - if opts.TenantID != "" { - authMap["tenantId"] = opts.TenantID - } - if opts.TenantName != "" { - authMap["tenantName"] = opts.TenantName - } - - return map[string]interface{}{"auth": authMap}, nil -} - -func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { - type domainReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - } - - type projectReq struct { - Domain *domainReq `json:"domain,omitempty"` - Name *string `json:"name,omitempty"` - ID *string `json:"id,omitempty"` - } - - type userReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Password string `json:"password,omitempty"` - Domain *domainReq `json:"domain,omitempty"` - } - - type passwordReq struct { - User userReq `json:"user"` - } - - type tokenReq struct { - ID string `json:"id"` - } - - type applicationCredentialReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - User *userReq `json:"user,omitempty"` - Secret *string `json:"secret,omitempty"` - } - - type identityReq struct { - Methods []string `json:"methods"` - Password *passwordReq `json:"password,omitempty"` - Token *tokenReq `json:"token,omitempty"` - ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"` - } - - type authReq struct { - Identity identityReq `json:"identity"` - } - - type request struct { - Auth authReq `json:"auth"` - } - - // Populate the request structure based on the provided arguments. Create and return an error - // if insufficient or incompatible information is present. - var req request - - if opts.Password == "" { - if opts.TokenID != "" { - // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication - // parameters. 
- if opts.Username != "" { - return nil, ErrUsernameWithToken{} - } - if opts.UserID != "" { - return nil, ErrUserIDWithToken{} - } - if opts.DomainID != "" { - return nil, ErrDomainIDWithToken{} - } - if opts.DomainName != "" { - return nil, ErrDomainNameWithToken{} - } - - // Configure the request for Token authentication. - req.Auth.Identity.Methods = []string{"token"} - req.Auth.Identity.Token = &tokenReq{ - ID: opts.TokenID, - } - - } else if opts.ApplicationCredentialID != "" { - // Configure the request for ApplicationCredentialID authentication. - // https://github.com/openstack/keystoneauth/blob/stable/rocky/keystoneauth1/identity/v3/application_credential.py#L48-L67 - // There are three kinds of possible application_credential requests - // 1. application_credential id + secret - // 2. application_credential name + secret + user_id - // 3. application_credential name + secret + username + domain_id / domain_name - if opts.ApplicationCredentialSecret == "" { - return nil, ErrAppCredMissingSecret{} - } - req.Auth.Identity.Methods = []string{"application_credential"} - req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ - ID: &opts.ApplicationCredentialID, - Secret: &opts.ApplicationCredentialSecret, - } - } else if opts.ApplicationCredentialName != "" { - if opts.ApplicationCredentialSecret == "" { - return nil, ErrAppCredMissingSecret{} - } - - var userRequest *userReq - - if opts.UserID != "" { - // UserID could be used without the domain information - userRequest = &userReq{ - ID: &opts.UserID, - } - } - - if userRequest == nil && opts.Username == "" { - // Make sure that Username or UserID are provided - return nil, ErrUsernameOrUserID{} - } - - if userRequest == nil && opts.DomainID != "" { - userRequest = &userReq{ - Name: &opts.Username, - Domain: &domainReq{ID: &opts.DomainID}, - } - } - - if userRequest == nil && opts.DomainName != "" { - userRequest = &userReq{ - Name: &opts.Username, - Domain: &domainReq{Name: &opts.DomainName}, - } - } - - // Make sure that DomainID or DomainName are provided among Username - if userRequest == nil { - return nil, ErrDomainIDOrDomainName{} - } - - req.Auth.Identity.Methods = []string{"application_credential"} - req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ - Name: &opts.ApplicationCredentialName, - User: userRequest, - Secret: &opts.ApplicationCredentialSecret, - } - } else { - // If no password or token ID or ApplicationCredential are available, authentication can't continue. - return nil, ErrMissingPassword{} - } - } else { - // Password authentication. - req.Auth.Identity.Methods = []string{"password"} - - // At least one of Username and UserID must be specified. - if opts.Username == "" && opts.UserID == "" { - return nil, ErrUsernameOrUserID{} - } - - if opts.Username != "" { - // If Username is provided, UserID may not be provided. - if opts.UserID != "" { - return nil, ErrUsernameOrUserID{} - } - - // Either DomainID or DomainName must also be specified. - if opts.DomainID == "" && opts.DomainName == "" { - return nil, ErrDomainIDOrDomainName{} - } - - if opts.DomainID != "" { - if opts.DomainName != "" { - return nil, ErrDomainIDOrDomainName{} - } - - // Configure the request for Username and Password authentication with a DomainID. 
- req.Auth.Identity.Password = &passwordReq{ - User: userReq{ - Name: &opts.Username, - Password: opts.Password, - Domain: &domainReq{ID: &opts.DomainID}, - }, - } - } - - if opts.DomainName != "" { - // Configure the request for Username and Password authentication with a DomainName. - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ - Name: &opts.Username, - Password: opts.Password, - Domain: &domainReq{Name: &opts.DomainName}, - }, - } - } - } - - if opts.UserID != "" { - // If UserID is specified, neither DomainID nor DomainName may be. - if opts.DomainID != "" { - return nil, ErrDomainIDWithUserID{} - } - if opts.DomainName != "" { - return nil, ErrDomainNameWithUserID{} - } - - // Configure the request for UserID and Password authentication. - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ID: &opts.UserID, Password: opts.Password}, - } - } - } - - b, err := BuildRequestBody(req, "") - if err != nil { - return nil, err - } - - if len(scope) != 0 { - b["auth"].(map[string]interface{})["scope"] = scope - } - - return b, nil -} - -func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - // For backwards compatibility. - // If AuthOptions.Scope was not set, try to determine it. - // This works well for common scenarios. - if opts.Scope == nil { - opts.Scope = new(AuthScope) - if opts.TenantID != "" { - opts.Scope.ProjectID = opts.TenantID - } else { - if opts.TenantName != "" { - opts.Scope.ProjectName = opts.TenantName - opts.Scope.DomainID = opts.DomainID - opts.Scope.DomainName = opts.DomainName - } - } - } - - if opts.Scope.ProjectName != "" { - // ProjectName provided: either DomainID or DomainName must also be supplied. - // ProjectID may not be supplied. - if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { - return nil, ErrScopeDomainIDOrDomainName{} - } - if opts.Scope.ProjectID != "" { - return nil, ErrScopeProjectIDOrProjectName{} - } - - if opts.Scope.DomainID != "" { - // ProjectName + DomainID - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, - }, - }, nil - } - - if opts.Scope.DomainName != "" { - // ProjectName + DomainName - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, - }, - }, nil - } - } else if opts.Scope.ProjectID != "" { - // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if opts.Scope.DomainID != "" { - return nil, ErrScopeProjectIDAlone{} - } - if opts.Scope.DomainName != "" { - return nil, ErrScopeProjectIDAlone{} - } - - // ProjectID - return map[string]interface{}{ - "project": map[string]interface{}{ - "id": &opts.Scope.ProjectID, - }, - }, nil - } else if opts.Scope.DomainID != "" { - // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. 
- if opts.Scope.DomainName != "" { - return nil, ErrScopeDomainIDOrDomainName{} - } - - // DomainID - return map[string]interface{}{ - "domain": map[string]interface{}{ - "id": &opts.Scope.DomainID, - }, - }, nil - } else if opts.Scope.DomainName != "" { - // DomainName - return map[string]interface{}{ - "domain": map[string]interface{}{ - "name": &opts.Scope.DomainName, - }, - }, nil - } - - return nil, nil -} - -func (opts AuthOptions) CanReauth() bool { - return opts.AllowReauth -} diff --git a/vendor/github.com/gophercloud/gophercloud/auth_result.go b/vendor/github.com/gophercloud/gophercloud/auth_result.go deleted file mode 100644 index 2e4699b97..000000000 --- a/vendor/github.com/gophercloud/gophercloud/auth_result.go +++ /dev/null @@ -1,52 +0,0 @@ -package gophercloud - -/* -AuthResult is the result from the request that was used to obtain a provider -client's Keystone token. It is returned from ProviderClient.GetAuthResult(). - -The following types satisfy this interface: - - github.com/gophercloud/gophercloud/openstack/identity/v2/tokens.CreateResult - github.com/gophercloud/gophercloud/openstack/identity/v3/tokens.CreateResult - -Usage example: - - import ( - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" - ) - - func GetAuthenticatedUserID(providerClient *gophercloud.ProviderClient) (string, error) { - r := providerClient.GetAuthResult() - if r == nil { - //ProviderClient did not use openstack.Authenticate(), e.g. because token - //was set manually with ProviderClient.SetToken() - return "", errors.New("no AuthResult available") - } - switch r := r.(type) { - case tokens2.CreateResult: - u, err := r.ExtractUser() - if err != nil { - return "", err - } - return u.ID, nil - case tokens3.CreateResult: - u, err := r.ExtractUser() - if err != nil { - return "", err - } - return u.ID, nil - default: - panic(fmt.Sprintf("got unexpected AuthResult type %t", r)) - } - } - -Both implementing types share a lot of methods by name, like ExtractUser() in -this example. But those methods cannot be part of the AuthResult interface -because the return types are different (in this case, type tokens2.User vs. -type tokens3.User). -*/ -type AuthResult interface { - ExtractTokenID() (string, error) -} diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go deleted file mode 100644 index 953ca822a..000000000 --- a/vendor/github.com/gophercloud/gophercloud/doc.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Package gophercloud provides a multi-vendor interface to OpenStack-compatible -clouds. The library has a three-level hierarchy: providers, services, and -resources. - -Authenticating with Providers - -Provider structs represent the cloud providers that offer and manage a -collection of services. You will generally want to create one Provider -client per OpenStack cloud. - - It is now recommended to use the `clientconfig` package found at - https://github.com/gophercloud/utils/tree/master/openstack/clientconfig - for all authentication purposes. - - The below documentation is still relevant. clientconfig simply implements - the below and presents it in an easier and more flexible way. - -Use your OpenStack credentials to create a Provider client. The -IdentityEndpoint is typically refered to as "auth_url" or "OS_AUTH_URL" in -information provided by the cloud operator. 
Additionally, the cloud may refer to -TenantID or TenantName as project_id and project_name. Credentials are -specified like so: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -You can authenticate with a token by doing: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - TokenID: "{token_id}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -You may also use the openstack.AuthOptionsFromEnv() helper function. This -function reads in standard environment variables frequently found in an -OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant" -instead of "project". - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) - -Service Clients - -Service structs are specific to a provider and handle all of the logic and -operations for a particular OpenStack service. Examples of services include: -Compute, Object Storage, Block Storage. In order to define one, you need to -pass in the parent provider, like so: - - opts := gophercloud.EndpointOpts{Region: "RegionOne"} - - client, err := openstack.NewComputeV2(provider, opts) - -Resources - -Resource structs are the domain models that services make use of in order -to work with and represent the state of API resources: - - server, err := servers.Get(client, "{serverId}").Extract() - -Intermediate Result structs are returned for API operations, which allow -generic access to the HTTP headers, response body, and any errors associated -with the network transaction. To turn a result into a usable resource struct, -you must call the Extract method which is chained to the response, or an -Extract function from an applicable extension: - - result := servers.Get(client, "{serverId}") - - // Attempt to extract the disk configuration from the OS-DCF disk config - // extension: - config, err := diskconfig.ExtractGet(result) - -All requests that enumerate a collection return a Pager struct that is used to -iterate through the results one page at a time. Use the EachPage method on that -Pager to handle each successive Page in a closure, then use the appropriate -extraction method from that request's package to interpret that Page as a slice -of results: - - err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { - s, err := servers.ExtractServers(page) - if err != nil { - return false, err - } - - // Handle the []servers.Server slice. - - // Return "false" or an error to prematurely stop fetching new pages. - return true, nil - }) - -If you want to obtain the entire collection of pages without doing any -intermediary processing on each page, you can use the AllPages method: - - allPages, err := servers.List(client, nil).AllPages() - allServers, err := servers.ExtractServers(allPages) - -This top-level package contains utility functions and data types that are used -throughout the provider and service packages. Of particular note for end users -are the AuthOptions and EndpointOpts structs. 
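One authentication path this package documents but never illustrates is application credentials. The sketch below is not taken from the removed sources; it is built only from the AuthOptions fields and openstack.AuthenticatedClient shown above, and the endpoint and credential values are placeholders.

    opts := gophercloud.AuthOptions{
        IdentityEndpoint:            "https://openstack.example.com:5000/v3",
        ApplicationCredentialID:     "{application_credential_id}",
        ApplicationCredentialSecret: "{application_credential_secret}",
    }

    provider, err := openstack.AuthenticatedClient(opts)

Per the removed ToTokenV3CreateMap logic, authenticating with an application credential Name instead of an ID additionally requires a UserID, or a Username together with a DomainID or DomainName.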
-*/ -package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go deleted file mode 100644 index 2fbc3c97f..000000000 --- a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go +++ /dev/null @@ -1,76 +0,0 @@ -package gophercloud - -// Availability indicates to whom a specific service endpoint is accessible: -// the internet at large, internal networks only, or only to administrators. -// Different identity services use different terminology for these. Identity v2 -// lists them as different kinds of URLs within the service catalog ("adminURL", -// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an -// endpoint's response. -type Availability string - -const ( - // AvailabilityAdmin indicates that an endpoint is only available to - // administrators. - AvailabilityAdmin Availability = "admin" - - // AvailabilityPublic indicates that an endpoint is available to everyone on - // the internet. - AvailabilityPublic Availability = "public" - - // AvailabilityInternal indicates that an endpoint is only available within - // the cluster's internal network. - AvailabilityInternal Availability = "internal" -) - -// EndpointOpts specifies search criteria used by queries against an -// OpenStack service catalog. The options must contain enough information to -// unambiguously identify one, and only one, endpoint within the catalog. -// -// Usually, these are passed to service client factory functions in a provider -// package, like "openstack.NewComputeV2()". -type EndpointOpts struct { - // Type [required] is the service type for the client (e.g., "compute", - // "object-store"). Generally, this will be supplied by the service client - // function, but a user-given value will be honored if provided. - Type string - - // Name [optional] is the service name for the client (e.g., "nova") as it - // appears in the service catalog. Services can have the same Type but a - // different Name, which is why both Type and Name are sometimes needed. - Name string - - // Region [required] is the geographic region in which the endpoint resides, - // generally specifying which datacenter should house your resources. - // Required only for services that span multiple regions. - Region string - - // Availability [optional] is the visibility of the endpoint to be returned. - // Valid types include the constants AvailabilityPublic, AvailabilityInternal, - // or AvailabilityAdmin from this package. - // - // Availability is not required, and defaults to AvailabilityPublic. Not all - // providers or services offer all Availability options. - Availability Availability -} - -/* -EndpointLocator is an internal function to be used by provider implementations. - -It provides an implementation that locates a single endpoint from a service -catalog for a specific ProviderClient based on user-provided EndpointOpts. The -provider then uses it to discover related ServiceClients. -*/ -type EndpointLocator func(EndpointOpts) (string, error) - -// ApplyDefaults is an internal method to be used by provider implementations. -// -// It sets EndpointOpts fields if not already set, including a default type. -// Currently, EndpointOpts.Availability defaults to the public endpoint. 
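As a sketch of how these options are consumed (the example itself is not part of the removed file): a caller sets a region and, optionally, a non-default availability before handing the struct to a service-client constructor such as openstack.NewComputeV2, which appears in the removed package documentation. The region name is a placeholder.

    eo := gophercloud.EndpointOpts{
        Region:       "RegionOne",
        Availability: gophercloud.AvailabilityInternal,
    }

    client, err := openstack.NewComputeV2(provider, eo)

Leaving Availability empty falls back to AvailabilityPublic via the ApplyDefaults method that follows.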
-func (eo *EndpointOpts) ApplyDefaults(t string) { - if eo.Type == "" { - eo.Type = t - } - if eo.Availability == "" { - eo.Availability = AvailabilityPublic - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go deleted file mode 100644 index 0bcb3af7f..000000000 --- a/vendor/github.com/gophercloud/gophercloud/errors.go +++ /dev/null @@ -1,471 +0,0 @@ -package gophercloud - -import ( - "fmt" - "strings" -) - -// BaseError is an error type that all other error types embed. -type BaseError struct { - DefaultErrString string - Info string -} - -func (e BaseError) Error() string { - e.DefaultErrString = "An error occurred while executing a Gophercloud request." - return e.choseErrString() -} - -func (e BaseError) choseErrString() string { - if e.Info != "" { - return e.Info - } - return e.DefaultErrString -} - -// ErrMissingInput is the error when input is required in a particular -// situation but not provided by the user -type ErrMissingInput struct { - BaseError - Argument string -} - -func (e ErrMissingInput) Error() string { - e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument) - return e.choseErrString() -} - -// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors. -type ErrInvalidInput struct { - ErrMissingInput - Value interface{} -} - -func (e ErrInvalidInput) Error() string { - e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value) - return e.choseErrString() -} - -// ErrMissingEnvironmentVariable is the error when environment variable is required -// in a particular situation but not provided by the user -type ErrMissingEnvironmentVariable struct { - BaseError - EnvironmentVariable string -} - -func (e ErrMissingEnvironmentVariable) Error() string { - e.DefaultErrString = fmt.Sprintf("Missing environment variable [%s]", e.EnvironmentVariable) - return e.choseErrString() -} - -// ErrMissingAnyoneOfEnvironmentVariables is the error when anyone of the environment variables -// is required in a particular situation but not provided by the user -type ErrMissingAnyoneOfEnvironmentVariables struct { - BaseError - EnvironmentVariables []string -} - -func (e ErrMissingAnyoneOfEnvironmentVariables) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Missing one of the following environment variables [%s]", - strings.Join(e.EnvironmentVariables, ", "), - ) - return e.choseErrString() -} - -// ErrUnexpectedResponseCode is returned by the Request method when a response code other than -// those listed in OkCodes is encountered. -type ErrUnexpectedResponseCode struct { - BaseError - URL string - Method string - Expected []int - Actual int - Body []byte -} - -func (e ErrUnexpectedResponseCode) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", - e.Expected, e.Method, e.URL, e.Actual, e.Body, - ) - return e.choseErrString() -} - -// ErrDefault400 is the default error type returned on a 400 HTTP response code. -type ErrDefault400 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault401 is the default error type returned on a 401 HTTP response code. -type ErrDefault401 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault403 is the default error type returned on a 403 HTTP response code. -type ErrDefault403 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault404 is the default error type returned on a 404 HTTP response code. 
-type ErrDefault404 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault405 is the default error type returned on a 405 HTTP response code. -type ErrDefault405 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault408 is the default error type returned on a 408 HTTP response code. -type ErrDefault408 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault409 is the default error type returned on a 409 HTTP response code. -type ErrDefault409 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault429 is the default error type returned on a 429 HTTP response code. -type ErrDefault429 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault500 is the default error type returned on a 500 HTTP response code. -type ErrDefault500 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault503 is the default error type returned on a 503 HTTP response code. -type ErrDefault503 struct { - ErrUnexpectedResponseCode -} - -func (e ErrDefault400) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Bad request with: [%s %s], error message: %s", - e.Method, e.URL, e.Body, - ) - return e.choseErrString() -} -func (e ErrDefault401) Error() string { - return "Authentication failed" -} -func (e ErrDefault403) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Request forbidden: [%s %s], error message: %s", - e.Method, e.URL, e.Body, - ) - return e.choseErrString() -} -func (e ErrDefault404) Error() string { - return "Resource not found" -} -func (e ErrDefault405) Error() string { - return "Method not allowed" -} -func (e ErrDefault408) Error() string { - return "The server timed out waiting for the request" -} -func (e ErrDefault429) Error() string { - return "Too many requests have been sent in a given amount of time. Pause" + - " requests, wait up to one minute, and try again." -} -func (e ErrDefault500) Error() string { - return "Internal Server Error" -} -func (e ErrDefault503) Error() string { - return "The service is currently unable to handle the request due to a temporary" + - " overloading or maintenance. This is a temporary condition. Try again later." -} - -// Err400er is the interface resource error types implement to override the error message -// from a 400 error. -type Err400er interface { - Error400(ErrUnexpectedResponseCode) error -} - -// Err401er is the interface resource error types implement to override the error message -// from a 401 error. -type Err401er interface { - Error401(ErrUnexpectedResponseCode) error -} - -// Err403er is the interface resource error types implement to override the error message -// from a 403 error. -type Err403er interface { - Error403(ErrUnexpectedResponseCode) error -} - -// Err404er is the interface resource error types implement to override the error message -// from a 404 error. -type Err404er interface { - Error404(ErrUnexpectedResponseCode) error -} - -// Err405er is the interface resource error types implement to override the error message -// from a 405 error. -type Err405er interface { - Error405(ErrUnexpectedResponseCode) error -} - -// Err408er is the interface resource error types implement to override the error message -// from a 408 error. -type Err408er interface { - Error408(ErrUnexpectedResponseCode) error -} - -// Err409er is the interface resource error types implement to override the error message -// from a 409 error. -type Err409er interface { - Error409(ErrUnexpectedResponseCode) error -} - -// Err429er is the interface resource error types implement to override the error message -// from a 429 error. 
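A hedged sketch of how callers typically branch on the ErrDefaultNNN types above. It is not taken from the removed sources; it assumes the defaults are returned by value (as their value-receiver Error() methods suggest) and borrows servers.Get from the removed package documentation, with the server ID as a placeholder.

    _, err := servers.Get(client, "{serverId}").Extract()
    switch err.(type) {
    case nil:
        // Request succeeded.
    case gophercloud.ErrDefault404:
        // The server does not exist.
    case gophercloud.ErrDefault429:
        // Rate limited; wait and retry later.
    default:
        // Anything else: fall back to err.Error() for details.
    }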
-type Err429er interface { - Error429(ErrUnexpectedResponseCode) error -} - -// Err500er is the interface resource error types implement to override the error message -// from a 500 error. -type Err500er interface { - Error500(ErrUnexpectedResponseCode) error -} - -// Err503er is the interface resource error types implement to override the error message -// from a 503 error. -type Err503er interface { - Error503(ErrUnexpectedResponseCode) error -} - -// ErrTimeOut is the error type returned when an operations times out. -type ErrTimeOut struct { - BaseError -} - -func (e ErrTimeOut) Error() string { - e.DefaultErrString = "A time out occurred" - return e.choseErrString() -} - -// ErrUnableToReauthenticate is the error type returned when reauthentication fails. -type ErrUnableToReauthenticate struct { - BaseError - ErrOriginal error -} - -func (e ErrUnableToReauthenticate) Error() string { - e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal) - return e.choseErrString() -} - -// ErrErrorAfterReauthentication is the error type returned when reauthentication -// succeeds, but an error occurs afterword (usually an HTTP error). -type ErrErrorAfterReauthentication struct { - BaseError - ErrOriginal error -} - -func (e ErrErrorAfterReauthentication) Error() string { - e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal) - return e.choseErrString() -} - -// ErrServiceNotFound is returned when no service in a service catalog matches -// the provided EndpointOpts. This is generally returned by provider service -// factory methods like "NewComputeV2()" and can mean that a service is not -// enabled for your account. -type ErrServiceNotFound struct { - BaseError -} - -func (e ErrServiceNotFound) Error() string { - e.DefaultErrString = "No suitable service could be found in the service catalog." - return e.choseErrString() -} - -// ErrEndpointNotFound is returned when no available endpoints match the -// provided EndpointOpts. This is also generally returned by provider service -// factory methods, and usually indicates that a region was specified -// incorrectly. -type ErrEndpointNotFound struct { - BaseError -} - -func (e ErrEndpointNotFound) Error() string { - e.DefaultErrString = "No suitable endpoint could be found in the service catalog." - return e.choseErrString() -} - -// ErrResourceNotFound is the error when trying to retrieve a resource's -// ID by name and the resource doesn't exist. -type ErrResourceNotFound struct { - BaseError - Name string - ResourceType string -} - -func (e ErrResourceNotFound) Error() string { - e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name) - return e.choseErrString() -} - -// ErrMultipleResourcesFound is the error when trying to retrieve a resource's -// ID by name and multiple resources have the user-provided name. 
-type ErrMultipleResourcesFound struct { - BaseError - Name string - Count int - ResourceType string -} - -func (e ErrMultipleResourcesFound) Error() string { - e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name) - return e.choseErrString() -} - -// ErrUnexpectedType is the error when an unexpected type is encountered -type ErrUnexpectedType struct { - BaseError - Expected string - Actual string -} - -func (e ErrUnexpectedType) Error() string { - e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual) - return e.choseErrString() -} - -func unacceptedAttributeErr(attribute string) string { - return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute) -} - -func redundantWithTokenErr(attribute string) string { - return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute) -} - -func redundantWithUserID(attribute string) string { - return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute) -} - -// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used. -type ErrAPIKeyProvided struct{ BaseError } - -func (e ErrAPIKeyProvided) Error() string { - return unacceptedAttributeErr("APIKey") -} - -// ErrTenantIDProvided indicates that a TenantID was provided but can't be used. -type ErrTenantIDProvided struct{ BaseError } - -func (e ErrTenantIDProvided) Error() string { - return unacceptedAttributeErr("TenantID") -} - -// ErrTenantNameProvided indicates that a TenantName was provided but can't be used. -type ErrTenantNameProvided struct{ BaseError } - -func (e ErrTenantNameProvided) Error() string { - return unacceptedAttributeErr("TenantName") -} - -// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead. -type ErrUsernameWithToken struct{ BaseError } - -func (e ErrUsernameWithToken) Error() string { - return redundantWithTokenErr("Username") -} - -// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead. -type ErrUserIDWithToken struct{ BaseError } - -func (e ErrUserIDWithToken) Error() string { - return redundantWithTokenErr("UserID") -} - -// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead. -type ErrDomainIDWithToken struct{ BaseError } - -func (e ErrDomainIDWithToken) Error() string { - return redundantWithTokenErr("DomainID") -} - -// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.s -type ErrDomainNameWithToken struct{ BaseError } - -func (e ErrDomainNameWithToken) Error() string { - return redundantWithTokenErr("DomainName") -} - -// ErrUsernameOrUserID indicates that neither username nor userID are specified, or both are at once. -type ErrUsernameOrUserID struct{ BaseError } - -func (e ErrUsernameOrUserID) Error() string { - return "Exactly one of Username and UserID must be provided for password authentication" -} - -// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used. -type ErrDomainIDWithUserID struct{ BaseError } - -func (e ErrDomainIDWithUserID) Error() string { - return redundantWithUserID("DomainID") -} - -// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used. 
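For contrast with the error cases defined in this file, here is a minimal AuthOptions combination that satisfies the Identity v3 rules enforced in ToTokenV3CreateMap: exactly one of Username or UserID, plus a domain to scope the Username. This is an illustrative sketch rather than code from the removed sources; values in braces are placeholders.

    opts := gophercloud.AuthOptions{
        IdentityEndpoint: "https://openstack.example.com:5000/v3",
        Username:         "{username}",
        Password:         "{password}",
        DomainName:       "{domain_name}",
    }

    provider, err := openstack.AuthenticatedClient(opts)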
-type ErrDomainNameWithUserID struct{ BaseError } - -func (e ErrDomainNameWithUserID) Error() string { - return redundantWithUserID("DomainName") -} - -// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it. -// It may also indicate that both a DomainID and a DomainName were provided at once. -type ErrDomainIDOrDomainName struct{ BaseError } - -func (e ErrDomainIDOrDomainName) Error() string { - return "You must provide exactly one of DomainID or DomainName to authenticate by Username" -} - -// ErrMissingPassword indicates that no password was provided and no token is available. -type ErrMissingPassword struct{ BaseError } - -func (e ErrMissingPassword) Error() string { - return "You must provide a password to authenticate" -} - -// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present. -type ErrScopeDomainIDOrDomainName struct{ BaseError } - -func (e ErrScopeDomainIDOrDomainName) Error() string { - return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName" -} - -// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope. -type ErrScopeProjectIDOrProjectName struct{ BaseError } - -func (e ErrScopeProjectIDOrProjectName) Error() string { - return "You must provide at most one of ProjectID or ProjectName in a Scope" -} - -// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope. -type ErrScopeProjectIDAlone struct{ BaseError } - -func (e ErrScopeProjectIDAlone) Error() string { - return "ProjectID must be supplied alone in a Scope" -} - -// ErrScopeEmpty indicates that no credentials were provided in a Scope. -type ErrScopeEmpty struct{ BaseError } - -func (e ErrScopeEmpty) Error() string { - return "You must provide either a Project or Domain in a Scope" -} - -// ErrAppCredMissingSecret indicates that no Application Credential Secret was provided with Application Credential ID or Name -type ErrAppCredMissingSecret struct{ BaseError } - -func (e ErrAppCredMissingSecret) Error() string { - return "You must provide an Application Credential Secret" -} diff --git a/vendor/github.com/gophercloud/gophercloud/go.mod b/vendor/github.com/gophercloud/gophercloud/go.mod deleted file mode 100644 index d1ee3b472..000000000 --- a/vendor/github.com/gophercloud/gophercloud/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/gophercloud/gophercloud - -require ( - golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 - golang.org/x/sys v0.0.0-20190209173611-3b5209105503 // indirect - gopkg.in/yaml.v2 v2.2.2 -) diff --git a/vendor/github.com/gophercloud/gophercloud/go.sum b/vendor/github.com/gophercloud/gophercloud/go.sum deleted file mode 100644 index 33cb0be8a..000000000 --- a/vendor/github.com/gophercloud/gophercloud/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go deleted file mode 100644 index 0e8d90ff8..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go +++ /dev/null @@ -1,125 +0,0 @@ -package openstack - -import ( - "os" - - "github.com/gophercloud/gophercloud" -) - -var nilOptions = gophercloud.AuthOptions{} - -/* -AuthOptionsFromEnv fills out an identity.AuthOptions structure with the -settings found on the various OpenStack OS_* environment variables. - -The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, -OS_PASSWORD and OS_PROJECT_ID. - -Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings, -or an error will result. OS_PROJECT_ID, is optional. - -OS_TENANT_ID and OS_TENANT_NAME are deprecated forms of OS_PROJECT_ID and -OS_PROJECT_NAME and the latter are expected against a v3 auth api. - -If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will still be referred -as "tenant" in Gophercloud. - -If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to -handle projects not on the default domain. - -To use this function, first set the OS_* environment variables (for example, -by sourcing an `openrc` file), then: - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) -*/ -func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { - authURL := os.Getenv("OS_AUTH_URL") - username := os.Getenv("OS_USERNAME") - userID := os.Getenv("OS_USERID") - password := os.Getenv("OS_PASSWORD") - tenantID := os.Getenv("OS_TENANT_ID") - tenantName := os.Getenv("OS_TENANT_NAME") - domainID := os.Getenv("OS_DOMAIN_ID") - domainName := os.Getenv("OS_DOMAIN_NAME") - applicationCredentialID := os.Getenv("OS_APPLICATION_CREDENTIAL_ID") - applicationCredentialName := os.Getenv("OS_APPLICATION_CREDENTIAL_NAME") - applicationCredentialSecret := os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET") - - // If OS_PROJECT_ID is set, overwrite tenantID with the value. - if v := os.Getenv("OS_PROJECT_ID"); v != "" { - tenantID = v - } - - // If OS_PROJECT_NAME is set, overwrite tenantName with the value. 
- if v := os.Getenv("OS_PROJECT_NAME"); v != "" { - tenantName = v - } - - if authURL == "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_AUTH_URL", - } - return nilOptions, err - } - - if userID == "" && username == "" { - // Empty username and userID could be ignored, when applicationCredentialID and applicationCredentialSecret are set - if applicationCredentialID == "" && applicationCredentialSecret == "" { - err := gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ - EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, - } - return nilOptions, err - } - } - - if password == "" && applicationCredentialID == "" && applicationCredentialName == "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_PASSWORD", - } - return nilOptions, err - } - - if (applicationCredentialID != "" || applicationCredentialName != "") && applicationCredentialSecret == "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_APPLICATION_CREDENTIAL_SECRET", - } - return nilOptions, err - } - - if domainID == "" && domainName == "" && tenantID == "" && tenantName != "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_PROJECT_ID", - } - return nilOptions, err - } - - if applicationCredentialID == "" && applicationCredentialName != "" && applicationCredentialSecret != "" { - if userID == "" && username == "" { - return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ - EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, - } - } - if username != "" && domainID == "" && domainName == "" { - return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ - EnvironmentVariables: []string{"OS_DOMAIN_ID", "OS_DOMAIN_NAME"}, - } - } - } - - ao := gophercloud.AuthOptions{ - IdentityEndpoint: authURL, - UserID: userID, - Username: username, - Password: password, - TenantID: tenantID, - TenantName: tenantName, - DomainID: domainID, - DomainName: domainName, - ApplicationCredentialID: applicationCredentialID, - ApplicationCredentialName: applicationCredentialName, - ApplicationCredentialSecret: applicationCredentialSecret, - } - - return ao, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/doc.go deleted file mode 100644 index b0a2c8ff3..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Package schedulerstats returns information about block storage pool capacity -and utilisation. 
Example: - - listOpts := schedulerstats.ListOpts{ - Detail: true, - } - - allPages, err := schedulerstats.List(client, listOpts).AllPages() - if err != nil { - panic(err) - } - - allStats, err := schedulerstats.ExtractStoragePools(allPages) - if err != nil { - panic(err) - } - - for _, stat := range allStats { - fmt.Printf("%+v\n", stat) - } -*/ -package schedulerstats diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/requests.go deleted file mode 100644 index 7b374dcd8..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/requests.go +++ /dev/null @@ -1,43 +0,0 @@ -package schedulerstats - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToStoragePoolsListQuery() (string, error) -} - -// ListOpts controls the view of data returned (e.g globally or per project) -// via tenant_id and the verbosity via detail. -type ListOpts struct { - // ID of the tenant to look up storage pools for. - TenantID string `q:"tenant_id"` - - // Whether to list extended details. - Detail bool `q:"detail"` -} - -// ToStoragePoolsListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToStoragePoolsListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List makes a request against the API to list storage pool information. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := storagePoolsListURL(client) - if opts != nil { - query, err := opts.ToStoragePoolsListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return StoragePoolPage{pagination.SinglePageBase(r)} - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/results.go deleted file mode 100644 index 11109673c..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/results.go +++ /dev/null @@ -1,98 +0,0 @@ -package schedulerstats - -import ( - "encoding/json" - "math" - - "github.com/gophercloud/gophercloud/pagination" -) - -// Capabilities represents the information of an individual StoragePool. -type Capabilities struct { - // The following fields should be present in all storage drivers. - DriverVersion string `json:"driver_version"` - FreeCapacityGB float64 `json:"-"` - StorageProtocol string `json:"storage_protocol"` - TotalCapacityGB float64 `json:"-"` - VendorName string `json:"vendor_name"` - VolumeBackendName string `json:"volume_backend_name"` - - // The following fields are optional and may have empty values depending - // on the storage driver in use. 
- ReservedPercentage int64 `json:"reserved_percentage"` - LocationInfo string `json:"location_info"` - QoSSupport bool `json:"QoS_support"` - ProvisionedCapacityGB float64 `json:"provisioned_capacity_gb"` - MaxOverSubscriptionRatio string `json:"max_over_subscription_ratio"` - ThinProvisioningSupport bool `json:"thin_provisioning_support"` - ThickProvisioningSupport bool `json:"thick_provisioning_support"` - TotalVolumes int64 `json:"total_volumes"` - FilterFunction string `json:"filter_function"` - GoodnessFuction string `json:"goodness_function"` - Multiattach bool `json:"multiattach"` - SparseCopyVolume bool `json:"sparse_copy_volume"` -} - -// StoragePool represents an individual StoragePool retrieved from the -// schedulerstats API. -type StoragePool struct { - Name string `json:"name"` - Capabilities Capabilities `json:"capabilities"` -} - -func (r *Capabilities) UnmarshalJSON(b []byte) error { - type tmp Capabilities - var s struct { - tmp - FreeCapacityGB interface{} `json:"free_capacity_gb"` - TotalCapacityGB interface{} `json:"total_capacity_gb"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Capabilities(s.tmp) - - // Generic function to parse a capacity value which may be a numeric - // value, "unknown", or "infinite" - parseCapacity := func(capacity interface{}) float64 { - if capacity != nil { - switch capacity.(type) { - case float64: - return capacity.(float64) - case string: - if capacity.(string) == "infinite" { - return math.Inf(1) - } - } - } - return 0.0 - } - - r.FreeCapacityGB = parseCapacity(s.FreeCapacityGB) - r.TotalCapacityGB = parseCapacity(s.TotalCapacityGB) - - return nil -} - -// StoragePoolPage is a single page of all List results. -type StoragePoolPage struct { - pagination.SinglePageBase -} - -// IsEmpty satisfies the IsEmpty method of the Page interface. It returns true -// if a List contains no results. -func (page StoragePoolPage) IsEmpty() (bool, error) { - va, err := ExtractStoragePools(page) - return len(va) == 0, err -} - -// ExtractStoragePools takes a List result and extracts the collection of -// StoragePools returned by the API. -func ExtractStoragePools(p pagination.Page) ([]StoragePool, error) { - var s struct { - StoragePools []StoragePool `json:"pools"` - } - err := (p.(StoragePoolPage)).ExtractInto(&s) - return s.StoragePools, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/urls.go deleted file mode 100644 index c0ddb3695..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package schedulerstats - -import "github.com/gophercloud/gophercloud" - -func storagePoolsListURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("scheduler-stats", "get_pools") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go deleted file mode 100644 index a78d3d048..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Package volumeactions provides information and interaction with volumes in the -OpenStack Block Storage service. A volume is a detachable block storage -device, akin to a USB hard drive. 
- -Example of Attaching a Volume to an Instance - - attachOpts := volumeactions.AttachOpts{ - MountPoint: "/mnt", - Mode: "rw", - InstanceUUID: server.ID, - } - - err := volumeactions.Attach(client, volume.ID, attachOpts).ExtractErr() - if err != nil { - panic(err) - } - - detachOpts := volumeactions.DetachOpts{ - AttachmentID: volume.Attachments[0].AttachmentID, - } - - err = volumeactions.Detach(client, volume.ID, detachOpts).ExtractErr() - if err != nil { - panic(err) - } - - -Example of Creating an Image from a Volume - - uploadImageOpts := volumeactions.UploadImageOpts{ - ImageName: "my_vol", - Force: true, - } - - volumeImage, err := volumeactions.UploadImage(client, volume.ID, uploadImageOpts).Extract() - if err != nil { - panic(err) - } - - fmt.Printf("%+v\n", volumeImage) - -Example of Extending a Volume's Size - - extendOpts := volumeactions.ExtendSizeOpts{ - NewSize: 100, - } - - err := volumeactions.ExtendSize(client, volume.ID, extendOpts).ExtractErr() - if err != nil { - panic(err) - } - -Example of Initializing a Volume Connection - - connectOpts := &volumeactions.InitializeConnectionOpts{ - IP: "127.0.0.1", - Host: "stack", - Initiator: "iqn.1994-05.com.redhat:17cf566367d2", - Multipath: gophercloud.Disabled, - Platform: "x86_64", - OSType: "linux2", - } - - connectionInfo, err := volumeactions.InitializeConnection(client, volume.ID, connectOpts).Extract() - if err != nil { - panic(err) - } - - fmt.Printf("%+v\n", connectionInfo["data"]) - - terminateOpts := &volumeactions.InitializeConnectionOpts{ - IP: "127.0.0.1", - Host: "stack", - Initiator: "iqn.1994-05.com.redhat:17cf566367d2", - Multipath: gophercloud.Disabled, - Platform: "x86_64", - OSType: "linux2", - } - - err = volumeactions.TerminateConnection(client, volume.ID, terminateOpts).ExtractErr() - if err != nil { - panic(err) - } -*/ -package volumeactions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go deleted file mode 100644 index d18bff555..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go +++ /dev/null @@ -1,269 +0,0 @@ -package volumeactions - -import ( - "github.com/gophercloud/gophercloud" -) - -// AttachOptsBuilder allows extensions to add additional parameters to the -// Attach request. -type AttachOptsBuilder interface { - ToVolumeAttachMap() (map[string]interface{}, error) -} - -// AttachMode describes the attachment mode for volumes. -type AttachMode string - -// These constants determine how a volume is attached. -const ( - ReadOnly AttachMode = "ro" - ReadWrite AttachMode = "rw" -) - -// AttachOpts contains options for attaching a Volume. -type AttachOpts struct { - // The mountpoint of this volume. - MountPoint string `json:"mountpoint,omitempty"` - - // The nova instance ID, can't set simultaneously with HostName. - InstanceUUID string `json:"instance_uuid,omitempty"` - - // The hostname of baremetal host, can't set simultaneously with InstanceUUID. - HostName string `json:"host_name,omitempty"` - - // Mount mode of this volume. - Mode AttachMode `json:"mode,omitempty"` -} - -// ToVolumeAttachMap assembles a request body based on the contents of a -// AttachOpts. 
-func (opts AttachOpts) ToVolumeAttachMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "os-attach") -} - -// Attach will attach a volume based on the values in AttachOpts. -func Attach(client *gophercloud.ServiceClient, id string, opts AttachOptsBuilder) (r AttachResult) { - b, err := opts.ToVolumeAttachMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// BeginDetach will mark the volume as detaching. -func BeginDetaching(client *gophercloud.ServiceClient, id string) (r BeginDetachingResult) { - b := map[string]interface{}{"os-begin_detaching": make(map[string]interface{})} - _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// DetachOptsBuilder allows extensions to add additional parameters to the -// Detach request. -type DetachOptsBuilder interface { - ToVolumeDetachMap() (map[string]interface{}, error) -} - -// DetachOpts contains options for detaching a Volume. -type DetachOpts struct { - // AttachmentID is the ID of the attachment between a volume and instance. - AttachmentID string `json:"attachment_id,omitempty"` -} - -// ToVolumeDetachMap assembles a request body based on the contents of a -// DetachOpts. -func (opts DetachOpts) ToVolumeDetachMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "os-detach") -} - -// Detach will detach a volume based on volume ID. -func Detach(client *gophercloud.ServiceClient, id string, opts DetachOptsBuilder) (r DetachResult) { - b, err := opts.ToVolumeDetachMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// Reserve will reserve a volume based on volume ID. -func Reserve(client *gophercloud.ServiceClient, id string) (r ReserveResult) { - b := map[string]interface{}{"os-reserve": make(map[string]interface{})} - _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// Unreserve will unreserve a volume based on volume ID. -func Unreserve(client *gophercloud.ServiceClient, id string) (r UnreserveResult) { - b := map[string]interface{}{"os-unreserve": make(map[string]interface{})} - _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// InitializeConnectionOptsBuilder allows extensions to add additional parameters to the -// InitializeConnection request. -type InitializeConnectionOptsBuilder interface { - ToVolumeInitializeConnectionMap() (map[string]interface{}, error) -} - -// InitializeConnectionOpts hosts options for InitializeConnection. -// The fields are specific to the storage driver in use and the destination -// attachment. -type InitializeConnectionOpts struct { - IP string `json:"ip,omitempty"` - Host string `json:"host,omitempty"` - Initiator string `json:"initiator,omitempty"` - Wwpns []string `json:"wwpns,omitempty"` - Wwnns string `json:"wwnns,omitempty"` - Multipath *bool `json:"multipath,omitempty"` - Platform string `json:"platform,omitempty"` - OSType string `json:"os_type,omitempty"` -} - -// ToVolumeInitializeConnectionMap assembles a request body based on the contents of a -// InitializeConnectionOpts. 
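Reserve and Unreserve above are the one attach-related pair the removed package documentation never demonstrates. A minimal sketch, not from the removed sources, reusing the client and volume.ID naming from that documentation:

    // Reserve the volume before starting an attach workflow.
    if err := volumeactions.Reserve(client, volume.ID).ExtractErr(); err != nil {
        panic(err)
    }

    // ... attach the volume here, or give up and release the reservation:
    if err := volumeactions.Unreserve(client, volume.ID).ExtractErr(); err != nil {
        panic(err)
    }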
-func (opts InitializeConnectionOpts) ToVolumeInitializeConnectionMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "connector") - return map[string]interface{}{"os-initialize_connection": b}, err -} - -// InitializeConnection initializes an iSCSI connection by volume ID. -func InitializeConnection(client *gophercloud.ServiceClient, id string, opts InitializeConnectionOptsBuilder) (r InitializeConnectionResult) { - b, err := opts.ToVolumeInitializeConnectionMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// TerminateConnectionOptsBuilder allows extensions to add additional parameters to the -// TerminateConnection request. -type TerminateConnectionOptsBuilder interface { - ToVolumeTerminateConnectionMap() (map[string]interface{}, error) -} - -// TerminateConnectionOpts hosts options for TerminateConnection. -type TerminateConnectionOpts struct { - IP string `json:"ip,omitempty"` - Host string `json:"host,omitempty"` - Initiator string `json:"initiator,omitempty"` - Wwpns []string `json:"wwpns,omitempty"` - Wwnns string `json:"wwnns,omitempty"` - Multipath *bool `json:"multipath,omitempty"` - Platform string `json:"platform,omitempty"` - OSType string `json:"os_type,omitempty"` -} - -// ToVolumeTerminateConnectionMap assembles a request body based on the contents of a -// TerminateConnectionOpts. -func (opts TerminateConnectionOpts) ToVolumeTerminateConnectionMap() (map[string]interface{}, error) { - b, err := gophercloud.BuildRequestBody(opts, "connector") - return map[string]interface{}{"os-terminate_connection": b}, err -} - -// TerminateConnection terminates an iSCSI connection by volume ID. -func TerminateConnection(client *gophercloud.ServiceClient, id string, opts TerminateConnectionOptsBuilder) (r TerminateConnectionResult) { - b, err := opts.ToVolumeTerminateConnectionMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// ExtendSizeOptsBuilder allows extensions to add additional parameters to the -// ExtendSize request. -type ExtendSizeOptsBuilder interface { - ToVolumeExtendSizeMap() (map[string]interface{}, error) -} - -// ExtendSizeOpts contains options for extending the size of an existing Volume. -// This object is passed to the volumes.ExtendSize function. -type ExtendSizeOpts struct { - // NewSize is the new size of the volume, in GB. - NewSize int `json:"new_size" required:"true"` -} - -// ToVolumeExtendSizeMap assembles a request body based on the contents of an -// ExtendSizeOpts. -func (opts ExtendSizeOpts) ToVolumeExtendSizeMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "os-extend") -} - -// ExtendSize will extend the size of the volume based on the provided information. -// This operation does not return a response body. -func ExtendSize(client *gophercloud.ServiceClient, id string, opts ExtendSizeOptsBuilder) (r ExtendSizeResult) { - b, err := opts.ToVolumeExtendSizeMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// UploadImageOptsBuilder allows extensions to add additional parameters to the -// UploadImage request. 
-type UploadImageOptsBuilder interface { - ToVolumeUploadImageMap() (map[string]interface{}, error) -} - -// UploadImageOpts contains options for uploading a Volume to image storage. -type UploadImageOpts struct { - // Container format, may be bare, ofv, ova, etc. - ContainerFormat string `json:"container_format,omitempty"` - - // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. - DiskFormat string `json:"disk_format,omitempty"` - - // The name of image that will be stored in glance. - ImageName string `json:"image_name,omitempty"` - - // Force image creation, usable if volume attached to instance. - Force bool `json:"force,omitempty"` -} - -// ToVolumeUploadImageMap assembles a request body based on the contents of a -// UploadImageOpts. -func (opts UploadImageOpts) ToVolumeUploadImageMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "os-volume_upload_image") -} - -// UploadImage will upload an image based on the values in UploadImageOptsBuilder. -func UploadImage(client *gophercloud.ServiceClient, id string, opts UploadImageOptsBuilder) (r UploadImageResult) { - b, err := opts.ToVolumeUploadImageMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// ForceDelete will delete the volume regardless of state. -func ForceDelete(client *gophercloud.ServiceClient, id string) (r ForceDeleteResult) { - _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-force_delete": ""}, nil, nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go deleted file mode 100644 index 5cadd360f..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go +++ /dev/null @@ -1,191 +0,0 @@ -package volumeactions - -import ( - "encoding/json" - "time" - - "github.com/gophercloud/gophercloud" -) - -// AttachResult contains the response body and error from an Attach request. -type AttachResult struct { - gophercloud.ErrResult -} - -// BeginDetachingResult contains the response body and error from a BeginDetach -// request. -type BeginDetachingResult struct { - gophercloud.ErrResult -} - -// DetachResult contains the response body and error from a Detach request. -type DetachResult struct { - gophercloud.ErrResult -} - -// UploadImageResult contains the response body and error from an UploadImage -// request. -type UploadImageResult struct { - gophercloud.Result -} - -// ReserveResult contains the response body and error from a Reserve request. -type ReserveResult struct { - gophercloud.ErrResult -} - -// UnreserveResult contains the response body and error from an Unreserve -// request. -type UnreserveResult struct { - gophercloud.ErrResult -} - -// TerminateConnectionResult contains the response body and error from a -// TerminateConnection request. -type TerminateConnectionResult struct { - gophercloud.ErrResult -} - -// InitializeConnectionResult contains the response body and error from an -// InitializeConnection request. -type InitializeConnectionResult struct { - gophercloud.Result -} - -// ExtendSizeResult contains the response body and error from an ExtendSize request. 
-type ExtendSizeResult struct { - gophercloud.ErrResult -} - -// Extract will get the connection information out of the -// InitializeConnectionResult object. -// -// This will be a generic map[string]interface{} and the results will be -// dependent on the type of connection made. -func (r InitializeConnectionResult) Extract() (map[string]interface{}, error) { - var s struct { - ConnectionInfo map[string]interface{} `json:"connection_info"` - } - err := r.ExtractInto(&s) - return s.ConnectionInfo, err -} - -// ImageVolumeType contains volume type information obtained from UploadImage -// action. -type ImageVolumeType struct { - // The ID of a volume type. - ID string `json:"id"` - - // Human-readable display name for the volume type. - Name string `json:"name"` - - // Human-readable description for the volume type. - Description string `json:"display_description"` - - // Flag for public access. - IsPublic bool `json:"is_public"` - - // Extra specifications for volume type. - ExtraSpecs map[string]interface{} `json:"extra_specs"` - - // ID of quality of service specs. - QosSpecsID string `json:"qos_specs_id"` - - // Flag for deletion status of volume type. - Deleted bool `json:"deleted"` - - // The date when volume type was deleted. - DeletedAt time.Time `json:"-"` - - // The date when volume type was created. - CreatedAt time.Time `json:"-"` - - // The date when this volume was last updated. - UpdatedAt time.Time `json:"-"` -} - -func (r *ImageVolumeType) UnmarshalJSON(b []byte) error { - type tmp ImageVolumeType - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - DeletedAt gophercloud.JSONRFC3339MilliNoZ `json:"deleted_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = ImageVolumeType(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - r.UpdatedAt = time.Time(s.UpdatedAt) - r.DeletedAt = time.Time(s.DeletedAt) - - return err -} - -// VolumeImage contains information about volume uploaded to an image service. -type VolumeImage struct { - // The ID of a volume an image is created from. - VolumeID string `json:"id"` - - // Container format, may be bare, ofv, ova, etc. - ContainerFormat string `json:"container_format"` - - // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. - DiskFormat string `json:"disk_format"` - - // Human-readable description for the volume. - Description string `json:"display_description"` - - // The ID of the created image. - ImageID string `json:"image_id"` - - // Human-readable display name for the image. - ImageName string `json:"image_name"` - - // Size of the volume in GB. - Size int `json:"size"` - - // Current status of the volume. - Status string `json:"status"` - - // The date when this volume was last updated. - UpdatedAt time.Time `json:"-"` - - // Volume type object of used volume. - VolumeType ImageVolumeType `json:"volume_type"` -} - -func (r *VolumeImage) UnmarshalJSON(b []byte) error { - type tmp VolumeImage - var s struct { - tmp - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = VolumeImage(s.tmp) - - r.UpdatedAt = time.Time(s.UpdatedAt) - - return err -} - -// Extract will get an object with info about the uploaded image out of the -// UploadImageResult object. 
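Uploading a volume to the image service through the deleted os-volume_upload_image action looked roughly like this hedged sketch; client and the volume ID are again placeholders, and the image name and disk format are example values only.

	// Upload the volume contents as a raw image, then read back the new image ID.
	uploadOpts := volumeactions.UploadImageOpts{
		ImageName:  "volume-backup-image",
		DiskFormat: "raw",
	}
	image, err := volumeactions.UploadImage(client, "volume-id", uploadOpts).Extract()
	if err != nil {
		// handle error
	}
	fmt.Println(image.ImageID)
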
-func (r UploadImageResult) Extract() (VolumeImage, error) { - var s struct { - VolumeImage VolumeImage `json:"os-volume_upload_image"` - } - err := r.ExtractInto(&s) - return s.VolumeImage, err -} - -// ForceDeleteResult contains the response body and error from a ForceDelete request. -type ForceDeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go deleted file mode 100644 index 20486ed71..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package volumeactions - -import "github.com/gophercloud/gophercloud" - -func actionURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("volumes", id, "action") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/doc.go deleted file mode 100644 index 25a7f8458..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Package noauth creates a "noauth" *gophercloud.ServiceClient for use in Cinder -environments configured with the noauth authentication middleware. - -Example of Creating a noauth Service Client - - provider, err := noauth.NewClient(gophercloud.AuthOptions{ - Username: os.Getenv("OS_USERNAME"), - TenantName: os.Getenv("OS_TENANT_NAME"), - }) - client, err := noauth.NewBlockStorageNoAuth(provider, noauth.EndpointOpts{ - CinderEndpoint: os.Getenv("CINDER_ENDPOINT"), - }) - - An example of a CinderEndpoint would be: http://example.com:8776/v2, -*/ -package noauth diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/requests.go deleted file mode 100644 index 21cc8f09d..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/requests.go +++ /dev/null @@ -1,55 +0,0 @@ -package noauth - -import ( - "fmt" - "strings" - - "github.com/gophercloud/gophercloud" -) - -// EndpointOpts specifies a "noauth" Cinder Endpoint. -type EndpointOpts struct { - // CinderEndpoint [required] is currently only used with "noauth" Cinder. - // A cinder endpoint with "auth_strategy=noauth" is necessary, for example: - // http://example.com:8776/v2. - CinderEndpoint string -} - -// NewClient prepares an unauthenticated ProviderClient instance. 
-func NewClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { - if options.Username == "" { - options.Username = "admin" - } - if options.TenantName == "" { - options.TenantName = "admin" - } - - client := &gophercloud.ProviderClient{ - TokenID: fmt.Sprintf("%s:%s", options.Username, options.TenantName), - } - - return client, nil -} - -func initClientOpts(client *gophercloud.ProviderClient, eo EndpointOpts) (*gophercloud.ServiceClient, error) { - sc := new(gophercloud.ServiceClient) - if eo.CinderEndpoint == "" { - return nil, fmt.Errorf("CinderEndpoint is required") - } - - token := strings.Split(client.TokenID, ":") - if len(token) != 2 { - return nil, fmt.Errorf("Malformed noauth token") - } - - endpoint := fmt.Sprintf("%s%s", gophercloud.NormalizeURL(eo.CinderEndpoint), token[1]) - sc.Endpoint = gophercloud.NormalizeURL(endpoint) - sc.ProviderClient = client - return sc, nil -} - -// NewBlockStorageNoAuth creates a ServiceClient that may be used to access a -// "noauth" block storage service (V2 or V3 Cinder API). -func NewBlockStorageNoAuth(client *gophercloud.ProviderClient, eo EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/doc.go deleted file mode 100644 index 198f83077..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package snapshots provides information and interaction with snapshots in the -// OpenStack Block Storage service. A snapshot is a point in time copy of the -// data contained in an external storage volume, and can be controlled -// programmatically. -package snapshots diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/requests.go deleted file mode 100644 index 939e50204..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/requests.go +++ /dev/null @@ -1,175 +0,0 @@ -package snapshots - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToSnapshotCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains options for creating a Snapshot. This object is passed to -// the snapshots.Create function. For more information about these parameters, -// see the Snapshot object. -type CreateOpts struct { - VolumeID string `json:"volume_id" required:"true"` - Force bool `json:"force,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -// ToSnapshotCreateMap assembles a request body based on the contents of a -// CreateOpts. -func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "snapshot") -} - -// Create will create a new Snapshot based on the values in CreateOpts. To -// extract the Snapshot object from the response, call the Extract method on the -// CreateResult. 
-func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToSnapshotCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// Delete will delete the existing Snapshot with the provided ID. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// Get retrieves the Snapshot with the provided ID. To extract the Snapshot -// object from the response, call the Extract method on the GetResult. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToSnapshotListQuery() (string, error) -} - -// ListOpts hold options for listing Snapshots. It is passed to the -// snapshots.List function. -type ListOpts struct { - // AllTenants will retrieve snapshots of all tenants/projects. - AllTenants bool `q:"all_tenants"` - - // Name will filter by the specified snapshot name. - Name string `q:"name"` - - // Status will filter by the specified status. - Status string `q:"status"` - - // TenantID will filter by a specific tenant/project ID. - // Setting AllTenants is required to use this. - TenantID string `q:"project_id"` - - // VolumeID will filter by a specified volume ID. - VolumeID string `q:"volume_id"` -} - -// ToSnapshotListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToSnapshotListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns Snapshots optionally limited by the conditions provided in -// ListOpts. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToSnapshotListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return SnapshotPage{pagination.SinglePageBase(r)} - }) -} - -// UpdateMetadataOptsBuilder allows extensions to add additional parameters to -// the Update request. -type UpdateMetadataOptsBuilder interface { - ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) -} - -// UpdateMetadataOpts contain options for updating an existing Snapshot. This -// object is passed to the snapshots.Update function. For more information -// about the parameters, see the Snapshot object. -type UpdateMetadataOpts struct { - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -// ToSnapshotUpdateMetadataMap assembles a request body based on the contents of -// an UpdateMetadataOpts. -func (opts UpdateMetadataOpts) ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "") -} - -// UpdateMetadata will update the Snapshot with provided information. To -// extract the updated Snapshot from the response, call the ExtractMetadata -// method on the UpdateMetadataResult. 
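A minimal sketch of the snapshot workflow removed here, combining Create with the WaitForStatus helper from this package's util.go; the ServiceClient, the volume ID, and the 300-second timeout are assumptions for illustration.

	// Snapshot a volume and poll until Cinder reports it as available.
	snapOpts := snapshots.CreateOpts{
		VolumeID: "volume-id",
		Name:     "nightly-snapshot",
	}
	snap, err := snapshots.Create(client, snapOpts).Extract()
	if err != nil {
		// handle error
	}
	if err := snapshots.WaitForStatus(client, snap.ID, "available", 300); err != nil {
		// handle error
	}
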
-func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) { - b, err := opts.ToSnapshotUpdateMetadataMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateMetadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// IDFromName is a convienience function that returns a snapshot's ID given its name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - - listOpts := ListOpts{ - Name: name, - } - - pages, err := List(client, listOpts).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractSnapshots(pages) - if err != nil { - return "", err - } - - for _, s := range all { - if s.Name == name { - count++ - id = s.ID - } - } - - switch count { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "snapshot"} - case 1: - return id, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "snapshot"} - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/results.go deleted file mode 100644 index 0b444d08a..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/results.go +++ /dev/null @@ -1,120 +0,0 @@ -package snapshots - -import ( - "encoding/json" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Snapshot contains all the information associated with a Cinder Snapshot. -type Snapshot struct { - // Unique identifier. - ID string `json:"id"` - - // Date created. - CreatedAt time.Time `json:"-"` - - // Date updated. - UpdatedAt time.Time `json:"-"` - - // Display name. - Name string `json:"name"` - - // Display description. - Description string `json:"description"` - - // ID of the Volume from which this Snapshot was created. - VolumeID string `json:"volume_id"` - - // Currect status of the Snapshot. - Status string `json:"status"` - - // Size of the Snapshot, in GB. - Size int `json:"size"` - - // User-defined key-value pairs. - Metadata map[string]string `json:"metadata"` -} - -// CreateResult contains the response body and error from a Create request. -type CreateResult struct { - commonResult -} - -// GetResult contains the response body and error from a Get request. -type GetResult struct { - commonResult -} - -// DeleteResult contains the response body and error from a Delete request. -type DeleteResult struct { - gophercloud.ErrResult -} - -// SnapshotPage is a pagination.Pager that is returned from a call to the List function. -type SnapshotPage struct { - pagination.SinglePageBase -} - -func (r *Snapshot) UnmarshalJSON(b []byte) error { - type tmp Snapshot - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Snapshot(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - r.UpdatedAt = time.Time(s.UpdatedAt) - - return err -} - -// IsEmpty returns true if a SnapshotPage contains no Snapshots. -func (r SnapshotPage) IsEmpty() (bool, error) { - volumes, err := ExtractSnapshots(r) - return len(volumes) == 0, err -} - -// ExtractSnapshots extracts and returns Snapshots. 
It is used while iterating over a snapshots.List call. -func ExtractSnapshots(r pagination.Page) ([]Snapshot, error) { - var s struct { - Snapshots []Snapshot `json:"snapshots"` - } - err := (r.(SnapshotPage)).ExtractInto(&s) - return s.Snapshots, err -} - -// UpdateMetadataResult contains the response body and error from an UpdateMetadata request. -type UpdateMetadataResult struct { - commonResult -} - -// ExtractMetadata returns the metadata from a response from snapshots.UpdateMetadata. -func (r UpdateMetadataResult) ExtractMetadata() (map[string]interface{}, error) { - if r.Err != nil { - return nil, r.Err - } - m := r.Body.(map[string]interface{})["metadata"] - return m.(map[string]interface{}), nil -} - -type commonResult struct { - gophercloud.Result -} - -// Extract will get the Snapshot object out of the commonResult object. -func (r commonResult) Extract() (*Snapshot, error) { - var s struct { - Snapshot *Snapshot `json:"snapshot"` - } - err := r.ExtractInto(&s) - return s.Snapshot, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/urls.go deleted file mode 100644 index 778043749..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/urls.go +++ /dev/null @@ -1,27 +0,0 @@ -package snapshots - -import "github.com/gophercloud/gophercloud" - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("snapshots") -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("snapshots", id) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return deleteURL(c, id) -} - -func listURL(c *gophercloud.ServiceClient) string { - return createURL(c) -} - -func metadataURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("snapshots", id, "metadata") -} - -func updateMetadataURL(c *gophercloud.ServiceClient, id string) string { - return metadataURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/util.go deleted file mode 100644 index 40fbb827b..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/util.go +++ /dev/null @@ -1,22 +0,0 @@ -package snapshots - -import ( - "github.com/gophercloud/gophercloud" -) - -// WaitForStatus will continually poll the resource, checking for a particular -// status. It will do this for the amount of seconds defined. -func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { - return gophercloud.WaitFor(secs, func() (bool, error) { - current, err := Get(c, id).Extract() - if err != nil { - return false, err - } - - if current.Status == status { - return true, nil - } - - return false, nil - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go deleted file mode 100644 index 307b8b12d..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package volumes provides information and interaction with volumes in the -// OpenStack Block Storage service. A volume is a detachable block storage -// device, akin to a USB hard drive. It can only be attached to one instance at -// a time. 
-package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go deleted file mode 100644 index c27ddbf67..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go +++ /dev/null @@ -1,235 +0,0 @@ -package volumes - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToVolumeCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains options for creating a Volume. This object is passed to -// the volumes.Create function. For more information about these parameters, -// see the Volume object. -type CreateOpts struct { - // The size of the volume, in GB - Size int `json:"size" required:"true"` - // The availability zone - AvailabilityZone string `json:"availability_zone,omitempty"` - // ConsistencyGroupID is the ID of a consistency group - ConsistencyGroupID string `json:"consistencygroup_id,omitempty"` - // The volume description - Description string `json:"description,omitempty"` - // One or more metadata key and value pairs to associate with the volume - Metadata map[string]string `json:"metadata,omitempty"` - // The volume name - Name string `json:"name,omitempty"` - // the ID of the existing volume snapshot - SnapshotID string `json:"snapshot_id,omitempty"` - // SourceReplica is a UUID of an existing volume to replicate with - SourceReplica string `json:"source_replica,omitempty"` - // the ID of the existing volume - SourceVolID string `json:"source_volid,omitempty"` - // The ID of the image from which you want to create the volume. - // Required to create a bootable volume. - ImageID string `json:"imageRef,omitempty"` - // The associated volume type - VolumeType string `json:"volume_type,omitempty"` -} - -// ToVolumeCreateMap assembles a request body based on the contents of a -// CreateOpts. -func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "volume") -} - -// Create will create a new Volume based on the values in CreateOpts. To extract -// the Volume object from the response, call the Extract method on the -// CreateResult. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToVolumeCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - return -} - -// DeleteOptsBuilder allows extensions to add additional parameters to the -// Delete request. -type DeleteOptsBuilder interface { - ToVolumeDeleteQuery() (string, error) -} - -// DeleteOpts contains options for deleting a Volume. This object is passed to -// the volumes.Delete function. -type DeleteOpts struct { - // Delete all snapshots of this volume as well. - Cascade bool `q:"cascade"` -} - -// ToLoadBalancerDeleteQuery formats a DeleteOpts into a query string. -func (opts DeleteOpts) ToVolumeDeleteQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// Delete will delete the existing Volume with the provided ID. 
-func Delete(client *gophercloud.ServiceClient, id string, opts DeleteOptsBuilder) (r DeleteResult) { - url := deleteURL(client, id) - if opts != nil { - query, err := opts.ToVolumeDeleteQuery() - if err != nil { - r.Err = err - return - } - url += query - } - _, r.Err = client.Delete(url, nil) - return -} - -// Get retrieves the Volume with the provided ID. To extract the Volume object -// from the response, call the Extract method on the GetResult. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToVolumeListQuery() (string, error) -} - -// ListOpts holds options for listing Volumes. It is passed to the volumes.List -// function. -type ListOpts struct { - // AllTenants will retrieve volumes of all tenants/projects. - AllTenants bool `q:"all_tenants"` - - // Metadata will filter results based on specified metadata. - Metadata map[string]string `q:"metadata"` - - // Name will filter by the specified volume name. - Name string `q:"name"` - - // Status will filter by the specified status. - Status string `q:"status"` - - // TenantID will filter by a specific tenant/project ID. - // Setting AllTenants is required for this. - TenantID string `q:"project_id"` - - // Comma-separated list of sort keys and optional sort directions in the - // form of [:]. - Sort string `q:"sort"` - - // Requests a page size of items. - Limit int `q:"limit"` - - // Used in conjunction with limit to return a slice of items. - Offset int `q:"offset"` - - // The ID of the last-seen item. - Marker string `q:"marker"` -} - -// ToVolumeListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToVolumeListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// List returns Volumes optionally limited by the conditions provided in ListOpts. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToVolumeListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return VolumePage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToVolumeUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contain options for updating an existing Volume. This object is passed -// to the volumes.Update function. For more information about the parameters, see -// the Volume object. -type UpdateOpts struct { - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -// ToVolumeUpdateMap assembles a request body based on the contents of an -// UpdateOpts. -func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "volume") -} - -// Update will update the Volume with provided information. To extract the updated -// Volume from the response, call the Extract method on the UpdateResult. 
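The volumes package deleted here followed the same create/list pattern; a hedged sketch, assuming an authenticated Block Storage v2 client and placeholder names and sizes:

	// Create a 20 GB volume, then list all volumes currently in the "available" state.
	vol, err := volumes.Create(client, volumes.CreateOpts{Size: 20, Name: "data-vol"}).Extract()
	if err != nil {
		// handle error
	}
	fmt.Println("created", vol.ID)

	pages, err := volumes.List(client, volumes.ListOpts{Status: "available"}).AllPages()
	if err != nil {
		// handle error
	}
	all, err := volumes.ExtractVolumes(pages)
	if err != nil {
		// handle error
	}
	for _, v := range all {
		fmt.Printf("%s %s %d GB\n", v.ID, v.Name, v.Size)
	}
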
-func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToVolumeUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// IDFromName is a convienience function that returns a server's ID given its name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - count := 0 - id := "" - - listOpts := ListOpts{ - Name: name, - } - - pages, err := List(client, listOpts).AllPages() - if err != nil { - return "", err - } - - all, err := ExtractVolumes(pages) - if err != nil { - return "", err - } - - for _, s := range all { - if s.Name == name { - count++ - id = s.ID - } - } - - switch count { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} - case 1: - return id, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go deleted file mode 100644 index 96572b01b..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go +++ /dev/null @@ -1,167 +0,0 @@ -package volumes - -import ( - "encoding/json" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -type Attachment struct { - AttachedAt time.Time `json:"-"` - AttachmentID string `json:"attachment_id"` - Device string `json:"device"` - HostName string `json:"host_name"` - ID string `json:"id"` - ServerID string `json:"server_id"` - VolumeID string `json:"volume_id"` -} - -func (r *Attachment) UnmarshalJSON(b []byte) error { - type tmp Attachment - var s struct { - tmp - AttachedAt gophercloud.JSONRFC3339MilliNoZ `json:"attached_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Attachment(s.tmp) - - r.AttachedAt = time.Time(s.AttachedAt) - - return err -} - -// Volume contains all the information associated with an OpenStack Volume. -type Volume struct { - // Unique identifier for the volume. - ID string `json:"id"` - // Current status of the volume. - Status string `json:"status"` - // Size of the volume in GB. - Size int `json:"size"` - // AvailabilityZone is which availability zone the volume is in. - AvailabilityZone string `json:"availability_zone"` - // The date when this volume was created. - CreatedAt time.Time `json:"-"` - // The date when this volume was last updated - UpdatedAt time.Time `json:"-"` - // Instances onto which the volume is attached. - Attachments []Attachment `json:"attachments"` - // Human-readable display name for the volume. - Name string `json:"name"` - // Human-readable description for the volume. - Description string `json:"description"` - // The type of volume to create, either SATA or SSD. - VolumeType string `json:"volume_type"` - // The ID of the snapshot from which the volume was created - SnapshotID string `json:"snapshot_id"` - // The ID of another block storage volume from which the current volume was created - SourceVolID string `json:"source_volid"` - // Arbitrary key-value pairs defined by the user. - Metadata map[string]string `json:"metadata"` - // UserID is the id of the user who created the volume. 
- UserID string `json:"user_id"` - // Indicates whether this is a bootable volume. - Bootable string `json:"bootable"` - // Encrypted denotes if the volume is encrypted. - Encrypted bool `json:"encrypted"` - // ReplicationStatus is the status of replication. - ReplicationStatus string `json:"replication_status"` - // ConsistencyGroupID is the consistency group ID. - ConsistencyGroupID string `json:"consistencygroup_id"` - // Multiattach denotes if the volume is multi-attach capable. - Multiattach bool `json:"multiattach"` -} - -func (r *Volume) UnmarshalJSON(b []byte) error { - type tmp Volume - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Volume(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - r.UpdatedAt = time.Time(s.UpdatedAt) - - return err -} - -// VolumePage is a pagination.pager that is returned from a call to the List function. -type VolumePage struct { - pagination.LinkedPageBase -} - -// IsEmpty returns true if a ListResult contains no Volumes. -func (r VolumePage) IsEmpty() (bool, error) { - volumes, err := ExtractVolumes(r) - return len(volumes) == 0, err -} - -// NextPageURL uses the response's embedded link reference to navigate to the -// next page of results. -func (r VolumePage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"volumes_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. -func ExtractVolumes(r pagination.Page) ([]Volume, error) { - var s []Volume - err := ExtractVolumesInto(r, &s) - return s, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract will get the Volume object out of the commonResult object. -func (r commonResult) Extract() (*Volume, error) { - var s Volume - err := r.ExtractInto(&s) - return &s, err -} - -func (r commonResult) ExtractInto(v interface{}) error { - return r.Result.ExtractIntoStructPtr(v, "volume") -} - -func ExtractVolumesInto(r pagination.Page, v interface{}) error { - return r.(VolumePage).Result.ExtractIntoSlicePtr(v, "volumes") -} - -// CreateResult contains the response body and error from a Create request. -type CreateResult struct { - commonResult -} - -// GetResult contains the response body and error from a Get request. -type GetResult struct { - commonResult -} - -// UpdateResult contains the response body and error from an Update request. -type UpdateResult struct { - commonResult -} - -// DeleteResult contains the response body and error from a Delete request. 
-type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go deleted file mode 100644 index 170724905..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package volumes - -import "github.com/gophercloud/gophercloud" - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("volumes") -} - -func listURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("volumes", "detail") -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("volumes", id) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return deleteURL(c, id) -} - -func updateURL(c *gophercloud.ServiceClient, id string) string { - return deleteURL(c, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go deleted file mode 100644 index e86c1b4b4..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go +++ /dev/null @@ -1,22 +0,0 @@ -package volumes - -import ( - "github.com/gophercloud/gophercloud" -) - -// WaitForStatus will continually poll the resource, checking for a particular -// status. It will do this for the amount of seconds defined. -func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { - return gophercloud.WaitFor(secs, func() (bool, error) { - current, err := Get(c, id).Extract() - if err != nil { - return false, err - } - - if current.Status == status { - return true, nil - } - - return false, nil - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go deleted file mode 100644 index 50f239711..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ /dev/null @@ -1,438 +0,0 @@ -package openstack - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" - "github.com/gophercloud/gophercloud/openstack/utils" -) - -const ( - // v2 represents Keystone v2. - // It should never increase beyond 2.0. - v2 = "v2.0" - - // v3 represents Keystone v3. - // The version can be anything from v3 to v3.x. - v3 = "v3" -) - -/* -NewClient prepares an unauthenticated ProviderClient instance. -Most users will probably prefer using the AuthenticatedClient function -instead. - -This is useful if you wish to explicitly control the version of the identity -service that's used for authentication explicitly, for example. 
- -A basic example of using this would be: - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.NewClient(ao.IdentityEndpoint) - client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) -*/ -func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { - base, err := utils.BaseEndpoint(endpoint) - if err != nil { - return nil, err - } - - endpoint = gophercloud.NormalizeURL(endpoint) - base = gophercloud.NormalizeURL(base) - - p := new(gophercloud.ProviderClient) - p.IdentityBase = base - p.IdentityEndpoint = endpoint - p.UseTokenLock() - - return p, nil -} - -/* -AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint -specified by the options, acquires a token, and returns a Provider Client -instance that's ready to operate. - -If the full path to a versioned identity endpoint was specified (example: -http://example.com:5000/v3), that path will be used as the endpoint to query. - -If a versionless endpoint was specified (example: http://example.com:5000/), -the endpoint will be queried to determine which versions of the identity service -are available, then chooses the most recent or most supported version. - -Example: - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(ao) - client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), - }) -*/ -func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { - client, err := NewClient(options.IdentityEndpoint) - if err != nil { - return nil, err - } - - err = Authenticate(client, options) - if err != nil { - return nil, err - } - return client, nil -} - -// Authenticate or re-authenticate against the most recent identity service -// supported at the provided endpoint. -func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { - versions := []*utils.Version{ - {ID: v2, Priority: 20, Suffix: "/v2.0/"}, - {ID: v3, Priority: 30, Suffix: "/v3/"}, - } - - chosen, endpoint, err := utils.ChooseVersion(client, versions) - if err != nil { - return err - } - - switch chosen.ID { - case v2: - return v2auth(client, endpoint, options, gophercloud.EndpointOpts{}) - case v3: - return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{}) - default: - // The switch statement must be out of date from the versions list. - return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) - } -} - -// AuthenticateV2 explicitly authenticates against the identity v2 endpoint. 
-func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { - return v2auth(client, "", options, eo) -} - -func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { - v2Client, err := NewIdentityV2(client, eo) - if err != nil { - return err - } - - if endpoint != "" { - v2Client.Endpoint = endpoint - } - - v2Opts := tokens2.AuthOptions{ - IdentityEndpoint: options.IdentityEndpoint, - Username: options.Username, - Password: options.Password, - TenantID: options.TenantID, - TenantName: options.TenantName, - AllowReauth: options.AllowReauth, - TokenID: options.TokenID, - } - - result := tokens2.Create(v2Client, v2Opts) - - err = client.SetTokenAndAuthResult(result) - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - if options.AllowReauth { - // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but - // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, - // this should retry authentication only once - tac := *client - tac.SetThrowaway(true) - tac.ReauthFunc = nil - tac.SetTokenAndAuthResult(nil) - tao := options - tao.AllowReauth = false - client.ReauthFunc = func() error { - err := v2auth(&tac, endpoint, tao, eo) - if err != nil { - return err - } - client.CopyTokenFrom(&tac) - return nil - } - } - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V2EndpointURL(catalog, opts) - } - - return nil -} - -// AuthenticateV3 explicitly authenticates against the identity v3 service. -func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { - return v3auth(client, "", options, eo) -} - -func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { - // Override the generated service endpoint with the one returned by the version endpoint. - v3Client, err := NewIdentityV3(client, eo) - if err != nil { - return err - } - - if endpoint != "" { - v3Client.Endpoint = endpoint - } - - result := tokens3.Create(v3Client, opts) - - err = client.SetTokenAndAuthResult(result) - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - if opts.CanReauth() { - // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but - // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, - // this should retry authentication only once - tac := *client - tac.SetThrowaway(true) - tac.ReauthFunc = nil - tac.SetTokenAndAuthResult(nil) - var tao tokens3.AuthOptionsBuilder - switch ot := opts.(type) { - case *gophercloud.AuthOptions: - o := *ot - o.AllowReauth = false - tao = &o - case *tokens3.AuthOptions: - o := *ot - o.AllowReauth = false - tao = &o - default: - tao = opts - } - client.ReauthFunc = func() error { - err := v3auth(&tac, endpoint, tao, eo) - if err != nil { - return err - } - client.CopyTokenFrom(&tac) - return nil - } - } - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V3EndpointURL(catalog, opts) - } - - return nil -} - -// NewIdentityV2 creates a ServiceClient that may be used to interact with the -// v2 identity service. 
-func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - endpoint := client.IdentityBase + "v2.0/" - clientType := "identity" - var err error - if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { - eo.ApplyDefaults(clientType) - endpoint, err = client.EndpointLocator(eo) - if err != nil { - return nil, err - } - } - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: endpoint, - Type: clientType, - }, nil -} - -// NewIdentityV3 creates a ServiceClient that may be used to access the v3 -// identity service. -func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - endpoint := client.IdentityBase + "v3/" - clientType := "identity" - var err error - if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { - eo.ApplyDefaults(clientType) - endpoint, err = client.EndpointLocator(eo) - if err != nil { - return nil, err - } - } - - // Ensure endpoint still has a suffix of v3. - // This is because EndpointLocator might have found a versionless - // endpoint or the published endpoint is still /v2.0. In both - // cases, we need to fix the endpoint to point to /v3. - base, err := utils.BaseEndpoint(endpoint) - if err != nil { - return nil, err - } - - base = gophercloud.NormalizeURL(base) - - endpoint = base + "v3/" - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: endpoint, - Type: clientType, - }, nil -} - -func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { - sc := new(gophercloud.ServiceClient) - eo.ApplyDefaults(clientType) - url, err := client.EndpointLocator(eo) - if err != nil { - return sc, err - } - sc.ProviderClient = client - sc.Endpoint = url - sc.Type = clientType - return sc, nil -} - -// NewBareMetalV1 creates a ServiceClient that may be used with the v1 -// bare metal package. -func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "baremetal") -} - -// NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1 -// bare metal introspection package. -func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "baremetal-inspector") -} - -// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 -// object storage package. -func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "object-store") -} - -// NewComputeV2 creates a ServiceClient that may be used with the v2 compute -// package. -func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "compute") -} - -// NewNetworkV2 creates a ServiceClient that may be used with the v2 network -// package. -func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "network") - sc.ResourceBase = sc.Endpoint + "v2.0/" - return sc, err -} - -// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 -// block storage service. 
-func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volume") -} - -// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 -// block storage service. -func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volumev2") -} - -// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. -func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volumev3") -} - -// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. -func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "sharev2") -} - -// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 -// CDN service. -func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "cdn") -} - -// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 -// orchestration service. -func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "orchestration") -} - -// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. -func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "database") -} - -// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS -// service. -func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "dns") - sc.ResourceBase = sc.Endpoint + "v2/" - return sc, err -} - -// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 -// image service. -func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "image") - sc.ResourceBase = sc.Endpoint + "v2/" - return sc, err -} - -// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 -// load balancer service. -func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "load-balancer") - sc.ResourceBase = sc.Endpoint + "v2.0/" - return sc, err -} - -// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering -// package. -func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "clustering") -} - -// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging -// service. 
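The service-client constructors removed in this file (NewBlockStorageV2, NewComputeV2, and so on) were normally paired with AuthenticatedClient to bootstrap a client like the one assumed in the sketches above. Illustrative only; the environment variables mirror the package's own doc examples.

	// Authenticate from the OS_* environment and build a Block Storage v2 client.
	ao, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		// handle error
	}
	provider, err := openstack.AuthenticatedClient(ao)
	if err != nil {
		// handle error
	}
	client, err := openstack.NewBlockStorageV2(provider, gophercloud.EndpointOpts{
		Region: os.Getenv("OS_REGION_NAME"),
	})
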
-func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "messaging") - sc.MoreHeaders = map[string]string{"Client-ID": clientID} - return sc, err -} - -// NewContainerV1 creates a ServiceClient that may be used with v1 container package -func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "container") -} - -// NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key -// manager service. -func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "key-manager") - sc.ResourceBase = sc.Endpoint + "v1/" - return sc, err -} - -// NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management -// package. -func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "container-infra") -} - -// NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package. -func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "workflowv2") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go deleted file mode 100644 index cedf1f4d3..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -Package openstack contains resources for the individual OpenStack projects -supported in Gophercloud. It also includes functions to authenticate to an -OpenStack cloud and for provisioning various service-level clients. - -Example of Creating a Service Client - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(ao) - client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), - }) -*/ -package openstack diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go deleted file mode 100644 index 12c8aebcf..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go +++ /dev/null @@ -1,107 +0,0 @@ -package openstack - -import ( - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" -) - -/* -V2EndpointURL discovers the endpoint URL for a specific service from a -ServiceCatalog acquired during the v2 identity service. - -The specified EndpointOpts are used to identify a unique, unambiguous endpoint -to return. It's an error both when multiple endpoints match the provided -criteria and when none do. The minimum that can be specified is a Type, but you -will also often need to specify a Name and/or a Region depending on what's -available on your OpenStack deployment. -*/ -func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. 
- var endpoints = make([]tokens2.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Region == "" || endpoint.Region == opts.Region { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // Report an error if the options were ambiguous. - if len(endpoints) > 1 { - err := &ErrMultipleMatchingEndpointsV2{} - err.Endpoints = endpoints - return "", err - } - - // Extract the appropriate URL from the matching Endpoint. - for _, endpoint := range endpoints { - switch opts.Availability { - case gophercloud.AvailabilityPublic: - return gophercloud.NormalizeURL(endpoint.PublicURL), nil - case gophercloud.AvailabilityInternal: - return gophercloud.NormalizeURL(endpoint.InternalURL), nil - case gophercloud.AvailabilityAdmin: - return gophercloud.NormalizeURL(endpoint.AdminURL), nil - default: - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err - } - } - - // Report an error if there were no matching endpoints. - err := &gophercloud.ErrEndpointNotFound{} - return "", err -} - -/* -V3EndpointURL discovers the endpoint URL for a specific service from a Catalog -acquired during the v3 identity service. - -The specified EndpointOpts are used to identify a unique, unambiguous endpoint -to return. It's an error both when multiple endpoints match the provided -criteria and when none do. The minimum that can be specified is a Type, but you -will also often need to specify a Name and/or a Region depending on what's -available on your OpenStack deployment. -*/ -func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Interface, - // Name if provided, and Region if provided. - var endpoints = make([]tokens3.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Availability != gophercloud.AvailabilityAdmin && - opts.Availability != gophercloud.AvailabilityPublic && - opts.Availability != gophercloud.AvailabilityInternal { - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err - } - if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && - (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // Report an error if the options were ambiguous. - if len(endpoints) > 1 { - return "", ErrMultipleMatchingEndpointsV3{Endpoints: endpoints} - } - - // Extract the URL from the matching Endpoint. - for _, endpoint := range endpoints { - return gophercloud.NormalizeURL(endpoint.URL), nil - } - - // Report an error if there were no matching endpoints. 
- err := &gophercloud.ErrEndpointNotFound{} - return "", err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go deleted file mode 100644 index df410b1c6..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package openstack - -import ( - "fmt" - - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" -) - -// ErrEndpointNotFound is the error when no suitable endpoint can be found -// in the user's catalog -type ErrEndpointNotFound struct{ gophercloud.BaseError } - -func (e ErrEndpointNotFound) Error() string { - return "No suitable endpoint could be found in the service catalog." -} - -// ErrInvalidAvailabilityProvided is the error when an invalid endpoint -// availability is provided -type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput } - -func (e ErrInvalidAvailabilityProvided) Error() string { - return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value) -} - -// ErrMultipleMatchingEndpointsV2 is the error when more than one endpoint -// for the given options is found in the v2 catalog -type ErrMultipleMatchingEndpointsV2 struct { - gophercloud.BaseError - Endpoints []tokens2.Endpoint -} - -func (e ErrMultipleMatchingEndpointsV2) Error() string { - return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) -} - -// ErrMultipleMatchingEndpointsV3 is the error when more than one endpoint -// for the given options is found in the v3 catalog -type ErrMultipleMatchingEndpointsV3 struct { - gophercloud.BaseError - Endpoints []tokens3.Endpoint -} - -func (e ErrMultipleMatchingEndpointsV3) Error() string { - return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) -} - -// ErrNoAuthURL is the error when the OS_AUTH_URL environment variable is not -// found -type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoAuthURL) Error() string { - return "Environment variable OS_AUTH_URL needs to be set." -} - -// ErrNoUsername is the error when the OS_USERNAME environment variable is not -// found -type ErrNoUsername struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoUsername) Error() string { - return "Environment variable OS_USERNAME needs to be set." -} - -// ErrNoPassword is the error when the OS_PASSWORD environment variable is not -// found -type ErrNoPassword struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoPassword) Error() string { - return "Environment variable OS_PASSWORD needs to be set." -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go deleted file mode 100644 index 45623369e..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Package tenants provides information and interaction with the -tenants API resource for the OpenStack Identity service. - -See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 -and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants -for more information. 
- -Example to List Tenants - - listOpts := tenants.ListOpts{ - Limit: 2, - } - - allPages, err := tenants.List(identityClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allTenants, err := tenants.ExtractTenants(allPages) - if err != nil { - panic(err) - } - - for _, tenant := range allTenants { - fmt.Printf("%+v\n", tenant) - } - -Example to Create a Tenant - - createOpts := tenants.CreateOpts{ - Name: "tenant_name", - Description: "this is a tenant", - Enabled: gophercloud.Enabled, - } - - tenant, err := tenants.Create(identityClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Tenant - - tenantID := "e6db6ed6277c461a853458589063b295" - - updateOpts := tenants.UpdateOpts{ - Description: "this is a new description", - Enabled: gophercloud.Disabled, - } - - tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Tenant - - tenantID := "e6db6ed6277c461a853458589063b295" - - err := tenants.Delete(identitYClient, tenantID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package tenants diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go deleted file mode 100644 index f21a58f10..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go +++ /dev/null @@ -1,116 +0,0 @@ -package tenants - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts filters the Tenants that are returned by the List call. -type ListOpts struct { - // Marker is the ID of the last Tenant on the previous page. - Marker string `q:"marker"` - - // Limit specifies the page size. - Limit int `q:"limit"` -} - -// List enumerates the Tenants to which the current token has access. -func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { - url := listURL(client) - if opts != nil { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return pagination.Pager{Err: err} - } - url += q.String() - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return TenantPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOpts represents the options needed when creating new tenant. -type CreateOpts struct { - // Name is the name of the tenant. - Name string `json:"name" required:"true"` - - // Description is the description of the tenant. - Description string `json:"description,omitempty"` - - // Enabled sets the tenant status to enabled or disabled. - Enabled *bool `json:"enabled,omitempty"` -} - -// CreateOptsBuilder enables extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToTenantCreateMap() (map[string]interface{}, error) -} - -// ToTenantCreateMap assembles a request body based on the contents of -// a CreateOpts. -func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "tenant") -} - -// Create is the operation responsible for creating new tenant. 
-func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToTenantCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return -} - -// Get requests details on a single tenant by ID. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToTenantUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts specifies the base attributes that may be updated on an existing -// tenant. -type UpdateOpts struct { - // Name is the name of the tenant. - Name string `json:"name,omitempty"` - - // Description is the description of the tenant. - Description *string `json:"description,omitempty"` - - // Enabled sets the tenant status to enabled or disabled. - Enabled *bool `json:"enabled,omitempty"` -} - -// ToTenantUpdateMap formats an UpdateOpts structure into a request body. -func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "tenant") -} - -// Update is the operation responsible for updating exist tenants by their TenantID. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToTenantUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete is the operation responsible for permanently deleting a tenant. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go deleted file mode 100644 index bb6c2c6b0..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go +++ /dev/null @@ -1,91 +0,0 @@ -package tenants - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Tenant is a grouping of users in the identity service. -type Tenant struct { - // ID is a unique identifier for this tenant. - ID string `json:"id"` - - // Name is a friendlier user-facing name for this tenant. - Name string `json:"name"` - - // Description is a human-readable explanation of this Tenant's purpose. - Description string `json:"description"` - - // Enabled indicates whether or not a tenant is active. - Enabled bool `json:"enabled"` -} - -// TenantPage is a single page of Tenant results. -type TenantPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines whether or not a page of Tenants contains any results. -func (r TenantPage) IsEmpty() (bool, error) { - tenants, err := ExtractTenants(r) - return len(tenants) == 0, err -} - -// NextPageURL extracts the "next" link from the tenants_links section of the result. 
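For reviewers of this removal, a minimal sketch of paging through tenants with the v2 tenants package deleted above. It assumes an already-authenticated identity v2 client named identityClient and gophercloud's pagination.Pager.EachPage helper (not shown in this patch); treat it as an illustration, not code from this repository.

    err := tenants.List(identityClient, &tenants.ListOpts{Limit: 20}).EachPage(func(page pagination.Page) (bool, error) {
        all, err := tenants.ExtractTenants(page)
        if err != nil {
            return false, err
        }
        for _, t := range all {
            fmt.Printf("%s (%s) enabled=%t\n", t.Name, t.ID, t.Enabled)
        }
        // Returning true continues to the next page; false stops iteration.
        return true, nil
    })
    if err != nil {
        panic(err)
    }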
-func (r TenantPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"tenants_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// ExtractTenants returns a slice of Tenants contained in a single page of -// results. -func ExtractTenants(r pagination.Page) ([]Tenant, error) { - var s struct { - Tenants []Tenant `json:"tenants"` - } - err := (r.(TenantPage)).ExtractInto(&s) - return s.Tenants, err -} - -type tenantResult struct { - gophercloud.Result -} - -// Extract interprets any tenantResults as a Tenant. -func (r tenantResult) Extract() (*Tenant, error) { - var s struct { - Tenant *Tenant `json:"tenant"` - } - err := r.ExtractInto(&s) - return s.Tenant, err -} - -// GetResult is the response from a Get request. Call its Extract method to -// interpret it as a Tenant. -type GetResult struct { - tenantResult -} - -// CreateResult is the response from a Create request. Call its Extract method -// to interpret it as a Tenant. -type CreateResult struct { - tenantResult -} - -// DeleteResult is the response from a Get request. Call its ExtractErr method -// to determine if the call succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// UpdateResult is the response from a Update request. Call its Extract method -// to interpret it as a Tenant. -type UpdateResult struct { - tenantResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go deleted file mode 100644 index 0f0266907..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package tenants - -import "github.com/gophercloud/gophercloud" - -func listURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tenants") -} - -func getURL(client *gophercloud.ServiceClient, tenantID string) string { - return client.ServiceURL("tenants", tenantID) -} - -func createURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tenants") -} - -func deleteURL(client *gophercloud.ServiceClient, tenantID string) string { - return client.ServiceURL("tenants", tenantID) -} - -func updateURL(client *gophercloud.ServiceClient, tenantID string) string { - return client.ServiceURL("tenants", tenantID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go deleted file mode 100644 index 5375eea87..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Package tokens provides information and interaction with the token API -resource for the OpenStack Identity service. 
- -For more information, see: -http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 - -Example to Create an Unscoped Token from a Password - - authOpts := gophercloud.AuthOptions{ - Username: "user", - Password: "pass" - } - - token, err := tokens.Create(identityClient, authOpts).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Tenant ID and Password - - authOpts := gophercloud.AuthOptions{ - Username: "user", - Password: "password", - TenantID: "fc394f2ab2df4114bde39905f800dc57" - } - - token, err := tokens.Create(identityClient, authOpts).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Tenant Name and Password - - authOpts := gophercloud.AuthOptions{ - Username: "user", - Password: "password", - TenantName: "tenantname" - } - - token, err := tokens.Create(identityClient, authOpts).ExtractToken() - if err != nil { - panic(err) - } -*/ -package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go deleted file mode 100644 index ab32368cc..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go +++ /dev/null @@ -1,103 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// PasswordCredentialsV2 represents the required options to authenticate -// with a username and password. -type PasswordCredentialsV2 struct { - Username string `json:"username" required:"true"` - Password string `json:"password" required:"true"` -} - -// TokenCredentialsV2 represents the required options to authenticate -// with a token. -type TokenCredentialsV2 struct { - ID string `json:"id,omitempty" required:"true"` -} - -// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the -// AuthOptionsBuilder interface. -type AuthOptionsV2 struct { - PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"` - - // The TenantID and TenantName fields are optional for the Identity V2 API. - // Some providers allow you to specify a TenantName instead of the TenantId. - // Some require both. Your provider's authentication policies will determine - // how these fields influence authentication. - TenantID string `json:"tenantId,omitempty"` - TenantName string `json:"tenantName,omitempty"` - - // TokenCredentials allows users to authenticate (possibly as another user) - // with an authentication token ID. - TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"` -} - -// AuthOptionsBuilder allows extensions to add additional parameters to the -// token create request. -type AuthOptionsBuilder interface { - // ToTokenCreateMap assembles the Create request body, returning an error - // if parameters are missing or inconsistent. - ToTokenV2CreateMap() (map[string]interface{}, error) -} - -// AuthOptions are the valid options for Openstack Identity v2 authentication. -// For field descriptions, see gophercloud.AuthOptions. -type AuthOptions struct { - IdentityEndpoint string `json:"-"` - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - TenantID string `json:"tenantId,omitempty"` - TenantName string `json:"tenantName,omitempty"` - AllowReauth bool `json:"-"` - TokenID string -} - -// ToTokenV2CreateMap builds a token request body from the given AuthOptions. 
-func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { - v2Opts := AuthOptionsV2{ - TenantID: opts.TenantID, - TenantName: opts.TenantName, - } - - if opts.Password != "" { - v2Opts.PasswordCredentials = &PasswordCredentialsV2{ - Username: opts.Username, - Password: opts.Password, - } - } else { - v2Opts.TokenCredentials = &TokenCredentialsV2{ - ID: opts.TokenID, - } - } - - b, err := gophercloud.BuildRequestBody(v2Opts, "auth") - if err != nil { - return nil, err - } - return b, nil -} - -// Create authenticates to the identity service and attempts to acquire a Token. -// Generally, rather than interact with this call directly, end users should -// call openstack.AuthenticatedClient(), which abstracts all of the gory details -// about navigating service catalogs and such. -func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) { - b, err := auth.ToTokenV2CreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - }) - return -} - -// Get validates and retrieves information for user's token. -func Get(client *gophercloud.ServiceClient, token string) (r GetResult) { - _, r.Err = client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go deleted file mode 100644 index ee5da37f4..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go +++ /dev/null @@ -1,174 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" -) - -// Token provides only the most basic information related to an authentication -// token. -type Token struct { - // ID provides the primary means of identifying a user to the OpenStack API. - // OpenStack defines this field as an opaque value, so do not depend on its - // content. It is safe, however, to compare for equality. - ID string - - // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the - // authentication token becomes invalid. After this point in time, future - // API requests made using this authentication token will respond with - // errors. Either the caller will need to reauthenticate manually, or more - // preferably, the caller should exploit automatic re-authentication. - // See the AuthOptions structure for more details. - ExpiresAt time.Time - - // Tenant provides information about the tenant to which this token grants - // access. - Tenant tenants.Tenant -} - -// Role is a role for a user. -type Role struct { - Name string `json:"name"` -} - -// User is an OpenStack user. -type User struct { - ID string `json:"id"` - Name string `json:"name"` - UserName string `json:"username"` - Roles []Role `json:"roles"` -} - -// Endpoint represents a single API endpoint offered by a service. -// It provides the public and internal URLs, if supported, along with a region -// specifier, again if provided. -// -// The significance of the Region field will depend upon your provider. 
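A short usage sketch of the v2 tokens package being dropped from vendor/. identityClient is an assumed, already-built identity v2 ServiceClient; the calls mirror the Create, ExtractToken, and ExtractServiceCatalog signatures shown in the deleted files.

    opts := tokens.AuthOptions{
        Username:   "user",
        Password:   "secret",
        TenantName: "demo",
    }

    result := tokens.Create(identityClient, opts)
    token, err := result.ExtractToken()
    if err != nil {
        panic(err)
    }

    // The same CreateResult also carries the service catalog.
    catalog, err := result.ExtractServiceCatalog()
    if err != nil {
        panic(err)
    }
    fmt.Println(token.ID, token.ExpiresAt, len(catalog.Entries))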
-// -// In addition, the interface offered by the service will have version -// information associated with it through the VersionId, VersionInfo, and -// VersionList fields, if provided or supported. -// -// In all cases, fields which aren't supported by the provider and service -// combined will assume a zero-value (""). -type Endpoint struct { - TenantID string `json:"tenantId"` - PublicURL string `json:"publicURL"` - InternalURL string `json:"internalURL"` - AdminURL string `json:"adminURL"` - Region string `json:"region"` - VersionID string `json:"versionId"` - VersionInfo string `json:"versionInfo"` - VersionList string `json:"versionList"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V2 service -// catalog listing. -// -// Each class of service, such as cloud DNS or block storage services, will have -// a single CatalogEntry representing it. -// -// Note: when looking for the desired service, try, whenever possible, to key -// off the type field. Otherwise, you'll tie the representation of the service -// to a specific provider. -type CatalogEntry struct { - // Name will contain the provider-specified name for the service. - Name string `json:"name"` - - // Type will contain a type string if OpenStack defines a type for the - // service. Otherwise, for provider-specific services, the provider may assign - // their own type strings. - Type string `json:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that - // may exist for the service. - Endpoints []Endpoint `json:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, -// successful authentication. -type ServiceCatalog struct { - Entries []CatalogEntry -} - -// CreateResult is the response from a Create request. Use ExtractToken() to -// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a -// service catalog. -type CreateResult struct { - gophercloud.Result -} - -// GetResult is the deferred response from a Get call, which is the same with a -// Created token. Use ExtractUser() to interpret it as a User. -type GetResult struct { - CreateResult -} - -// ExtractToken returns the just-created Token from a CreateResult. -func (r CreateResult) ExtractToken() (*Token, error) { - var s struct { - Access struct { - Token struct { - Expires string `json:"expires"` - ID string `json:"id"` - Tenant tenants.Tenant `json:"tenant"` - } `json:"token"` - } `json:"access"` - } - - err := r.ExtractInto(&s) - if err != nil { - return nil, err - } - - expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires) - if err != nil { - return nil, err - } - - return &Token{ - ID: s.Access.Token.ID, - ExpiresAt: expiresTs, - Tenant: s.Access.Token.Tenant, - }, nil -} - -// ExtractTokenID implements the gophercloud.AuthResult interface. The returned -// string is the same as the ID field of the Token struct returned from -// ExtractToken(). -func (r CreateResult) ExtractTokenID() (string, error) { - var s struct { - Access struct { - Token struct { - ID string `json:"id"` - } `json:"token"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return s.Access.Token.ID, err -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along -// with the user's Token. 
-func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - var s struct { - Access struct { - Entries []CatalogEntry `json:"serviceCatalog"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return &ServiceCatalog{Entries: s.Access.Entries}, err -} - -// ExtractUser returns the User from a GetResult. -func (r GetResult) ExtractUser() (*User, error) { - var s struct { - Access struct { - User User `json:"user"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return &s.Access.User, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go deleted file mode 100644 index ee0a28f20..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// CreateURL generates the URL used to create new Tokens. -func CreateURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tokens") -} - -// GetURL generates the URL used to Validate Tokens. -func GetURL(client *gophercloud.ServiceClient, token string) string { - return client.ServiceURL("tokens", token) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go deleted file mode 100644 index 966e128f1..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Package tokens provides information and interaction with the token API -resource for the OpenStack Identity service. - -For more information, see: -http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 - -Example to Create a Token From a Username and Password - - authOptions := tokens.AuthOptions{ - UserID: "username", - Password: "password", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token From a Username, Password, and Domain - - authOptions := tokens.AuthOptions{ - UserID: "username", - Password: "password", - DomainID: "default", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - - authOptions = tokens.AuthOptions{ - UserID: "username", - Password: "password", - DomainName: "default", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token From a Token - - authOptions := tokens.AuthOptions{ - TokenID: "token_id", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Project ID Scope - - scope := tokens.Scope{ - ProjectID: "0fe36e73809d46aeae6705c39077b1b3", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Domain ID Scope - - scope := tokens.Scope{ - DomainID: "default", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a 
Token from a Username and Password with Project Name Scope - - scope := tokens.Scope{ - ProjectName: "project_name", - DomainID: "default", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -*/ -package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go deleted file mode 100644 index e4d766b23..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go +++ /dev/null @@ -1,162 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// Scope allows a created token to be limited to a specific domain or project. -type Scope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string -} - -// AuthOptionsBuilder provides the ability for extensions to add additional -// parameters to AuthOptions. Extensions must satisfy all required methods. -type AuthOptionsBuilder interface { - // ToTokenV3CreateMap assembles the Create request body, returning an error - // if parameters are missing or inconsistent. - ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error) - ToTokenV3ScopeMap() (map[string]interface{}, error) - CanReauth() bool -} - -// AuthOptions represents options for authenticating a user. -type AuthOptions struct { - // IdentityEndpoint specifies the HTTP endpoint that is required to work with - // the Identity API of the appropriate version. While it's ultimately needed - // by all of the identity services, it will often be populated by a - // provider-level function. - IdentityEndpoint string `json:"-"` - - // Username is required if using Identity V2 API. Consult with your provider's - // control panel to discover your account's username. In Identity V3, either - // UserID or a combination of Username and DomainID or DomainName are needed. - Username string `json:"username,omitempty"` - UserID string `json:"id,omitempty"` - - Password string `json:"password,omitempty"` - - // At most one of DomainID and DomainName must be provided if using Username - // with Identity V3. Otherwise, either are optional. - DomainID string `json:"-"` - DomainName string `json:"name,omitempty"` - - // AllowReauth should be set to true if you grant permission for Gophercloud - // to cache your credentials in memory, and to allow Gophercloud to attempt - // to re-authenticate automatically if/when your token expires. If you set - // it to false, it will not cache these settings, but re-authentication will - // not be possible. This setting defaults to false. - AllowReauth bool `json:"-"` - - // TokenID allows users to authenticate (possibly as another user) with an - // authentication token ID. - TokenID string `json:"-"` - - // Authentication through Application Credentials requires supplying name, project and secret - // For project we can use TenantID - ApplicationCredentialID string `json:"-"` - ApplicationCredentialName string `json:"-"` - ApplicationCredentialSecret string `json:"-"` - - Scope Scope `json:"-"` -} - -// ToTokenV3CreateMap builds a request body from AuthOptions. 
-func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { - gophercloudAuthOpts := gophercloud.AuthOptions{ - Username: opts.Username, - UserID: opts.UserID, - Password: opts.Password, - DomainID: opts.DomainID, - DomainName: opts.DomainName, - AllowReauth: opts.AllowReauth, - TokenID: opts.TokenID, - ApplicationCredentialID: opts.ApplicationCredentialID, - ApplicationCredentialName: opts.ApplicationCredentialName, - ApplicationCredentialSecret: opts.ApplicationCredentialSecret, - } - - return gophercloudAuthOpts.ToTokenV3CreateMap(scope) -} - -// ToTokenV3CreateMap builds a scope request body from AuthOptions. -func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - scope := gophercloud.AuthScope(opts.Scope) - - gophercloudAuthOpts := gophercloud.AuthOptions{ - Scope: &scope, - DomainID: opts.DomainID, - DomainName: opts.DomainName, - } - - return gophercloudAuthOpts.ToTokenV3ScopeMap() -} - -func (opts *AuthOptions) CanReauth() bool { - return opts.AllowReauth -} - -func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string { - return map[string]string{ - "X-Subject-Token": subjectToken, - } -} - -// Create authenticates and either generates a new token, or changes the Scope -// of an existing token. -func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) { - scope, err := opts.ToTokenV3ScopeMap() - if err != nil { - r.Err = err - return - } - - b, err := opts.ToTokenV3CreateMap(scope) - if err != nil { - r.Err = err - return - } - - resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - }) - r.Err = err - if resp != nil { - r.Header = resp.Header - } - return -} - -// Get validates and retrieves information about another token. -func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { - resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - OkCodes: []int{200, 203}, - }) - if resp != nil { - r.Header = resp.Header - } - r.Err = err - return -} - -// Validate determines if a specified token is valid or not. -func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { - resp, err := c.Head(tokenURL(c), &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - OkCodes: []int{200, 204, 404}, - }) - if err != nil { - return false, err - } - - return resp.StatusCode == 200 || resp.StatusCode == 204, nil -} - -// Revoke immediately makes specified token invalid. -func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) { - _, r.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go deleted file mode 100644 index 6f26c96bc..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go +++ /dev/null @@ -1,178 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/gophercloud/gophercloud" -) - -// Endpoint represents a single API endpoint offered by a service. -// It matches either a public, internal or admin URL. -// If supported, it contains a region specifier, again if provided. -// The significance of the Region field will depend upon your provider. 
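A sketch of token introspection with the v3 tokens package removed above; identityClient and subjectTokenID are assumed placeholders, and the calls follow the Validate and Revoke signatures visible in the deleted requests.go.

    valid, err := tokens.Validate(identityClient, subjectTokenID)
    if err != nil {
        panic(err)
    }
    if valid {
        // Revoke returns a RevokeResult; its embedded Err field reports failures.
        if res := tokens.Revoke(identityClient, subjectTokenID); res.Err != nil {
            panic(res.Err)
        }
    }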
-type Endpoint struct { - ID string `json:"id"` - Region string `json:"region"` - RegionID string `json:"region_id"` - Interface string `json:"interface"` - URL string `json:"url"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V3 service -// catalog listing. Each class of service, such as cloud DNS or block storage -// services, could have multiple CatalogEntry representing it (one by interface -// type, e.g public, admin or internal). -// -// Note: when looking for the desired service, try, whenever possible, to key -// off the type field. Otherwise, you'll tie the representation of the service -// to a specific provider. -type CatalogEntry struct { - // Service ID - ID string `json:"id"` - - // Name will contain the provider-specified name for the service. - Name string `json:"name"` - - // Type will contain a type string if OpenStack defines a type for the - // service. Otherwise, for provider-specific services, the provider may - // assign their own type strings. - Type string `json:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that - // may exist for the service. - Endpoints []Endpoint `json:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, -// successful authentication. -type ServiceCatalog struct { - Entries []CatalogEntry `json:"catalog"` -} - -// Domain provides information about the domain to which this token grants -// access. -type Domain struct { - ID string `json:"id"` - Name string `json:"name"` -} - -// User represents a user resource that exists in the Identity Service. -type User struct { - Domain Domain `json:"domain"` - ID string `json:"id"` - Name string `json:"name"` -} - -// Role provides information about roles to which User is authorized. -type Role struct { - ID string `json:"id"` - Name string `json:"name"` -} - -// Project provides information about project to which User is authorized. -type Project struct { - Domain Domain `json:"domain"` - ID string `json:"id"` - Name string `json:"name"` -} - -// commonResult is the response from a request. A commonResult has various -// methods which can be used to extract different details about the result. -type commonResult struct { - gophercloud.Result -} - -// Extract is a shortcut for ExtractToken. -// This function is deprecated and still present for backward compatibility. -func (r commonResult) Extract() (*Token, error) { - return r.ExtractToken() -} - -// ExtractToken interprets a commonResult as a Token. -func (r commonResult) ExtractToken() (*Token, error) { - var s Token - err := r.ExtractInto(&s) - if err != nil { - return nil, err - } - - // Parse the token itself from the stored headers. - s.ID = r.Header.Get("X-Subject-Token") - - return &s, err -} - -// ExtractTokenID implements the gophercloud.AuthResult interface. The returned -// string is the same as the ID field of the Token struct returned from -// ExtractToken(). -func (r CreateResult) ExtractTokenID() (string, error) { - return r.Header.Get("X-Subject-Token"), r.Err -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along -// with the user's Token. -func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - var s ServiceCatalog - err := r.ExtractInto(&s) - return &s, err -} - -// ExtractUser returns the User that is the owner of the Token. 
-func (r commonResult) ExtractUser() (*User, error) { - var s struct { - User *User `json:"user"` - } - err := r.ExtractInto(&s) - return s.User, err -} - -// ExtractRoles returns Roles to which User is authorized. -func (r commonResult) ExtractRoles() ([]Role, error) { - var s struct { - Roles []Role `json:"roles"` - } - err := r.ExtractInto(&s) - return s.Roles, err -} - -// ExtractProject returns Project to which User is authorized. -func (r commonResult) ExtractProject() (*Project, error) { - var s struct { - Project *Project `json:"project"` - } - err := r.ExtractInto(&s) - return s.Project, err -} - -// CreateResult is the response from a Create request. Use ExtractToken() -// to interpret it as a Token, or ExtractServiceCatalog() to interpret it -// as a service catalog. -type CreateResult struct { - commonResult -} - -// GetResult is the response from a Get request. Use ExtractToken() -// to interpret it as a Token, or ExtractServiceCatalog() to interpret it -// as a service catalog. -type GetResult struct { - commonResult -} - -// RevokeResult is response from a Revoke request. -type RevokeResult struct { - commonResult -} - -// Token is a string that grants a user access to a controlled set of services -// in an OpenStack provider. Each Token is valid for a set length of time. -type Token struct { - // ID is the issued token. - ID string `json:"id"` - - // ExpiresAt is the timestamp at which this token will no longer be accepted. - ExpiresAt time.Time `json:"expires_at"` -} - -func (r commonResult) ExtractInto(v interface{}) error { - return r.ExtractIntoStructPtr(v, "token") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go deleted file mode 100644 index 2f864a31c..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -func tokenURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("auth", "tokens") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/requests.go deleted file mode 100644 index 2028b4831..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/requests.go +++ /dev/null @@ -1,381 +0,0 @@ -package shares - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToShareCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains the options for create a Share. This object is -// passed to shares.Create(). For more information about these parameters, -// please refer to the Share object, or the shared file systems API v2 -// documentation -type CreateOpts struct { - // Defines the share protocol to use - ShareProto string `json:"share_proto" required:"true"` - // Size in GB - Size int `json:"size" required:"true"` - // Defines the share name - Name string `json:"name,omitempty"` - // Share description - Description string `json:"description,omitempty"` - // DisplayName is equivalent to Name. 
The API supports using both - // This is an inherited attribute from the block storage API - DisplayName string `json:"display_name,omitempty"` - // DisplayDescription is equivalent to Description. The API supports using both - // This is an inherited attribute from the block storage API - DisplayDescription string `json:"display_description,omitempty"` - // ShareType defines the sharetype. If omitted, a default share type is used - ShareType string `json:"share_type,omitempty"` - // VolumeType is deprecated but supported. Either ShareType or VolumeType can be used - VolumeType string `json:"volume_type,omitempty"` - // The UUID from which to create a share - SnapshotID string `json:"snapshot_id,omitempty"` - // Determines whether or not the share is public - IsPublic *bool `json:"is_public,omitempty"` - // Key value pairs of user defined metadata - Metadata map[string]string `json:"metadata,omitempty"` - // The UUID of the share network to which the share belongs to - ShareNetworkID string `json:"share_network_id,omitempty"` - // The UUID of the consistency group to which the share belongs to - ConsistencyGroupID string `json:"consistency_group_id,omitempty"` - // The availability zone of the share - AvailabilityZone string `json:"availability_zone,omitempty"` -} - -// ToShareCreateMap assembles a request body based on the contents of a -// CreateOpts. -func (opts CreateOpts) ToShareCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "share") -} - -// Create will create a new Share based on the values in CreateOpts. To extract -// the Share object from the response, call the Extract method on the -// CreateResult. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToShareCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return -} - -// ListOpts holds options for listing Shares. It is passed to the -// shares.List function. -type ListOpts struct { - // (Admin only). Defines whether to list the requested resources for all projects. - AllTenants bool `q:"all_tenants"` - // The share name. - Name string `q:"name"` - // Filters by a share status. - Status string `q:"status"` - // The UUID of the share server. - ShareServerID string `q:"share_server_id"` - // One or more metadata key and value pairs as a dictionary of strings. - Metadata map[string]string `q:"metadata"` - // The extra specifications for the share type. - ExtraSpecs map[string]string `q:"extra_specs"` - // The UUID of the share type. - ShareTypeID string `q:"share_type_id"` - // The maximum number of shares to return. - Limit int `q:"limit"` - // The offset to define start point of share or share group listing. - Offset int `q:"offset"` - // The key to sort a list of shares. - SortKey string `q:"sort_key"` - // The direction to sort a list of shares. - SortDir string `q:"sort_dir"` - // The UUID of the share’s base snapshot to filter the request based on. - SnapshotID string `q:"snapshot_id"` - // The share host name. - Host string `q:"host"` - // The share network ID. - ShareNetworkID string `q:"share_network_id"` - // The UUID of the project in which the share was created. Useful with all_tenants parameter. - ProjectID string `q:"project_id"` - // The level of visibility for the share. - IsPublic *bool `q:"is_public"` - // The UUID of a share group to filter resource. 
- ShareGroupID string `q:"share_group_id"` - // The export location UUID that can be used to filter shares or share instances. - ExportLocationID string `q:"export_location_id"` - // The export location path that can be used to filter shares or share instances. - ExportLocationPath string `q:"export_location_path"` - // The name pattern that can be used to filter shares, share snapshots, share networks or share groups. - NamePattern string `q:"name~"` - // The description pattern that can be used to filter shares, share snapshots, share networks or share groups. - DescriptionPattern string `q:"description~"` - // Whether to show count in API response or not, default is False. - WithCount bool `q:"with_count"` - // DisplayName is equivalent to Name. The API supports using both - // This is an inherited attribute from the block storage API - DisplayName string `q:"display_name"` - // Equivalent to NamePattern. - DisplayNamePattern string `q:"display_name~"` - // VolumeTypeID is deprecated but supported. Either ShareTypeID or VolumeTypeID can be used - VolumeTypeID string `q:"volume_type_id"` - // The UUID of the share group snapshot. - ShareGroupSnapshotID string `q:"share_group_snapshot_id"` - // DisplayDescription is equivalent to Description. The API supports using both - // This is an inherited attribute from the block storage API - DisplayDescription string `q:"display_description"` - // Equivalent to DescriptionPattern - DisplayDescriptionPattern string `q:"display_description~"` -} - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToShareListQuery() (string, error) -} - -// ToShareListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToShareListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// ListDetail returns []Share optionally limited by the conditions provided in ListOpts. -func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listDetailURL(client) - if opts != nil { - query, err := opts.ToShareListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - p := SharePage{pagination.MarkerPageBase{PageResult: r}} - p.MarkerPageBase.Owner = p - return p - }) -} - -// Delete will delete an existing Share with the given UUID. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// Get will get a single share with given UUID -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// GetExportLocations will get shareID's export locations. -// Client must have Microversion set; minimum supported microversion for GetExportLocations is 2.14. -func GetExportLocations(client *gophercloud.ServiceClient, id string) (r GetExportLocationsResult) { - _, r.Err = client.Get(getExportLocationsURL(client, id), &r.Body, nil) - return -} - -// GrantAccessOptsBuilder allows extensions to add additional parameters to the -// GrantAccess request. -type GrantAccessOptsBuilder interface { - ToGrantAccessMap() (map[string]interface{}, error) -} - -// GrantAccessOpts contains the options for creation of an GrantAccess request. 
-// For more information about these parameters, please, refer to the shared file systems API v2, -// Share Actions, Grant Access documentation -type GrantAccessOpts struct { - // The access rule type that can be "ip", "cert" or "user". - AccessType string `json:"access_type"` - // The value that defines the access that can be a valid format of IP, cert or user. - AccessTo string `json:"access_to"` - // The access level to the share is either "rw" or "ro". - AccessLevel string `json:"access_level"` -} - -// ToGrantAccessMap assembles a request body based on the contents of a -// GrantAccessOpts. -func (opts GrantAccessOpts) ToGrantAccessMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "allow_access") -} - -// GrantAccess will grant access to a Share based on the values in GrantAccessOpts. To extract -// the GrantAccess object from the response, call the Extract method on the GrantAccessResult. -// Client must have Microversion set; minimum supported microversion for GrantAccess is 2.7. -func GrantAccess(client *gophercloud.ServiceClient, id string, opts GrantAccessOptsBuilder) (r GrantAccessResult) { - b, err := opts.ToGrantAccessMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(grantAccessURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// RevokeAccessOptsBuilder allows extensions to add additional parameters to the -// RevokeAccess request. -type RevokeAccessOptsBuilder interface { - ToRevokeAccessMap() (map[string]interface{}, error) -} - -// RevokeAccessOpts contains the options for creation of a RevokeAccess request. -// For more information about these parameters, please, refer to the shared file systems API v2, -// Share Actions, Revoke Access documentation -type RevokeAccessOpts struct { - AccessID string `json:"access_id"` -} - -// ToRevokeAccessMap assembles a request body based on the contents of a -// RevokeAccessOpts. -func (opts RevokeAccessOpts) ToRevokeAccessMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "deny_access") -} - -// RevokeAccess will revoke an existing access to a Share based on the values in RevokeAccessOpts. -// RevokeAccessResult contains only the error. To extract it, call the ExtractErr method on -// the RevokeAccessResult. Client must have Microversion set; minimum supported microversion -// for RevokeAccess is 2.7. -func RevokeAccess(client *gophercloud.ServiceClient, id string, opts RevokeAccessOptsBuilder) (r RevokeAccessResult) { - b, err := opts.ToRevokeAccessMap() - if err != nil { - r.Err = err - return - } - - _, r.Err = client.Post(revokeAccessURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - - return -} - -// ListAccessRights lists all access rules assigned to a Share based on its id. To extract -// the AccessRight slice from the response, call the Extract method on the ListAccessRightsResult. -// Client must have Microversion set; minimum supported microversion for ListAccessRights is 2.7. -func ListAccessRights(client *gophercloud.ServiceClient, id string) (r ListAccessRightsResult) { - requestBody := map[string]interface{}{"access_list": nil} - _, r.Err = client.Post(listAccessRightsURL(client, id), requestBody, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// ExtendOptsBuilder allows extensions to add additional parameters to the -// Extend request. 
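A hedged sketch of the access-rule calls deleted here. shareClient and shareID are assumed placeholders for a shared-file-systems v2 ServiceClient and a share UUID; per the comments above, the client's Microversion must be at least "2.7" for GrantAccess and ListAccessRights to be accepted.

    shareClient.Microversion = "2.7"

    rule, err := shares.GrantAccess(shareClient, shareID, shares.GrantAccessOpts{
        AccessType:  "ip",
        AccessTo:    "203.0.113.10",
        AccessLevel: "rw",
    }).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Println("granted:", rule.ID, rule.State)

    rules, err := shares.ListAccessRights(shareClient, shareID).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Println("total rules:", len(rules))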
-type ExtendOptsBuilder interface { - ToShareExtendMap() (map[string]interface{}, error) -} - -// ExtendOpts contains options for extending a Share. -// For more information about these parameters, please, refer to the shared file systems API v2, -// Share Actions, Extend share documentation -type ExtendOpts struct { - // New size in GBs. - NewSize int `json:"new_size"` -} - -// ToShareExtendMap assembles a request body based on the contents of a -// ExtendOpts. -func (opts ExtendOpts) ToShareExtendMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "extend") -} - -// Extend will extend the capacity of an existing share. ExtendResult contains only the error. -// To extract it, call the ExtractErr method on the ExtendResult. -// Client must have Microversion set; minimum supported microversion for Extend is 2.7. -func Extend(client *gophercloud.ServiceClient, id string, opts ExtendOptsBuilder) (r ExtendResult) { - b, err := opts.ToShareExtendMap() - if err != nil { - r.Err = err - return - } - - _, r.Err = client.Post(extendURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - - return -} - -// ShrinkOptsBuilder allows extensions to add additional parameters to the -// Shrink request. -type ShrinkOptsBuilder interface { - ToShareShrinkMap() (map[string]interface{}, error) -} - -// ShrinkOpts contains options for shrinking a Share. -// For more information about these parameters, please, refer to the shared file systems API v2, -// Share Actions, Shrink share documentation -type ShrinkOpts struct { - // New size in GBs. - NewSize int `json:"new_size"` -} - -// ToShareShrinkMap assembles a request body based on the contents of a -// ShrinkOpts. -func (opts ShrinkOpts) ToShareShrinkMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "shrink") -} - -// Shrink will shrink the capacity of an existing share. ShrinkResult contains only the error. -// To extract it, call the ExtractErr method on the ShrinkResult. -// Client must have Microversion set; minimum supported microversion for Shrink is 2.7. -func Shrink(client *gophercloud.ServiceClient, id string, opts ShrinkOptsBuilder) (r ShrinkResult) { - b, err := opts.ToShareShrinkMap() - if err != nil { - r.Err = err - return - } - - _, r.Err = client.Post(shrinkURL(client, id), b, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToShareUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contain options for updating an existing Share. This object is passed -// to the share.Update function. For more information about the parameters, see -// the Share object. -type UpdateOpts struct { - // Share name. Manila share update logic doesn't have a "name" alias. - DisplayName *string `json:"display_name,omitempty"` - // Share description. Manila share update logic doesn't have a "description" alias. - DisplayDescription *string `json:"display_description,omitempty"` - // Determines whether or not the share is public - IsPublic *bool `json:"is_public,omitempty"` -} - -// ToShareUpdateMap assembles a request body based on the contents of an -// UpdateOpts. -func (opts UpdateOpts) ToShareUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "share") -} - -// Update will update the Share with provided information. 
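A sketch of resizing a share with the Extend call removed above; shareClient and shareID are again assumed placeholders, and the Microversion requirement noted in the deleted comments still applies.

    // Grow the share to 20 GB; ExtendResult only carries an error.
    if err := shares.Extend(shareClient, shareID, shares.ExtendOpts{NewSize: 20}).ExtractErr(); err != nil {
        panic(err)
    }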
To extract the updated -// Share from the response, call the Extract method on the UpdateResult. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToShareUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/results.go deleted file mode 100644 index 0deae373b..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/results.go +++ /dev/null @@ -1,326 +0,0 @@ -package shares - -import ( - "encoding/json" - "net/url" - "strconv" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -const ( - invalidMarker = "-1" -) - -// Share contains all information associated with an OpenStack Share -type Share struct { - // The availability zone of the share - AvailabilityZone string `json:"availability_zone"` - // A description of the share - Description string `json:"description,omitempty"` - // DisplayDescription is inherited from BlockStorage API. - // Both Description and DisplayDescription can be used - DisplayDescription string `json:"display_description,omitempty"` - // DisplayName is inherited from BlockStorage API - // Both DisplayName and Name can be used - DisplayName string `json:"display_name,omitempty"` - // Indicates whether a share has replicas or not. - HasReplicas bool `json:"has_replicas"` - // The host name of the share - Host string `json:"host"` - // The UUID of the share - ID string `json:"id"` - // Indicates the visibility of the share - IsPublic bool `json:"is_public,omitempty"` - // Share links for pagination - Links []map[string]string `json:"links"` - // Key, value -pairs of custom metadata - Metadata map[string]string `json:"metadata,omitempty"` - // The name of the share - Name string `json:"name,omitempty"` - // The UUID of the project to which this share belongs to - ProjectID string `json:"project_id"` - // The share replication type - ReplicationType string `json:"replication_type,omitempty"` - // The UUID of the share network - ShareNetworkID string `json:"share_network_id"` - // The shared file system protocol - ShareProto string `json:"share_proto"` - // The UUID of the share server - ShareServerID string `json:"share_server_id"` - // The UUID of the share type. - ShareType string `json:"share_type"` - // The name of the share type. 
- ShareTypeName string `json:"share_type_name"` - // Size of the share in GB - Size int `json:"size"` - // UUID of the snapshot from which to create the share - SnapshotID string `json:"snapshot_id"` - // The share status - Status string `json:"status"` - // The task state, used for share migration - TaskState string `json:"task_state"` - // The type of the volume - VolumeType string `json:"volume_type,omitempty"` - // The UUID of the consistency group this share belongs to - ConsistencyGroupID string `json:"consistency_group_id"` - // Used for filtering backends which either support or do not support share snapshots - SnapshotSupport bool `json:"snapshot_support"` - SourceCgsnapshotMemberID string `json:"source_cgsnapshot_member_id"` - // Timestamp when the share was created - CreatedAt time.Time `json:"-"` - // Timestamp when the share was updated - UpdatedAt time.Time `json:"-"` -} - -func (r *Share) UnmarshalJSON(b []byte) error { - type tmp Share - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Share(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - r.UpdatedAt = time.Time(s.UpdatedAt) - - return nil -} - -type commonResult struct { - gophercloud.Result -} - -// Extract will get the Share object from the commonResult -func (r commonResult) Extract() (*Share, error) { - var s struct { - Share *Share `json:"share"` - } - err := r.ExtractInto(&s) - return s.Share, err -} - -// CreateResult contains the response body and error from a Create request. -type CreateResult struct { - commonResult -} - -// SharePage is a pagination.pager that is returned from a call to the List function. -type SharePage struct { - pagination.MarkerPageBase -} - -// NextPageURL generates the URL for the page of results after this one. -func (r SharePage) NextPageURL() (string, error) { - currentURL := r.URL - mark, err := r.Owner.LastMarker() - if err != nil { - return "", err - } - if mark == invalidMarker { - return "", nil - } - - q := currentURL.Query() - q.Set("offset", mark) - currentURL.RawQuery = q.Encode() - return currentURL.String(), nil -} - -// LastMarker returns the last offset in a ListResult. -func (r SharePage) LastMarker() (string, error) { - shares, err := ExtractShares(r) - if err != nil { - return invalidMarker, err - } - if len(shares) == 0 { - return invalidMarker, nil - } - - u, err := url.Parse(r.URL.String()) - if err != nil { - return invalidMarker, err - } - queryParams := u.Query() - offset := queryParams.Get("offset") - limit := queryParams.Get("limit") - - // Limit is not present, only one page required - if limit == "" { - return invalidMarker, nil - } - - iOffset := 0 - if offset != "" { - iOffset, err = strconv.Atoi(offset) - if err != nil { - return invalidMarker, err - } - } - iLimit, err := strconv.Atoi(limit) - if err != nil { - return invalidMarker, err - } - iOffset = iOffset + iLimit - offset = strconv.Itoa(iOffset) - - return offset, nil -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (r SharePage) IsEmpty() (bool, error) { - shares, err := ExtractShares(r) - return len(shares) == 0, err -} - -// ExtractShares extracts and returns a Share slice. It is used while -// iterating over a shares.List call. 
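A sketch of listing shares with the package removed above, assuming shareClient is a shared-file-systems v2 client; it combines the ListDetail pager and ExtractShares helper shown in the deleted files.

    pages, err := shares.ListDetail(shareClient, &shares.ListOpts{Status: "available"}).AllPages()
    if err != nil {
        panic(err)
    }
    all, err := shares.ExtractShares(pages)
    if err != nil {
        panic(err)
    }
    for _, s := range all {
        fmt.Printf("%s (%s) %d GB %s\n", s.Name, s.ID, s.Size, s.Status)
    }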
-func ExtractShares(r pagination.Page) ([]Share, error) { - var s struct { - Shares []Share `json:"shares"` - } - - err := (r.(SharePage)).ExtractInto(&s) - - return s.Shares, err -} - -// DeleteResult contains the response body and error from a Delete request. -type DeleteResult struct { - gophercloud.ErrResult -} - -// GetResult contains the response body and error from a Get request. -type GetResult struct { - commonResult -} - -// UpdateResult contains the response body and error from an Update request. -type UpdateResult struct { - commonResult -} - -// IDFromName is a convenience function that returns a share's ID given its name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - r, err := ListDetail(client, &ListOpts{Name: name}).AllPages() - if err != nil { - return "", err - } - - ss, err := ExtractShares(r) - if err != nil { - return "", err - } - - switch len(ss) { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "share"} - case 1: - return ss[0].ID, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: len(ss), ResourceType: "share"} - } -} - -// GetExportLocationsResult contains the result body and error from an -// GetExportLocations request. -type GetExportLocationsResult struct { - gophercloud.Result -} - -// ExportLocation contains all information associated with a share export location -type ExportLocation struct { - // The export location path that should be used for mount operation. - Path string `json:"path"` - // The UUID of the share instance that this export location belongs to. - ShareInstanceID string `json:"share_instance_id"` - // Defines purpose of an export location. - // If set to true, then it is expected to be used for service needs - // and by administrators only. - // If it is set to false, then this export location can be used by end users. - IsAdminOnly bool `json:"is_admin_only"` - // The share export location UUID. - ID string `json:"id"` - // Drivers may use this field to identify which export locations are - // most efficient and should be used preferentially by clients. - // By default it is set to false value. New in version 2.14 - Preferred bool `json:"preferred"` -} - -// Extract will get the Export Locations from the commonResult -func (r GetExportLocationsResult) Extract() ([]ExportLocation, error) { - var s struct { - ExportLocations []ExportLocation `json:"export_locations"` - } - err := r.ExtractInto(&s) - return s.ExportLocations, err -} - -// AccessRight contains all information associated with an OpenStack share -// Grant Access Response -type AccessRight struct { - // The UUID of the share to which you are granted or denied access. - ShareID string `json:"share_id"` - // The access rule type that can be "ip", "cert" or "user". - AccessType string `json:"access_type,omitempty"` - // The value that defines the access that can be a valid format of IP, cert or user. - AccessTo string `json:"access_to,omitempty"` - // The access credential of the entity granted share access. - AccessKey string `json:"access_key,omitempty"` - // The access level to the share is either "rw" or "ro". - AccessLevel string `json:"access_level,omitempty"` - // The state of the access rule - State string `json:"state,omitempty"` - // The access rule ID. 
- ID string `json:"id"` -} - -// Extract will get the GrantAccess object from the commonResult -func (r GrantAccessResult) Extract() (*AccessRight, error) { - var s struct { - AccessRight *AccessRight `json:"access"` - } - err := r.ExtractInto(&s) - return s.AccessRight, err -} - -// GrantAccessResult contains the result body and error from an GrantAccess request. -type GrantAccessResult struct { - gophercloud.Result -} - -// RevokeAccessResult contains the response body and error from a Revoke access request. -type RevokeAccessResult struct { - gophercloud.ErrResult -} - -// Extract will get a slice of AccessRight objects from the commonResult -func (r ListAccessRightsResult) Extract() ([]AccessRight, error) { - var s struct { - AccessRights []AccessRight `json:"access_list"` - } - err := r.ExtractInto(&s) - return s.AccessRights, err -} - -// ListAccessRightsResult contains the result body and error from a ListAccessRights request. -type ListAccessRightsResult struct { - gophercloud.Result -} - -// ExtendResult contains the response body and error from an Extend request. -type ExtendResult struct { - gophercloud.ErrResult -} - -// ShrinkResult contains the response body and error from a Shrink request. -type ShrinkResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/urls.go deleted file mode 100644 index 02fba24d2..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/urls.go +++ /dev/null @@ -1,47 +0,0 @@ -package shares - -import "github.com/gophercloud/gophercloud" - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("shares") -} - -func listDetailURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("shares", "detail") -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("shares", id) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("shares", id) -} - -func updateURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("shares", id) -} - -func getExportLocationsURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("shares", id, "export_locations") -} - -func grantAccessURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("shares", id, "action") -} - -func revokeAccessURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("shares", id, "action") -} - -func listAccessRightsURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("shares", id, "action") -} - -func extendURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("shares", id, "action") -} - -func shrinkURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("shares", id, "action") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/requests.go deleted file mode 100644 index 1f2f7d7cc..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/requests.go +++ /dev/null @@ -1,161 +0,0 @@ -package snapshots - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// CreateOptsBuilder allows extensions to add additional parameters 
to the -// Create request. -type CreateOptsBuilder interface { - ToSnapshotCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains the options for create a Snapshot. This object is -// passed to snapshots.Create(). For more information about these parameters, -// please refer to the Snapshot object, or the shared file systems API v2 -// documentation -type CreateOpts struct { - // The UUID of the share from which to create a snapshot - ShareID string `json:"share_id" required:"true"` - // Defines the snapshot name - Name string `json:"name,omitempty"` - // Defines the snapshot description - Description string `json:"description,omitempty"` - // DisplayName is equivalent to Name. The API supports using both - // This is an inherited attribute from the block storage API - DisplayName string `json:"display_name,omitempty"` - // DisplayDescription is equivalent to Description. The API supports using both - // This is an inherited attribute from the block storage API - DisplayDescription string `json:"display_description,omitempty"` -} - -// ToSnapshotCreateMap assembles a request body based on the contents of a -// CreateOpts. -func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "snapshot") -} - -// Create will create a new Snapshot based on the values in CreateOpts. To extract -// the Snapshot object from the response, call the Extract method on the -// CreateResult. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToSnapshotCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// ListOpts holds options for listing Snapshots. It is passed to the -// snapshots.List function. -type ListOpts struct { - // (Admin only). Defines whether to list the requested resources for all projects. - AllTenants bool `q:"all_tenants"` - // The snapshot name. - Name string `q:"name"` - // Filter by a snapshot description. - Description string `q:"description"` - // Filters by a share from which the snapshot was created. - ShareID string `q:"share_id"` - // Filters by a snapshot size in GB. - Size int `q:"size"` - // Filters by a snapshot status. - Status string `q:"status"` - // The maximum number of snapshots to return. - Limit int `q:"limit"` - // The offset to define start point of snapshot or snapshot group listing. - Offset int `q:"offset"` - // The key to sort a list of snapshots. - SortKey string `q:"sort_key"` - // The direction to sort a list of snapshots. - SortDir string `q:"sort_dir"` - // The UUID of the project in which the snapshot was created. Useful with all_tenants parameter. - ProjectID string `q:"project_id"` - // The name pattern that can be used to filter snapshots, snapshot snapshots, snapshot networks or snapshot groups. - NamePattern string `q:"name~"` - // The description pattern that can be used to filter snapshots, snapshot snapshots, snapshot networks or snapshot groups. - DescriptionPattern string `q:"description~"` -} - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToSnapshotListQuery() (string, error) -} - -// ToSnapshotListQuery formats a ListOpts into a query string. 
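Below, ToSnapshotListQuery turns these ListOpts into the query string that ListDetail appends. A small caller-side sketch, using only what this hunk defines plus an already authenticated shared-file-system ServiceClient passed in by the caller:

// Sketch: list only the snapshots of one share, 50 per page.
package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots"
	"github.com/gophercloud/gophercloud/pagination"
)

func printSnapshotsForShare(client *gophercloud.ServiceClient, shareID string) error {
	opts := snapshots.ListOpts{ShareID: shareID, Limit: 50}
	return snapshots.ListDetail(client, opts).EachPage(func(p pagination.Page) (bool, error) {
		snaps, err := snapshots.ExtractSnapshots(p)
		if err != nil {
			return false, err
		}
		for _, s := range snaps {
			fmt.Println(s.ID, s.Status, s.Size)
		}
		return true, nil
	})
}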
-func (opts ListOpts) ToSnapshotListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// ListDetail returns []Snapshot optionally limited by the conditions provided in ListOpts. -func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listDetailURL(client) - if opts != nil { - query, err := opts.ToSnapshotListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - p := SnapshotPage{pagination.MarkerPageBase{PageResult: r}} - p.MarkerPageBase.Owner = p - return p - }) -} - -// Delete will delete an existing Snapshot with the given UUID. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} - -// Get will get a single snapshot with given UUID -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToSnapshotUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contain options for updating an existing Snapshot. This object is passed -// to the snapshot.Update function. For more information about the parameters, see -// the Snapshot object. -type UpdateOpts struct { - // Snapshot name. Manila snapshot update logic doesn't have a "name" alias. - DisplayName *string `json:"display_name,omitempty"` - // Snapshot description. Manila snapshot update logic doesn't have a "description" alias. - DisplayDescription *string `json:"display_description,omitempty"` -} - -// ToSnapshotUpdateMap assembles a request body based on the contents of an -// UpdateOpts. -func (opts UpdateOpts) ToSnapshotUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "snapshot") -} - -// Update will update the Snapshot with provided information. To extract the updated -// Snapshot from the response, call the Extract method on the UpdateResult. 
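Because only display_name and display_description are accepted, the pointer fields let a caller change one without clearing the other. A hedged sketch of renaming a snapshot through the Update call that follows; the ServiceClient is assumed to be built elsewhere:

// Sketch: rename a snapshot; DisplayDescription is left untouched.
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots"
)

func renameSnapshot(client *gophercloud.ServiceClient, id, newName string) (*snapshots.Snapshot, error) {
	opts := snapshots.UpdateOpts{DisplayName: &newName}
	return snapshots.Update(client, id, opts).Extract()
}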
-func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToSnapshotUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/results.go deleted file mode 100644 index 4952dbd34..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/results.go +++ /dev/null @@ -1,193 +0,0 @@ -package snapshots - -import ( - "encoding/json" - "net/url" - "strconv" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -const ( - invalidMarker = "-1" -) - -// Snapshot contains all information associated with an OpenStack Snapshot -type Snapshot struct { - // The UUID of the snapshot - ID string `json:"id"` - // The name of the snapshot - Name string `json:"name,omitempty"` - // A description of the snapshot - Description string `json:"description,omitempty"` - // UUID of the share from which the snapshot was created - ShareID string `json:"share_id"` - // The shared file system protocol - ShareProto string `json:"share_proto"` - // Size of the snapshot share in GB - ShareSize int `json:"share_size"` - // Size of the snapshot in GB - Size int `json:"size"` - // The snapshot status - Status string `json:"status"` - // The UUID of the project in which the snapshot was created - ProjectID string `json:"project_id"` - // Timestamp when the snapshot was created - CreatedAt time.Time `json:"-"` - // Snapshot links for pagination - Links []map[string]string `json:"links"` -} - -func (r *Snapshot) UnmarshalJSON(b []byte) error { - type tmp Snapshot - var s struct { - tmp - CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Snapshot(s.tmp) - - r.CreatedAt = time.Time(s.CreatedAt) - - return nil -} - -type commonResult struct { - gophercloud.Result -} - -// Extract will get the Snapshot object from the commonResult -func (r commonResult) Extract() (*Snapshot, error) { - var s struct { - Snapshot *Snapshot `json:"snapshot"` - } - err := r.ExtractInto(&s) - return s.Snapshot, err -} - -// CreateResult contains the response body and error from a Create request. -type CreateResult struct { - commonResult -} - -// SnapshotPage is a pagination.pager that is returned from a call to the List function. -type SnapshotPage struct { - pagination.MarkerPageBase -} - -// NextPageURL generates the URL for the page of results after this one. -func (r SnapshotPage) NextPageURL() (string, error) { - currentURL := r.URL - mark, err := r.Owner.LastMarker() - if err != nil { - return "", err - } - if mark == invalidMarker { - return "", nil - } - - q := currentURL.Query() - q.Set("offset", mark) - currentURL.RawQuery = q.Encode() - return currentURL.String(), nil -} - -// LastMarker returns the last offset in a ListResult. 
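The offset marker computed by the LastMarker helper that follows is what lets AllPages collapse a multi-page listing into a single result. A short sketch of that usage, again assuming the ServiceClient is constructed elsewhere:

// Sketch: fetch every snapshot at once; paging stops when a page comes back empty.
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots"
)

func allSnapshots(client *gophercloud.ServiceClient) ([]snapshots.Snapshot, error) {
	page, err := snapshots.ListDetail(client, snapshots.ListOpts{Limit: 100}).AllPages()
	if err != nil {
		return nil, err
	}
	return snapshots.ExtractSnapshots(page)
}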
-func (r SnapshotPage) LastMarker() (string, error) { - snapshots, err := ExtractSnapshots(r) - if err != nil { - return invalidMarker, err - } - if len(snapshots) == 0 { - return invalidMarker, nil - } - - u, err := url.Parse(r.URL.String()) - if err != nil { - return invalidMarker, err - } - queryParams := u.Query() - offset := queryParams.Get("offset") - limit := queryParams.Get("limit") - - // Limit is not present, only one page required - if limit == "" { - return invalidMarker, nil - } - - iOffset := 0 - if offset != "" { - iOffset, err = strconv.Atoi(offset) - if err != nil { - return invalidMarker, err - } - } - iLimit, err := strconv.Atoi(limit) - if err != nil { - return invalidMarker, err - } - iOffset = iOffset + iLimit - offset = strconv.Itoa(iOffset) - - return offset, nil -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (r SnapshotPage) IsEmpty() (bool, error) { - snapshots, err := ExtractSnapshots(r) - return len(snapshots) == 0, err -} - -// ExtractSnapshots extracts and returns a Snapshot slice. It is used while -// iterating over a snapshots.List call. -func ExtractSnapshots(r pagination.Page) ([]Snapshot, error) { - var s struct { - Snapshots []Snapshot `json:"snapshots"` - } - - err := (r.(SnapshotPage)).ExtractInto(&s) - - return s.Snapshots, err -} - -// DeleteResult contains the response body and error from a Delete request. -type DeleteResult struct { - gophercloud.ErrResult -} - -// GetResult contains the response body and error from a Get request. -type GetResult struct { - commonResult -} - -// UpdateResult contains the response body and error from an Update request. -type UpdateResult struct { - commonResult -} - -// IDFromName is a convenience function that returns a snapshot's ID given its name. 
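IDFromName (next) reports "no match" and "more than one match" as distinct typed errors, so callers can react to each case separately. A sketch of that handling, with the client assumed to be built elsewhere:

// Sketch: resolve a snapshot name, distinguishing missing from ambiguous.
package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots"
)

func snapshotIDByName(client *gophercloud.ServiceClient, name string) (string, error) {
	id, err := snapshots.IDFromName(client, name)
	switch err.(type) {
	case nil:
		return id, nil
	case gophercloud.ErrResourceNotFound:
		return "", fmt.Errorf("no snapshot named %q", name)
	case gophercloud.ErrMultipleResourcesFound:
		return "", fmt.Errorf("snapshot name %q is ambiguous", name)
	default:
		return "", err
	}
}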
-func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - r, err := ListDetail(client, &ListOpts{Name: name}).AllPages() - if err != nil { - return "", err - } - - ss, err := ExtractSnapshots(r) - if err != nil { - return "", err - } - - switch len(ss) { - case 0: - return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "snapshot"} - case 1: - return ss[0].ID, nil - default: - return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: len(ss), ResourceType: "snapshot"} - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/urls.go deleted file mode 100644 index a07e3ec87..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package snapshots - -import "github.com/gophercloud/gophercloud" - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("snapshots") -} - -func listDetailURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("snapshots", "detail") -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("snapshots", id) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("snapshots", id) -} - -func updateURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("snapshots", id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go deleted file mode 100644 index 40080f7af..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go +++ /dev/null @@ -1,28 +0,0 @@ -package utils - -import ( - "net/url" - "regexp" - "strings" -) - -// BaseEndpoint will return a URL without the /vX.Y -// portion of the URL. -func BaseEndpoint(endpoint string) (string, error) { - u, err := url.Parse(endpoint) - if err != nil { - return "", err - } - - u.RawQuery, u.Fragment = "", "" - - path := u.Path - versionRe := regexp.MustCompile("v[0-9.]+/?") - - if version := versionRe.FindString(path); version != "" { - versionIndex := strings.Index(path, version) - u.Path = path[:versionIndex] - } - - return u.String(), nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go deleted file mode 100644 index 27da19f91..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go +++ /dev/null @@ -1,111 +0,0 @@ -package utils - -import ( - "fmt" - "strings" - - "github.com/gophercloud/gophercloud" -) - -// Version is a supported API version, corresponding to a vN package within the appropriate service. -type Version struct { - ID string - Suffix string - Priority int -} - -var goodStatus = map[string]bool{ - "current": true, - "supported": true, - "stable": true, -} - -// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's -// published versions. -// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. 
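ChooseVersion (below) picks the highest-priority recognized version whose status is current, supported or stable, after first trying a suffix match against a fully specified endpoint. A sketch of supplying the recognized set; the v2.0/v3 entries and their priorities are illustrative values, not something this file prescribes:

// Sketch: let gophercloud negotiate the Identity API version.
package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/utils"
)

func identityEndpoint(provider *gophercloud.ProviderClient) (string, error) {
	recognized := []*utils.Version{
		{ID: "v2.0", Priority: 20, Suffix: "/v2.0/"},
		{ID: "v3", Priority: 30, Suffix: "/v3/"},
	}
	chosen, endpoint, err := utils.ChooseVersion(provider, recognized)
	if err != nil {
		return "", err
	}
	fmt.Printf("using identity %s at %s\n", chosen.ID, endpoint)
	return endpoint, nil
}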
-func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { - type linkResp struct { - Href string `json:"href"` - Rel string `json:"rel"` - } - - type valueResp struct { - ID string `json:"id"` - Status string `json:"status"` - Links []linkResp `json:"links"` - } - - type versionsResp struct { - Values []valueResp `json:"values"` - } - - type response struct { - Versions versionsResp `json:"versions"` - } - - normalize := func(endpoint string) string { - if !strings.HasSuffix(endpoint, "/") { - return endpoint + "/" - } - return endpoint - } - identityEndpoint := normalize(client.IdentityEndpoint) - - // If a full endpoint is specified, check version suffixes for a match first. - for _, v := range recognized { - if strings.HasSuffix(identityEndpoint, v.Suffix) { - return v, identityEndpoint, nil - } - } - - var resp response - _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{ - JSONResponse: &resp, - OkCodes: []int{200, 300}, - }) - - if err != nil { - return nil, "", err - } - - var highest *Version - var endpoint string - - for _, value := range resp.Versions.Values { - href := "" - for _, link := range value.Links { - if link.Rel == "self" { - href = normalize(link.Href) - } - } - - for _, version := range recognized { - if strings.Contains(value.ID, version.ID) { - // Prefer a version that exactly matches the provided endpoint. - if href == identityEndpoint { - if href == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) - } - return version, href, nil - } - - // Otherwise, find the highest-priority version with a whitelisted status. - if goodStatus[strings.ToLower(value.Status)] { - if highest == nil || version.Priority > highest.Priority { - highest = version - endpoint = href - } - } - } - } - } - - if highest == nil { - return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) - } - if endpoint == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) - } - - return highest, endpoint, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/http.go b/vendor/github.com/gophercloud/gophercloud/pagination/http.go deleted file mode 100644 index 757295c42..000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/http.go +++ /dev/null @@ -1,60 +0,0 @@ -package pagination - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/gophercloud/gophercloud" -) - -// PageResult stores the HTTP response that returned the current page of results. -type PageResult struct { - gophercloud.Result - url.URL -} - -// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the -// results, interpreting it as JSON if the content type indicates. -func PageResultFrom(resp *http.Response) (PageResult, error) { - var parsedBody interface{} - - defer resp.Body.Close() - rawBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PageResult{}, err - } - - if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { - err = json.Unmarshal(rawBody, &parsedBody) - if err != nil { - return PageResult{}, err - } - } else { - parsedBody = rawBody - } - - return PageResultFromParsed(resp, parsedBody), err -} - -// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its -// body parsed as JSON (and closed). 
-func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { - return PageResult{ - Result: gophercloud.Result{ - Body: body, - Header: resp.Header, - }, - URL: *resp.Request.URL, - } -} - -// Request performs an HTTP request and extracts the http.Response from the result. -func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { - return client.Get(url, nil, &gophercloud.RequestOpts{ - MoreHeaders: headers, - OkCodes: []int{200, 204, 300}, - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go deleted file mode 100644 index 3656fb7f8..000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go +++ /dev/null @@ -1,92 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. -type LinkedPageBase struct { - PageResult - - // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. - // If any link along the path is missing, an empty URL will be returned. - // If any link results in an unexpected value type, an error will be returned. - // When left as "nil", []string{"links", "next"} will be used as a default. - LinkPath []string -} - -// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. -// It assumes that the links are available in a "links" element of the top-level response object. -// If this is not the case, override NextPageURL on your result type. -func (current LinkedPageBase) NextPageURL() (string, error) { - var path []string - var key string - - if current.LinkPath == nil { - path = []string{"links", "next"} - } else { - path = current.LinkPath - } - - submap, ok := current.Body.(map[string]interface{}) - if !ok { - err := gophercloud.ErrUnexpectedType{} - err.Expected = "map[string]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return "", err - } - - for { - key, path = path[0], path[1:len(path)] - - value, ok := submap[key] - if !ok { - return "", nil - } - - if len(path) > 0 { - submap, ok = value.(map[string]interface{}) - if !ok { - err := gophercloud.ErrUnexpectedType{} - err.Expected = "map[string]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) - return "", err - } - } else { - if value == nil { - // Actual null element. - return "", nil - } - - url, ok := value.(string) - if !ok { - err := gophercloud.ErrUnexpectedType{} - err.Expected = "string" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) - return "", err - } - - return url, nil - } - } -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (current LinkedPageBase) IsEmpty() (bool, error) { - if b, ok := current.Body.([]interface{}); ok { - return len(b) == 0, nil - } - err := gophercloud.ErrUnexpectedType{} - err.Expected = "[]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return true, err -} - -// GetBody returns the linked page's body. This method is needed to satisfy the -// Page interface. 
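The LinkPath override described above is how services whose "next" link does not live under links/next still work with LinkedPageBase. A sketch with a hypothetical "widgets" resource; the type, URL and "widgets_links" key are invented for illustration:

// Sketch: a link-paginated page whose next pointer lives under widgets_links.
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/pagination"
)

// WidgetPage is hypothetical; it exists only to show the LinkPath override.
type WidgetPage struct {
	pagination.LinkedPageBase
}

func listWidgets(client *gophercloud.ServiceClient, url string) pagination.Pager {
	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
		return WidgetPage{pagination.LinkedPageBase{
			PageResult: r,
			LinkPath:   []string{"widgets_links", "next"},
		}}
	})
}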
-func (current LinkedPageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go deleted file mode 100644 index 52e53bae8..000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go +++ /dev/null @@ -1,58 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager. -// For convenience, embed the MarkedPageBase struct. -type MarkerPage interface { - Page - - // LastMarker returns the last "marker" value on this page. - LastMarker() (string, error) -} - -// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters. -type MarkerPageBase struct { - PageResult - - // Owner is a reference to the embedding struct. - Owner MarkerPage -} - -// NextPageURL generates the URL for the page of results after this one. -func (current MarkerPageBase) NextPageURL() (string, error) { - currentURL := current.URL - - mark, err := current.Owner.LastMarker() - if err != nil { - return "", err - } - - q := currentURL.Query() - q.Set("marker", mark) - currentURL.RawQuery = q.Encode() - - return currentURL.String(), nil -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (current MarkerPageBase) IsEmpty() (bool, error) { - if b, ok := current.Body.([]interface{}); ok { - return len(b) == 0, nil - } - err := gophercloud.ErrUnexpectedType{} - err.Expected = "[]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return true, err -} - -// GetBody returns the linked page's body. This method is needed to satisfy the -// Page interface. -func (current MarkerPageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go deleted file mode 100644 index 42c0b2dbe..000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go +++ /dev/null @@ -1,251 +0,0 @@ -package pagination - -import ( - "errors" - "fmt" - "net/http" - "reflect" - "strings" - - "github.com/gophercloud/gophercloud" -) - -var ( - // ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist. - ErrPageNotAvailable = errors.New("The requested page does not exist.") -) - -// Page must be satisfied by the result type of any resource collection. -// It allows clients to interact with the resource uniformly, regardless of whether or not or how it's paginated. -// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs, -// instead. -// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type -// will need to implement. -type Page interface { - // NextPageURL generates the URL for the page of data that follows this collection. - // Return "" if no such page exists. - NextPageURL() (string, error) - - // IsEmpty returns true if this Page has no items in it. - IsEmpty() (bool, error) - - // GetBody returns the Page Body. This is used in the `AllPages` method. - GetBody() interface{} -} - -// Pager knows how to advance through a specific resource collection, one page at a time. 
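To use MarkerPageBase (above), a resource page embeds it, registers itself as Owner, and implements LastMarker plus IsEmpty; MarkerPageBase.NextPageURL then appends the marker for it. A sketch with a hypothetical "things" collection, where the names and the JSON key are invented for illustration:

// Sketch: marker-based paging for a hypothetical "things" resource.
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/pagination"
)

type Thing struct {
	ID string `json:"id"`
}

type ThingPage struct {
	pagination.MarkerPageBase
}

// IsEmpty lets EachPage stop once a page comes back with no items.
func (p ThingPage) IsEmpty() (bool, error) {
	things, err := ExtractThings(p)
	return len(things) == 0, err
}

// LastMarker feeds MarkerPageBase.NextPageURL the ?marker= value.
func (p ThingPage) LastMarker() (string, error) {
	things, err := ExtractThings(p)
	if err != nil || len(things) == 0 {
		return "", err
	}
	return things[len(things)-1].ID, nil
}

func ExtractThings(r pagination.Page) ([]Thing, error) {
	var s struct {
		Things []Thing `json:"things"`
	}
	err := r.(ThingPage).ExtractInto(&s)
	return s.Things, err
}

func listThings(client *gophercloud.ServiceClient, url string) pagination.Pager {
	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
		p := ThingPage{pagination.MarkerPageBase{PageResult: r}}
		p.Owner = p
		return p
	})
}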
-type Pager struct { - client *gophercloud.ServiceClient - - initialURL string - - createPage func(r PageResult) Page - - firstPage Page - - Err error - - // Headers supplies additional HTTP headers to populate on each paged request. - Headers map[string]string -} - -// NewPager constructs a manually-configured pager. -// Supply the URL for the first page, a function that requests a specific page given a URL, and a function that counts a page. -func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager { - return Pager{ - client: client, - initialURL: initialURL, - createPage: createPage, - } -} - -// WithPageCreator returns a new Pager that substitutes a different page creation function. This is -// useful for overriding List functions in delegation. -func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager { - return Pager{ - client: p.client, - initialURL: p.initialURL, - createPage: createPage, - } -} - -func (p Pager) fetchNextPage(url string) (Page, error) { - resp, err := Request(p.client, p.Headers, url) - if err != nil { - return nil, err - } - - remembered, err := PageResultFrom(resp) - if err != nil { - return nil, err - } - - return p.createPage(remembered), nil -} - -// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function. -// Return "false" from the handler to prematurely stop iterating. -func (p Pager) EachPage(handler func(Page) (bool, error)) error { - if p.Err != nil { - return p.Err - } - currentURL := p.initialURL - for { - var currentPage Page - - // if first page has already been fetched, no need to fetch it again - if p.firstPage != nil { - currentPage = p.firstPage - p.firstPage = nil - } else { - var err error - currentPage, err = p.fetchNextPage(currentURL) - if err != nil { - return err - } - } - - empty, err := currentPage.IsEmpty() - if err != nil { - return err - } - if empty { - return nil - } - - ok, err := handler(currentPage) - if err != nil { - return err - } - if !ok { - return nil - } - - currentURL, err = currentPage.NextPageURL() - if err != nil { - return err - } - if currentURL == "" { - return nil - } - } -} - -// AllPages returns all the pages from a `List` operation in a single page, -// allowing the user to retrieve all the pages at once. -func (p Pager) AllPages() (Page, error) { - // pagesSlice holds all the pages until they get converted into as Page Body. - var pagesSlice []interface{} - // body will contain the final concatenated Page body. - var body reflect.Value - - // Grab a first page to ascertain the page body type. - firstPage, err := p.fetchNextPage(p.initialURL) - if err != nil { - return nil, err - } - // Store the page type so we can use reflection to create a new mega-page of - // that type. - pageType := reflect.TypeOf(firstPage) - - // if it's a single page, just return the firstPage (first page) - if _, found := pageType.FieldByName("SinglePageBase"); found { - return firstPage, nil - } - - // store the first page to avoid getting it twice - p.firstPage = firstPage - - // Switch on the page body type. Recognized types are `map[string]interface{}`, - // `[]byte`, and `[]interface{}`. - switch pb := firstPage.GetBody().(type) { - case map[string]interface{}: - // key is the map key for the page body if the body type is `map[string]interface{}`. - var key string - // Iterate over the pages to concatenate the bodies. 
- err = p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().(map[string]interface{}) - for k, v := range b { - // If it's a linked page, we don't want the `links`, we want the other one. - if !strings.HasSuffix(k, "links") { - // check the field's type. we only want []interface{} (which is really []map[string]interface{}) - switch vt := v.(type) { - case []interface{}: - key = k - pagesSlice = append(pagesSlice, vt...) - } - } - } - return true, nil - }) - if err != nil { - return nil, err - } - // Set body to value of type `map[string]interface{}` - body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice))) - body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice)) - case []byte: - // Iterate over the pages to concatenate the bodies. - err = p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().([]byte) - pagesSlice = append(pagesSlice, b) - // seperate pages with a comma - pagesSlice = append(pagesSlice, []byte{10}) - return true, nil - }) - if err != nil { - return nil, err - } - if len(pagesSlice) > 0 { - // Remove the trailing comma. - pagesSlice = pagesSlice[:len(pagesSlice)-1] - } - var b []byte - // Combine the slice of slices in to a single slice. - for _, slice := range pagesSlice { - b = append(b, slice.([]byte)...) - } - // Set body to value of type `bytes`. - body = reflect.New(reflect.TypeOf(b)).Elem() - body.SetBytes(b) - case []interface{}: - // Iterate over the pages to concatenate the bodies. - err = p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().([]interface{}) - pagesSlice = append(pagesSlice, b...) - return true, nil - }) - if err != nil { - return nil, err - } - // Set body to value of type `[]interface{}` - body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice)) - for i, s := range pagesSlice { - body.Index(i).Set(reflect.ValueOf(s)) - } - default: - err := gophercloud.ErrUnexpectedType{} - err.Expected = "map[string]interface{}/[]byte/[]interface{}" - err.Actual = fmt.Sprintf("%T", pb) - return nil, err - } - - // Each `Extract*` function is expecting a specific type of page coming back, - // otherwise the type assertion in those functions will fail. pageType is needed - // to create a type in this method that has the same type that the `Extract*` - // function is expecting and set the Body of that object to the concatenated - // pages. - page := reflect.New(pageType) - // Set the page body to be the concatenated pages. - page.Elem().FieldByName("Body").Set(body) - // Set any additional headers that were pass along. The `objectstorage` pacakge, - // for example, passes a Content-Type header. - h := make(http.Header) - for k, v := range p.Headers { - h.Add(k, v) - } - page.Elem().FieldByName("Header").Set(reflect.ValueOf(h)) - // Type assert the page to a Page interface so that the type assertion in the - // `Extract*` methods will work. - return page.Elem().Interface().(Page), err -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go deleted file mode 100644 index 912daea36..000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs. 
-*/ -package pagination diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/single.go b/vendor/github.com/gophercloud/gophercloud/pagination/single.go deleted file mode 100644 index 4251d6491..000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/single.go +++ /dev/null @@ -1,33 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once. -type SinglePageBase PageResult - -// NextPageURL always returns "" to indicate that there are no more pages to return. -func (current SinglePageBase) NextPageURL() (string, error) { - return "", nil -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (current SinglePageBase) IsEmpty() (bool, error) { - if b, ok := current.Body.([]interface{}); ok { - return len(b) == 0, nil - } - err := gophercloud.ErrUnexpectedType{} - err.Expected = "[]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return true, err -} - -// GetBody returns the single page's body. This method is needed to satisfy the -// Page interface. -func (current SinglePageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go deleted file mode 100644 index b9986660c..000000000 --- a/vendor/github.com/gophercloud/gophercloud/params.go +++ /dev/null @@ -1,491 +0,0 @@ -package gophercloud - -import ( - "encoding/json" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -/* -BuildRequestBody builds a map[string]interface from the given `struct`. If -parent is not an empty string, the final map[string]interface returned will -encapsulate the built one. For example: - - disk := 1 - createOpts := flavors.CreateOpts{ - ID: "1", - Name: "m1.tiny", - Disk: &disk, - RAM: 512, - VCPUs: 1, - RxTxFactor: 1.0, - } - - body, err := gophercloud.BuildRequestBody(createOpts, "flavor") - -The above example can be run as-is, however it is recommended to look at how -BuildRequestBody is used within Gophercloud to more fully understand how it -fits within the request process as a whole rather than use it directly as shown -above. 
-*/ -func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - optsMap := make(map[string]interface{}) - if optsValue.Kind() == reflect.Struct { - //fmt.Printf("optsValue.Kind() is a reflect.Struct: %+v\n", optsValue.Kind()) - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - - if f.Name != strings.Title(f.Name) { - //fmt.Printf("Skipping field: %s...\n", f.Name) - continue - } - - //fmt.Printf("Starting on field: %s...\n", f.Name) - - zero := isZero(v) - //fmt.Printf("v is zero?: %v\n", zero) - - // if the field has a required tag that's set to "true" - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - //fmt.Printf("Checking required field [%s]:\n\tv: %+v\n\tisZero:%v\n", f.Name, v.Interface(), zero) - // if the field's value is zero, return a missing-argument error - if zero { - // if the field has a 'required' tag, it can't have a zero-value - err := ErrMissingInput{} - err.Argument = f.Name - return nil, err - } - } - - if xorTag := f.Tag.Get("xor"); xorTag != "" { - //fmt.Printf("Checking `xor` tag for field [%s] with value %+v:\n\txorTag: %s\n", f.Name, v, xorTag) - xorField := optsValue.FieldByName(xorTag) - var xorFieldIsZero bool - if reflect.ValueOf(xorField.Interface()) == reflect.Zero(xorField.Type()) { - xorFieldIsZero = true - } else { - if xorField.Kind() == reflect.Ptr { - xorField = xorField.Elem() - } - xorFieldIsZero = isZero(xorField) - } - if !(zero != xorFieldIsZero) { - err := ErrMissingInput{} - err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) - err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) - return nil, err - } - } - - if orTag := f.Tag.Get("or"); orTag != "" { - //fmt.Printf("Checking `or` tag for field with:\n\tname: %+v\n\torTag:%s\n", f.Name, orTag) - //fmt.Printf("field is zero?: %v\n", zero) - if zero { - orField := optsValue.FieldByName(orTag) - var orFieldIsZero bool - if reflect.ValueOf(orField.Interface()) == reflect.Zero(orField.Type()) { - orFieldIsZero = true - } else { - if orField.Kind() == reflect.Ptr { - orField = orField.Elem() - } - orFieldIsZero = isZero(orField) - } - if orFieldIsZero { - err := ErrMissingInput{} - err.Argument = fmt.Sprintf("%s/%s", f.Name, orTag) - err.Info = fmt.Sprintf("At least one of %s and %s must be provided", f.Name, orTag) - return nil, err - } - } - } - - jsonTag := f.Tag.Get("json") - if jsonTag == "-" { - continue - } - - if v.Kind() == reflect.Slice || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Slice) { - sliceValue := v - if sliceValue.Kind() == reflect.Ptr { - sliceValue = sliceValue.Elem() - } - - for i := 0; i < sliceValue.Len(); i++ { - element := sliceValue.Index(i) - if element.Kind() == reflect.Struct || (element.Kind() == reflect.Ptr && element.Elem().Kind() == reflect.Struct) { - _, err := BuildRequestBody(element.Interface(), "") - if err != nil { - return nil, err - } - } - } - } - if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { - if zero { - //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) - if jsonTag != "" { - jsonTagPieces := strings.Split(jsonTag, ",") - if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { - if v.CanSet() { - if !v.IsNil() { - if v.Kind() == 
reflect.Ptr { - v.Set(reflect.Zero(v.Type())) - } - } - //fmt.Printf("value after change: %+v\n", optsValue.Field(i)) - } - } - } - continue - } - - //fmt.Printf("Calling BuildRequestBody with:\n\tv: %+v\n\tf.Name:%s\n", v.Interface(), f.Name) - _, err := BuildRequestBody(v.Interface(), f.Name) - if err != nil { - return nil, err - } - } - } - - //fmt.Printf("opts: %+v \n", opts) - - b, err := json.Marshal(opts) - if err != nil { - return nil, err - } - - //fmt.Printf("string(b): %s\n", string(b)) - - err = json.Unmarshal(b, &optsMap) - if err != nil { - return nil, err - } - - //fmt.Printf("optsMap: %+v\n", optsMap) - - if parent != "" { - optsMap = map[string]interface{}{parent: optsMap} - } - //fmt.Printf("optsMap after parent added: %+v\n", optsMap) - return optsMap, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return nil, fmt.Errorf("Options type is not a struct.") -} - -// EnabledState is a convenience type, mostly used in Create and Update -// operations. Because the zero value of a bool is FALSE, we need to use a -// pointer instead to indicate zero-ness. -type EnabledState *bool - -// Convenience vars for EnabledState values. -var ( - iTrue = true - iFalse = false - - Enabled EnabledState = &iTrue - Disabled EnabledState = &iFalse -) - -// IPVersion is a type for the possible IP address versions. Valid instances -// are IPv4 and IPv6 -type IPVersion int - -const ( - // IPv4 is used for IP version 4 addresses - IPv4 IPVersion = 4 - // IPv6 is used for IP version 6 addresses - IPv6 IPVersion = 6 -) - -// IntToPointer is a function for converting integers into integer pointers. -// This is useful when passing in options to operations. -func IntToPointer(i int) *int { - return &i -} - -/* -MaybeString is an internal function to be used by request methods in individual -resource packages. - -It takes a string that might be a zero value and returns either a pointer to its -address or nil. This is useful for allowing users to conveniently omit values -from an options struct by leaving them zeroed, but still pass nil to the JSON -serializer so they'll be omitted from the request body. -*/ -func MaybeString(original string) *string { - if original != "" { - return &original - } - return nil -} - -/* -MaybeInt is an internal function to be used by request methods in individual -resource packages. - -Like MaybeString, it accepts an int that may or may not be a zero value, and -returns either a pointer to its address or nil. It's intended to hint that the -JSON serializer should omit its field. 
-*/ -func MaybeInt(original int) *int { - if original != 0 { - return &original - } - return nil -} - -/* -func isUnderlyingStructZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Ptr: - return isUnderlyingStructZero(v.Elem()) - default: - return isZero(v) - } -} -*/ - -var t time.Time - -func isZero(v reflect.Value) bool { - //fmt.Printf("\n\nchecking isZero for value: %+v\n", v) - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - return true - } - return false - case reflect.Func, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - z := true - for i := 0; i < v.Len(); i++ { - z = z && isZero(v.Index(i)) - } - return z - case reflect.Struct: - if v.Type() == reflect.TypeOf(t) { - if v.Interface().(time.Time).IsZero() { - return true - } - return false - } - z := true - for i := 0; i < v.NumField(); i++ { - z = z && isZero(v.Field(i)) - } - return z - } - // Compare other types directly: - z := reflect.Zero(v.Type()) - //fmt.Printf("zero type for value: %+v\n\n\n", z) - return v.Interface() == z.Interface() -} - -/* -BuildQueryString is an internal function to be used by request methods in -individual resource packages. - -It accepts a tagged structure and expands it into a URL struct. Field names are -converted into query parameters based on a "q" tag. For example: - - type struct Something { - Bar string `q:"x_bar"` - Baz int `q:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into "?x_bar=AAA&lorem_ipsum=BBB". - -The struct's fields may be strings, integers, or boolean values. Fields left at -their type's zero value will be omitted from the query. -*/ -func BuildQueryString(opts interface{}) (*url.URL, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - params := url.Values{} - - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - qTag := f.Tag.Get("q") - - // if the field has a 'q' tag, it goes in the query string - if qTag != "" { - tags := strings.Split(qTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - loop: - switch v.Kind() { - case reflect.Ptr: - v = v.Elem() - goto loop - case reflect.String: - params.Add(tags[0], v.String()) - case reflect.Int: - params.Add(tags[0], strconv.FormatInt(v.Int(), 10)) - case reflect.Bool: - params.Add(tags[0], strconv.FormatBool(v.Bool())) - case reflect.Slice: - switch v.Type().Elem() { - case reflect.TypeOf(0): - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10)) - } - default: - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], v.Index(i).String()) - } - } - case reflect.Map: - if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String { - var s []string - for _, k := range v.MapKeys() { - value := v.MapIndex(k).String() - s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value)) - } - params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", "))) - } - } - } else { - // if the field has a 'required' tag, it can't have a zero-value - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name) - } - } - } - } - - return &url.URL{RawQuery: params.Encode()}, nil - } - // Return an error if the 
underlying type of 'opts' isn't a struct. - return nil, fmt.Errorf("Options type is not a struct.") -} - -/* -BuildHeaders is an internal function to be used by request methods in -individual resource packages. - -It accepts an arbitrary tagged structure and produces a string map that's -suitable for use as the HTTP headers of an outgoing request. Field names are -mapped to header names based in "h" tags. - - type struct Something { - Bar string `h:"x_bar"` - Baz int `h:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into: - - map[string]string{ - "x_bar": "AAA", - "lorem_ipsum": "BBB", - } - -Untagged fields and fields left at their zero values are skipped. Integers, -booleans and string values are supported. -*/ -func BuildHeaders(opts interface{}) (map[string]string, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - optsMap := make(map[string]string) - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - hTag := f.Tag.Get("h") - - // if the field has a 'h' tag, it goes in the header - if hTag != "" { - tags := strings.Split(hTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - switch v.Kind() { - case reflect.String: - optsMap[tags[0]] = v.String() - case reflect.Int: - optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10) - case reflect.Bool: - optsMap[tags[0]] = strconv.FormatBool(v.Bool()) - } - } else { - // if the field has a 'required' tag, it can't have a zero-value - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name) - } - } - } - - } - return optsMap, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return optsMap, fmt.Errorf("Options type is not a struct.") -} - -// IDSliceToQueryString takes a slice of elements and converts them into a query -// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the -// result would be `?name=20&name=40&name=60' -func IDSliceToQueryString(name string, ids []int) string { - str := "" - for k, v := range ids { - if k == 0 { - str += "?" - } else { - str += "&" - } - str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v)) - } - return str -} - -// IntWithinRange returns TRUE if an integer falls within a defined range, and -// FALSE if not. -func IntWithinRange(val, min, max int) bool { - return val > min && val < max -} diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go deleted file mode 100644 index fce00462f..000000000 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ /dev/null @@ -1,501 +0,0 @@ -package gophercloud - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "strings" - "sync" -) - -// DefaultUserAgent is the default User-Agent string set in the request header. -const DefaultUserAgent = "gophercloud/2.0.0" - -// UserAgent represents a User-Agent header. -type UserAgent struct { - // prepend is the slice of User-Agent strings to prepend to DefaultUserAgent. - // All the strings to prepend are accumulated and prepended in the Join method. 
- prepend []string -} - -// Prepend prepends a user-defined string to the default User-Agent string. Users -// may pass in one or more strings to prepend. -func (ua *UserAgent) Prepend(s ...string) { - ua.prepend = append(s, ua.prepend...) -} - -// Join concatenates all the user-defined User-Agend strings with the default -// Gophercloud User-Agent string. -func (ua *UserAgent) Join() string { - uaSlice := append(ua.prepend, DefaultUserAgent) - return strings.Join(uaSlice, " ") -} - -// ProviderClient stores details that are required to interact with any -// services within a specific provider's API. -// -// Generally, you acquire a ProviderClient by calling the NewClient method in -// the appropriate provider's child package, providing whatever authentication -// credentials are required. -type ProviderClient struct { - // IdentityBase is the base URL used for a particular provider's identity - // service - it will be used when issuing authenticatation requests. It - // should point to the root resource of the identity service, not a specific - // identity version. - IdentityBase string - - // IdentityEndpoint is the identity endpoint. This may be a specific version - // of the identity service. If this is the case, this endpoint is used rather - // than querying versions first. - IdentityEndpoint string - - // TokenID is the ID of the most recently issued valid token. - // NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application. - // To safely read or write this value, call `Token` or `SetToken`, respectively - TokenID string - - // EndpointLocator describes how this provider discovers the endpoints for - // its constituent services. - EndpointLocator EndpointLocator - - // HTTPClient allows users to interject arbitrary http, https, or other transit behaviors. - HTTPClient http.Client - - // UserAgent represents the User-Agent header in the HTTP request. - UserAgent UserAgent - - // ReauthFunc is the function used to re-authenticate the user if the request - // fails with a 401 HTTP response code. This a needed because there may be multiple - // authentication functions for different Identity service versions. - ReauthFunc func() error - - // Throwaway determines whether if this client is a throw-away client. It's a copy of user's provider client - // with the token and reauth func zeroed. Such client can be used to perform reauthorization. - Throwaway bool - - // Context is the context passed to the HTTP request. - Context context.Context - - // mut is a mutex for the client. It protects read and write access to client attributes such as getting - // and setting the TokenID. - mut *sync.RWMutex - - // reauthmut is a mutex for reauthentication it attempts to ensure that only one reauthentication - // attempt happens at one time. - reauthmut *reauthlock - - authResult AuthResult -} - -// reauthlock represents a set of attributes used to help in the reauthentication process. -type reauthlock struct { - sync.RWMutex - reauthing bool - reauthingErr error - done *sync.Cond -} - -// AuthenticatedHeaders returns a map of HTTP headers that are common for all -// authenticated service requests. Blocks if Reauthenticate is in progress. 
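Two of the knobs documented above as they are typically exercised: prepending an application tag to the User-Agent and enabling the token mutex before concurrent use. openstack.AuthenticatedClient is assumed from the same release and is not part of this hunk, and the "soda-api/1.0" tag is only an example value:

// Sketch: provider setup with a custom User-Agent and token locking.
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
)

func newProvider(opts gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		return nil, err
	}
	// Sent as "soda-api/1.0 gophercloud/2.0.0" on subsequent requests.
	provider.UserAgent.Prepend("soda-api/1.0")
	// Guard TokenID reads/writes when the client is shared across goroutines.
	provider.UseTokenLock()
	return provider, nil
}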
-func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) { - if client.IsThrowaway() { - return - } - if client.reauthmut != nil { - client.reauthmut.Lock() - for client.reauthmut.reauthing { - client.reauthmut.done.Wait() - } - client.reauthmut.Unlock() - } - t := client.Token() - if t == "" { - return - } - return map[string]string{"X-Auth-Token": t} -} - -// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token. -// If the application's ProviderClient is not used concurrently, this doesn't need to be called. -func (client *ProviderClient) UseTokenLock() { - client.mut = new(sync.RWMutex) - client.reauthmut = new(reauthlock) -} - -// GetAuthResult returns the result from the request that was used to obtain a -// provider client's Keystone token. -// -// The result is nil when authentication has not yet taken place, when the token -// was set manually with SetToken(), or when a ReauthFunc was used that does not -// record the AuthResult. -func (client *ProviderClient) GetAuthResult() AuthResult { - if client.mut != nil { - client.mut.RLock() - defer client.mut.RUnlock() - } - return client.authResult -} - -// Token safely reads the value of the auth token from the ProviderClient. Applications should -// call this method to access the token instead of the TokenID field -func (client *ProviderClient) Token() string { - if client.mut != nil { - client.mut.RLock() - defer client.mut.RUnlock() - } - return client.TokenID -} - -// SetToken safely sets the value of the auth token in the ProviderClient. Applications may -// use this method in a custom ReauthFunc. -// -// WARNING: This function is deprecated. Use SetTokenAndAuthResult() instead. -func (client *ProviderClient) SetToken(t string) { - if client.mut != nil { - client.mut.Lock() - defer client.mut.Unlock() - } - client.TokenID = t - client.authResult = nil -} - -// SetTokenAndAuthResult safely sets the value of the auth token in the -// ProviderClient and also records the AuthResult that was returned from the -// token creation request. Applications may call this in a custom ReauthFunc. -func (client *ProviderClient) SetTokenAndAuthResult(r AuthResult) error { - tokenID := "" - var err error - if r != nil { - tokenID, err = r.ExtractTokenID() - if err != nil { - return err - } - } - - if client.mut != nil { - client.mut.Lock() - defer client.mut.Unlock() - } - client.TokenID = tokenID - client.authResult = r - return nil -} - -// CopyTokenFrom safely copies the token from another ProviderClient into the -// this one. -func (client *ProviderClient) CopyTokenFrom(other *ProviderClient) { - if client.mut != nil { - client.mut.Lock() - defer client.mut.Unlock() - } - if other.mut != nil && other.mut != client.mut { - other.mut.RLock() - defer other.mut.RUnlock() - } - client.TokenID = other.TokenID - client.authResult = other.authResult -} - -// IsThrowaway safely reads the value of the client Throwaway field. -func (client *ProviderClient) IsThrowaway() bool { - if client.reauthmut != nil { - client.reauthmut.RLock() - defer client.reauthmut.RUnlock() - } - return client.Throwaway -} - -// SetThrowaway safely sets the value of the client Throwaway field. -func (client *ProviderClient) SetThrowaway(v bool) { - if client.reauthmut != nil { - client.reauthmut.Lock() - defer client.reauthmut.Unlock() - } - client.Throwaway = v -} - -// Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is -// called because of a 401 response, the caller may pass the previous token. 
In -// this case, the reauthentication can be skipped if another thread has already -// reauthenticated in the meantime. If no previous token is known, an empty -// string should be passed instead to force unconditional reauthentication. -func (client *ProviderClient) Reauthenticate(previousToken string) (err error) { - if client.ReauthFunc == nil { - return nil - } - - if client.reauthmut == nil { - return client.ReauthFunc() - } - - client.reauthmut.Lock() - if client.reauthmut.reauthing { - for !client.reauthmut.reauthing { - client.reauthmut.done.Wait() - } - err = client.reauthmut.reauthingErr - client.reauthmut.Unlock() - return err - } - client.reauthmut.Unlock() - - client.reauthmut.Lock() - client.reauthmut.reauthing = true - client.reauthmut.done = sync.NewCond(client.reauthmut) - client.reauthmut.reauthingErr = nil - client.reauthmut.Unlock() - - if previousToken == "" || client.TokenID == previousToken { - err = client.ReauthFunc() - } - - client.reauthmut.Lock() - client.reauthmut.reauthing = false - client.reauthmut.reauthingErr = err - client.reauthmut.done.Broadcast() - client.reauthmut.Unlock() - return -} - -// RequestOpts customizes the behavior of the provider.Request() method. -type RequestOpts struct { - // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The - // content type of the request will default to "application/json" unless overridden by MoreHeaders. - // It's an error to specify both a JSONBody and a RawBody. - JSONBody interface{} - // RawBody contains an io.Reader that will be consumed by the request directly. No content-type - // will be set unless one is provided explicitly by MoreHeaders. - RawBody io.Reader - // JSONResponse, if provided, will be populated with the contents of the response body parsed as - // JSON. - JSONResponse interface{} - // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If - // the response has a different code, an error will be returned. - OkCodes []int - // MoreHeaders specifies additional HTTP headers to be provide on the request. If a header is - // provided with a blank value (""), that header will be *omitted* instead: use this to suppress - // the default Accept header or an inferred Content-Type, for example. - MoreHeaders map[string]string - // ErrorContext specifies the resource error type to return if an error is encountered. - // This lets resources override default error messages based on the response status code. - ErrorContext error -} - -var applicationJSON = "application/json" - -// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication -// header will automatically be provided. -func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { - var body io.Reader - var contentType *string - - // Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided - // io.ReadSeeker as-is. Default the content-type to application/json. - if options.JSONBody != nil { - if options.RawBody != nil { - return nil, errors.New("please provide only one of JSONBody or RawBody to gophercloud.Request()") - } - - rendered, err := json.Marshal(options.JSONBody) - if err != nil { - return nil, err - } - - body = bytes.NewReader(rendered) - contentType = &applicationJSON - } - - if options.RawBody != nil { - body = options.RawBody - } - - // Construct the http.Request. 
- req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - if client.Context != nil { - req = req.WithContext(client.Context) - } - - // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to - // modify or omit any header. - if contentType != nil { - req.Header.Set("Content-Type", *contentType) - } - req.Header.Set("Accept", applicationJSON) - - // Set the User-Agent header - req.Header.Set("User-Agent", client.UserAgent.Join()) - - if options.MoreHeaders != nil { - for k, v := range options.MoreHeaders { - if v != "" { - req.Header.Set(k, v) - } else { - req.Header.Del(k) - } - } - } - - // get latest token from client - for k, v := range client.AuthenticatedHeaders() { - req.Header.Set(k, v) - } - - // Set connection parameter to close the connection immediately when we've got the response - req.Close = true - - prereqtok := req.Header.Get("X-Auth-Token") - - // Issue the request. - resp, err := client.HTTPClient.Do(req) - if err != nil { - return nil, err - } - - // Allow default OkCodes if none explicitly set - okc := options.OkCodes - if okc == nil { - okc = defaultOkCodes(method) - } - - // Validate the HTTP response status. - var ok bool - for _, code := range okc { - if resp.StatusCode == code { - ok = true - break - } - } - - if !ok { - body, _ := ioutil.ReadAll(resp.Body) - resp.Body.Close() - respErr := ErrUnexpectedResponseCode{ - URL: url, - Method: method, - Expected: options.OkCodes, - Actual: resp.StatusCode, - Body: body, - } - - errType := options.ErrorContext - switch resp.StatusCode { - case http.StatusBadRequest: - err = ErrDefault400{respErr} - if error400er, ok := errType.(Err400er); ok { - err = error400er.Error400(respErr) - } - case http.StatusUnauthorized: - if client.ReauthFunc != nil { - err = client.Reauthenticate(prereqtok) - if err != nil { - e := &ErrUnableToReauthenticate{} - e.ErrOriginal = respErr - return nil, e - } - if options.RawBody != nil { - if seeker, ok := options.RawBody.(io.Seeker); ok { - seeker.Seek(0, 0) - } - } - resp, err = client.Request(method, url, options) - if err != nil { - switch err.(type) { - case *ErrUnexpectedResponseCode: - e := &ErrErrorAfterReauthentication{} - e.ErrOriginal = err.(*ErrUnexpectedResponseCode) - return nil, e - default: - e := &ErrErrorAfterReauthentication{} - e.ErrOriginal = err - return nil, e - } - } - return resp, nil - } - err = ErrDefault401{respErr} - if error401er, ok := errType.(Err401er); ok { - err = error401er.Error401(respErr) - } - case http.StatusForbidden: - err = ErrDefault403{respErr} - if error403er, ok := errType.(Err403er); ok { - err = error403er.Error403(respErr) - } - case http.StatusNotFound: - err = ErrDefault404{respErr} - if error404er, ok := errType.(Err404er); ok { - err = error404er.Error404(respErr) - } - case http.StatusMethodNotAllowed: - err = ErrDefault405{respErr} - if error405er, ok := errType.(Err405er); ok { - err = error405er.Error405(respErr) - } - case http.StatusRequestTimeout: - err = ErrDefault408{respErr} - if error408er, ok := errType.(Err408er); ok { - err = error408er.Error408(respErr) - } - case http.StatusConflict: - err = ErrDefault409{respErr} - if error409er, ok := errType.(Err409er); ok { - err = error409er.Error409(respErr) - } - case 429: - err = ErrDefault429{respErr} - if error429er, ok := errType.(Err429er); ok { - err = error429er.Error429(respErr) - } - case http.StatusInternalServerError: - err = ErrDefault500{respErr} - if error500er, ok := errType.(Err500er); ok { - err = 
error500er.Error500(respErr) - } - case http.StatusServiceUnavailable: - err = ErrDefault503{respErr} - if error503er, ok := errType.(Err503er); ok { - err = error503er.Error503(respErr) - } - } - - if err == nil { - err = respErr - } - - return resp, err - } - - // Parse the response body as JSON, if requested to do so. - if options.JSONResponse != nil { - defer resp.Body.Close() - if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { - return nil, err - } - } - - return resp, nil -} - -func defaultOkCodes(method string) []int { - switch { - case method == "GET": - return []int{200} - case method == "POST": - return []int{201, 202} - case method == "PUT": - return []int{201, 202} - case method == "PATCH": - return []int{200, 202, 204} - case method == "DELETE": - return []int{202, 204} - } - - return []int{} -} diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go deleted file mode 100644 index 94a16bff0..000000000 --- a/vendor/github.com/gophercloud/gophercloud/results.go +++ /dev/null @@ -1,448 +0,0 @@ -package gophercloud - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "reflect" - "strconv" - "time" -) - -/* -Result is an internal type to be used by individual resource packages, but its -methods will be available on a wide variety of user-facing embedding types. - -It acts as a base struct that other Result types, returned from request -functions, can embed for convenience. All Results capture basic information -from the HTTP transaction that was performed, including the response body, -HTTP headers, and any errors that happened. - -Generally, each Result type will have an Extract method that can be used to -further interpret the result's payload in a specific context. Extensions or -providers can then provide additional extraction functions to pull out -provider- or extension-specific information as well. -*/ -type Result struct { - // Body is the payload of the HTTP response from the server. In most cases, - // this will be the deserialized JSON structure. - Body interface{} - - // Header contains the HTTP header structure from the original response. - Header http.Header - - // Err is an error that occurred during the operation. It's deferred until - // extraction to make it easier to chain the Extract call. - Err error -} - -// ExtractInto allows users to provide an object into which `Extract` will extract -// the `Result.Body`. This would be useful for OpenStack providers that have -// different fields in the response object than OpenStack proper. 
-func (r Result) ExtractInto(to interface{}) error { - if r.Err != nil { - return r.Err - } - - if reader, ok := r.Body.(io.Reader); ok { - if readCloser, ok := reader.(io.Closer); ok { - defer readCloser.Close() - } - return json.NewDecoder(reader).Decode(to) - } - - b, err := json.Marshal(r.Body) - if err != nil { - return err - } - err = json.Unmarshal(b, to) - - return err -} - -func (r Result) extractIntoPtr(to interface{}, label string) error { - if label == "" { - return r.ExtractInto(&to) - } - - var m map[string]interface{} - err := r.ExtractInto(&m) - if err != nil { - return err - } - - b, err := json.Marshal(m[label]) - if err != nil { - return err - } - - toValue := reflect.ValueOf(to) - if toValue.Kind() == reflect.Ptr { - toValue = toValue.Elem() - } - - switch toValue.Kind() { - case reflect.Slice: - typeOfV := toValue.Type().Elem() - if typeOfV.Kind() == reflect.Struct { - if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { - newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) - - if mSlice, ok := m[label].([]interface{}); ok { - for _, v := range mSlice { - // For each iteration of the slice, we create a new struct. - // This is to work around a bug where elements of a slice - // are reused and not overwritten when the same copy of the - // struct is used: - // - // https://github.com/golang/go/issues/21092 - // https://github.com/golang/go/issues/24155 - // https://play.golang.org/p/NHo3ywlPZli - newType := reflect.New(typeOfV).Elem() - - b, err := json.Marshal(v) - if err != nil { - return err - } - - // This is needed for structs with an UnmarshalJSON method. - // Technically this is just unmarshalling the response into - // a struct that is never used, but it's good enough to - // trigger the UnmarshalJSON method. - for i := 0; i < newType.NumField(); i++ { - s := newType.Field(i).Addr().Interface() - - // Unmarshal is used rather than NewDecoder to also work - // around the above-mentioned bug. - err = json.Unmarshal(b, s) - if err != nil { - return err - } - } - - newSlice = reflect.Append(newSlice, newType) - } - } - - // "to" should now be properly modeled to receive the - // JSON response body and unmarshal into all the correct - // fields of the struct or composed extension struct - // at the end of this method. - toValue.Set(newSlice) - } - } - case reflect.Struct: - typeOfV := toValue.Type() - if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { - for i := 0; i < toValue.NumField(); i++ { - toField := toValue.Field(i) - if toField.Kind() == reflect.Struct { - s := toField.Addr().Interface() - err = json.NewDecoder(bytes.NewReader(b)).Decode(s) - if err != nil { - return err - } - } - } - } - } - - err = json.Unmarshal(b, &to) - return err -} - -// ExtractIntoStructPtr will unmarshal the Result (r) into the provided -// interface{} (to). -// -// NOTE: For internal use only -// -// `to` must be a pointer to an underlying struct type -// -// If provided, `label` will be filtered out of the response -// body prior to `r` being unmarshalled into `to`. -func (r Result) ExtractIntoStructPtr(to interface{}, label string) error { - if r.Err != nil { - return r.Err - } - - t := reflect.TypeOf(to) - if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) - } - switch t.Elem().Kind() { - case reflect.Struct: - return r.extractIntoPtr(to, label) - default: - return fmt.Errorf("Expected pointer to struct, got: %v", t) - } -} - -// ExtractIntoSlicePtr will unmarshal the Result (r) into the provided -// interface{} (to). 
-// -// NOTE: For internal use only -// -// `to` must be a pointer to an underlying slice type -// -// If provided, `label` will be filtered out of the response -// body prior to `r` being unmarshalled into `to`. -func (r Result) ExtractIntoSlicePtr(to interface{}, label string) error { - if r.Err != nil { - return r.Err - } - - t := reflect.TypeOf(to) - if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) - } - switch t.Elem().Kind() { - case reflect.Slice: - return r.extractIntoPtr(to, label) - default: - return fmt.Errorf("Expected pointer to slice, got: %v", t) - } -} - -// PrettyPrintJSON creates a string containing the full response body as -// pretty-printed JSON. It's useful for capturing test fixtures and for -// debugging extraction bugs. If you include its output in an issue related to -// a buggy extraction function, we will all love you forever. -func (r Result) PrettyPrintJSON() string { - pretty, err := json.MarshalIndent(r.Body, "", " ") - if err != nil { - panic(err.Error()) - } - return string(pretty) -} - -// ErrResult is an internal type to be used by individual resource packages, but -// its methods will be available on a wide variety of user-facing embedding -// types. -// -// It represents results that only contain a potential error and -// nothing else. Usually, if the operation executed successfully, the Err field -// will be nil; otherwise it will be stocked with a relevant error. Use the -// ExtractErr method -// to cleanly pull it out. -type ErrResult struct { - Result -} - -// ExtractErr is a function that extracts error information, or nil, from a result. -func (r ErrResult) ExtractErr() error { - return r.Err -} - -/* -HeaderResult is an internal type to be used by individual resource packages, but -its methods will be available on a wide variety of user-facing embedding types. - -It represents a result that only contains an error (possibly nil) and an -http.Header. This is used, for example, by the objectstorage packages in -openstack, because most of the operations don't return response bodies, but do -have relevant information in headers. -*/ -type HeaderResult struct { - Result -} - -// ExtractInto allows users to provide an object into which `Extract` will -// extract the http.Header headers of the result. -func (r HeaderResult) ExtractInto(to interface{}) error { - if r.Err != nil { - return r.Err - } - - tmpHeaderMap := map[string]string{} - for k, v := range r.Header { - if len(v) > 0 { - tmpHeaderMap[k] = v[0] - } - } - - b, err := json.Marshal(tmpHeaderMap) - if err != nil { - return err - } - err = json.Unmarshal(b, to) - - return err -} - -// RFC3339Milli describes a common time format used by some API responses. 
-const RFC3339Milli = "2006-01-02T15:04:05.999999Z" - -type JSONRFC3339Milli time.Time - -func (jt *JSONRFC3339Milli) UnmarshalJSON(data []byte) error { - b := bytes.NewBuffer(data) - dec := json.NewDecoder(b) - var s string - if err := dec.Decode(&s); err != nil { - return err - } - t, err := time.Parse(RFC3339Milli, s) - if err != nil { - return err - } - *jt = JSONRFC3339Milli(t) - return nil -} - -const RFC3339MilliNoZ = "2006-01-02T15:04:05.999999" - -type JSONRFC3339MilliNoZ time.Time - -func (jt *JSONRFC3339MilliNoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339MilliNoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339MilliNoZ(t) - return nil -} - -type JSONRFC1123 time.Time - -func (jt *JSONRFC1123) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - return err - } - *jt = JSONRFC1123(t) - return nil -} - -type JSONUnix time.Time - -func (jt *JSONUnix) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - unix, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - t = time.Unix(unix, 0) - *jt = JSONUnix(t) - return nil -} - -// RFC3339NoZ is the time format used in Heat (Orchestration). -const RFC3339NoZ = "2006-01-02T15:04:05" - -type JSONRFC3339NoZ time.Time - -func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339NoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339NoZ(t) - return nil -} - -// RFC3339ZNoT is the time format used in Zun (Containers Service). -const RFC3339ZNoT = "2006-01-02 15:04:05-07:00" - -type JSONRFC3339ZNoT time.Time - -func (jt *JSONRFC3339ZNoT) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339ZNoT, s) - if err != nil { - return err - } - *jt = JSONRFC3339ZNoT(t) - return nil -} - -// RFC3339ZNoTNoZ is another time format used in Zun (Containers Service). -const RFC3339ZNoTNoZ = "2006-01-02 15:04:05" - -type JSONRFC3339ZNoTNoZ time.Time - -func (jt *JSONRFC3339ZNoTNoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339ZNoTNoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339ZNoTNoZ(t) - return nil -} - -/* -Link is an internal type to be used in packages of collection resources that are -paginated in a certain way. - -It's a response substructure common to many paginated collection results that is -used to point to related pages. Usually, the one we care about is the one with -Rel field set to "next". -*/ -type Link struct { - Href string `json:"href"` - Rel string `json:"rel"` -} - -/* -ExtractNextURL is an internal function useful for packages of collection -resources that are paginated in a certain way. - -It attempts to extract the "next" URL from slice of Link structs, or -"" if no such URL is present. 
-*/ -func ExtractNextURL(links []Link) (string, error) { - var url string - - for _, l := range links { - if l.Rel == "next" { - url = l.Href - } - } - - if url == "" { - return "", nil - } - - return url, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go deleted file mode 100644 index f222f05a6..000000000 --- a/vendor/github.com/gophercloud/gophercloud/service_client.go +++ /dev/null @@ -1,154 +0,0 @@ -package gophercloud - -import ( - "io" - "net/http" - "strings" -) - -// ServiceClient stores details required to interact with a specific service API implemented by a provider. -// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. -type ServiceClient struct { - // ProviderClient is a reference to the provider that implements this service. - *ProviderClient - - // Endpoint is the base URL of the service's API, acquired from a service catalog. - // It MUST end with a /. - Endpoint string - - // ResourceBase is the base URL shared by the resources within a service's API. It should include - // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used - // as-is, instead. - ResourceBase string - - // This is the service client type (e.g. compute, sharev2). - // NOTE: FOR INTERNAL USE ONLY. DO NOT SET. GOPHERCLOUD WILL SET THIS. - // It is only exported because it gets set in a different package. - Type string - - // The microversion of the service to use. Set this to use a particular microversion. - Microversion string - - // MoreHeaders allows users (or Gophercloud) to set service-wide headers on requests. Put another way, - // values set in this field will be set on all the HTTP requests the service client sends. - MoreHeaders map[string]string -} - -// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. -func (client *ServiceClient) ResourceBaseURL() string { - if client.ResourceBase != "" { - return client.ResourceBase - } - return client.Endpoint -} - -// ServiceURL constructs a URL for a resource belonging to this provider. -func (client *ServiceClient) ServiceURL(parts ...string) string { - return client.ResourceBaseURL() + strings.Join(parts, "/") -} - -func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) { - if v, ok := (JSONBody).(io.Reader); ok { - opts.RawBody = v - } else if JSONBody != nil { - opts.JSONBody = JSONBody - } - - if JSONResponse != nil { - opts.JSONResponse = JSONResponse - } - - if opts.MoreHeaders == nil { - opts.MoreHeaders = make(map[string]string) - } - - if client.Microversion != "" { - client.setMicroversionHeader(opts) - } -} - -// Get calls `Request` with the "GET" HTTP verb. -func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, JSONResponse, opts) - return client.Request("GET", url, opts) -} - -// Post calls `Request` with the "POST" HTTP verb. -func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("POST", url, opts) -} - -// Put calls `Request` with the "PUT" HTTP verb. 
-func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("PUT", url, opts) -} - -// Patch calls `Request` with the "PATCH" HTTP verb. -func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("PATCH", url, opts) -} - -// Delete calls `Request` with the "DELETE" HTTP verb. -func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, nil, opts) - return client.Request("DELETE", url, opts) -} - -// Head calls `Request` with the "HEAD" HTTP verb. -func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, nil, opts) - return client.Request("HEAD", url, opts) -} - -func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { - switch client.Type { - case "compute": - opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion - case "sharev2": - opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion - case "volume": - opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion - case "baremetal": - opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion - case "baremetal-introspection": - opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion - } - - if client.Type != "" { - opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion - } -} - -// Request carries out the HTTP operation for the service client -func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { - if len(client.MoreHeaders) > 0 { - if options == nil { - options = new(RequestOpts) - } - for k, v := range client.MoreHeaders { - options.MoreHeaders[k] = v - } - } - return client.ProviderClient.Request(method, url, options) -} diff --git a/vendor/github.com/gophercloud/gophercloud/util.go b/vendor/github.com/gophercloud/gophercloud/util.go deleted file mode 100644 index 68f9a5d3e..000000000 --- a/vendor/github.com/gophercloud/gophercloud/util.go +++ /dev/null @@ -1,102 +0,0 @@ -package gophercloud - -import ( - "fmt" - "net/url" - "path/filepath" - "strings" - "time" -) - -// WaitFor polls a predicate function, once per second, up to a timeout limit. -// This is useful to wait for a resource to transition to a certain state. -// To handle situations when the predicate might hang indefinitely, the -// predicate will be prematurely cancelled after the timeout. -// Resource packages will wrap this in a more convenient function that's -// specific to a certain resource, but it can also be useful on its own. -func WaitFor(timeout int, predicate func() (bool, error)) error { - type WaitForResult struct { - Success bool - Error error - } - - start := time.Now().Unix() - - for { - // If a timeout is set, and that's been exceeded, shut it down. 
- if timeout >= 0 && time.Now().Unix()-start >= int64(timeout) { - return fmt.Errorf("A timeout occurred") - } - - time.Sleep(1 * time.Second) - - var result WaitForResult - ch := make(chan bool, 1) - go func() { - defer close(ch) - satisfied, err := predicate() - result.Success = satisfied - result.Error = err - }() - - select { - case <-ch: - if result.Error != nil { - return result.Error - } - if result.Success { - return nil - } - // If the predicate has not finished by the timeout, cancel it. - case <-time.After(time.Duration(timeout) * time.Second): - return fmt.Errorf("A timeout occurred") - } - } -} - -// NormalizeURL is an internal function to be used by provider clients. -// -// It ensures that each endpoint URL has a closing `/`, as expected by -// ServiceClient's methods. -func NormalizeURL(url string) string { - if !strings.HasSuffix(url, "/") { - return url + "/" - } - return url -} - -// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as -// a reference in the filesystem, if necessary. basePath is assumed to contain -// either '.' when first used, or the file:// type fqdn of the parent resource. -// e.g. myFavScript.yaml => file://opt/lib/myFavScript.yaml -func NormalizePathURL(basePath, rawPath string) (string, error) { - u, err := url.Parse(rawPath) - if err != nil { - return "", err - } - // if a scheme is defined, it must be a fqdn already - if u.Scheme != "" { - return u.String(), nil - } - // if basePath is a url, then child resources are assumed to be relative to it - bu, err := url.Parse(basePath) - if err != nil { - return "", err - } - var basePathSys, absPathSys string - if bu.Scheme != "" { - basePathSys = filepath.FromSlash(bu.Path) - absPathSys = filepath.Join(basePathSys, rawPath) - bu.Path = filepath.ToSlash(absPathSys) - return bu.String(), nil - } - - absPathSys = filepath.Join(basePath, rawPath) - u.Path = filepath.ToSlash(absPathSys) - if err != nil { - return "", err - } - u.Scheme = "file" - return u.String(), nil - -} diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f0d1fb6a..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d177..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. 
Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go deleted file mode 100644 index 9d2d8a4ba..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package mousetrap - -// StartedByExplorer returns true if the program was invoked by the user -// double-clicking on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -// -// On non-Windows platforms, it always returns false. -func StartedByExplorer() bool { - return false -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 336142a5e..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows -// +build !go1.4 - -package mousetrap - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return - } - - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return - } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return - } - } -} - -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal 
calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - ppid, err := getppid() - if err != nil { - return false - } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE deleted file mode 100644 index 14127cd83..000000000 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -(The MIT License) - -Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
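The mousetrap package whose files are removed above exposes a single call, StartedByExplorer(), which reports whether the binary was launched by double-clicking it in Windows Explorer. As a hedged sketch of how a CLI that depends on it typically guards its entry point (illustrative only; the `main` package, the message text, and the pause-before-exit behaviour are assumptions and not part of this change):

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	// When launched from explorer.exe, the console window closes as soon as the
	// process exits, so print a hint and pause instead of dumping help and quitting.
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command-line tool; please run it from a terminal (cmd or PowerShell).")
		time.Sleep(5 * time.Second)
		os.Exit(1)
	}
	// Normal argument parsing and command dispatch would follow here.
}
```

This is roughly the guard that spf13/cobra (a direct dependency in go.mod) enables for Windows users, which is why mousetrap shows up in the vendor tree as an indirect dependency in the first place.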
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md deleted file mode 100644 index 949b77e30..000000000 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Windows Terminal Sequences - -This library allow for enabling Windows terminal color support for Go. - -See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details. - -## Usage - -```go -import ( - "syscall" - - sequences "github.com/konsorten/go-windows-terminal-sequences" -) - -func main() { - sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true) -} - -``` - -## Authors - -The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de). - -We thank all the authors who provided code to this library: - -* Felix Kollmann - -## License - -(The MIT License) - -Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
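The README above calls EnableVirtualTerminalProcessing without checking its return value; the sequences.go source removed just below shows that it does return an error. A minimal Windows-only sketch that checks it, assuming the same import path and package name (the file layout and logging here are illustrative, not part of this change):

```go
//go:build windows

package main

import (
	"log"
	"syscall"

	sequences "github.com/konsorten/go-windows-terminal-sequences"
)

func main() {
	// Turn on ANSI escape-sequence handling for the Windows console so that
	// colored output renders correctly instead of printing raw escape codes.
	// (Sketch only; how this repo actually acquires the transitive dependency
	// is not shown in this diff.)
	if err := sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true); err != nil {
		log.Printf("could not enable virtual terminal processing: %v", err)
	}
}
```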
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod deleted file mode 100644 index 716c61312..000000000 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/konsorten/go-windows-terminal-sequences diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go deleted file mode 100644 index ef18d8f97..000000000 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build windows - -package sequences - -import ( - "syscall" - "unsafe" -) - -var ( - kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll") - setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode") -) - -func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error { - const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4 - - var mode uint32 - err := syscall.GetConsoleMode(syscall.Stdout, &mode) - if err != nil { - return err - } - - if enable { - mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING - } else { - mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING - } - - ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode)) - if ret == 0 { - return err - } - - return nil -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE deleted file mode 100644 index 5d8cb5b72..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb946b..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be21437..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 258c0636a..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... 
- continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go deleted file mode 100644 index c318385cb..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 8fb59ad22..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml deleted file mode 100644 index d7b9589ab..000000000 --- a/vendor/github.com/mitchellh/copystructure/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.7 - - tip - -script: - - go test - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE deleted file mode 100644 index 229851590..000000000 --- a/vendor/github.com/mitchellh/copystructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md deleted file mode 100644 index bcb8c8d2c..000000000 --- a/vendor/github.com/mitchellh/copystructure/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# copystructure - -copystructure is a Go library for deep copying values in Go. - -This allows you to copy Go values that may contain reference values -such as maps, slices, or pointers, and copy their data as well instead -of just their references. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/copystructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). - -The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go deleted file mode 100644 index db6a6aa1a..000000000 --- a/vendor/github.com/mitchellh/copystructure/copier_time.go +++ /dev/null @@ -1,15 +0,0 @@ -package copystructure - -import ( - "reflect" - "time" -) - -func init() { - Copiers[reflect.TypeOf(time.Time{})] = timeCopier -} - -func timeCopier(v interface{}) (interface{}, error) { - // Just... copy it. 
- return v.(time.Time), nil -} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go deleted file mode 100644 index 140435255..000000000 --- a/vendor/github.com/mitchellh/copystructure/copystructure.go +++ /dev/null @@ -1,548 +0,0 @@ -package copystructure - -import ( - "errors" - "reflect" - "sync" - - "github.com/mitchellh/reflectwalk" -) - -// Copy returns a deep copy of v. -func Copy(v interface{}) (interface{}, error) { - return Config{}.Copy(v) -} - -// CopierFunc is a function that knows how to deep copy a specific type. -// Register these globally with the Copiers variable. -type CopierFunc func(interface{}) (interface{}, error) - -// Copiers is a map of types that behave specially when they are copied. -// If a type is found in this map while deep copying, this function -// will be called to copy it instead of attempting to copy all fields. -// -// The key should be the type, obtained using: reflect.TypeOf(value with type). -// -// It is unsafe to write to this map after Copies have started. If you -// are writing to this map while also copying, wrap all modifications to -// this map as well as to Copy in a mutex. -var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) - -// Must is a helper that wraps a call to a function returning -// (interface{}, error) and panics if the error is non-nil. It is intended -// for use in variable initializations and should only be used when a copy -// error should be a crashing case. -func Must(v interface{}, err error) interface{} { - if err != nil { - panic("copy error: " + err.Error()) - } - - return v -} - -var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") - -type Config struct { - // Lock any types that are a sync.Locker and are not a mutex while copying. - // If there is an RLocker method, use that to get the sync.Locker. - Lock bool - - // Copiers is a map of types associated with a CopierFunc. Use the global - // Copiers map if this is nil. - Copiers map[reflect.Type]CopierFunc -} - -func (c Config) Copy(v interface{}) (interface{}, error) { - if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { - return nil, errPointerRequired - } - - w := new(walker) - if c.Lock { - w.useLocks = true - } - - if c.Copiers == nil { - c.Copiers = Copiers - } - - err := reflectwalk.Walk(v, w) - if err != nil { - return nil, err - } - - // Get the result. If the result is nil, then we want to turn it - // into a typed nil if we can. - result := w.Result - if result == nil { - val := reflect.ValueOf(v) - result = reflect.Indirect(reflect.New(val.Type())).Interface() - } - - return result, nil -} - -// Return the key used to index interfaces types we've seen. Store the number -// of pointers in the upper 32bits, and the depth in the lower 32bits. This is -// easy to calculate, easy to match a key with our current depth, and we don't -// need to deal with initializing and cleaning up nested maps or slices. -func ifaceKey(pointers, depth int) uint64 { - return uint64(pointers)<<32 | uint64(depth) -} - -type walker struct { - Result interface{} - - depth int - ignoreDepth int - vals []reflect.Value - cs []reflect.Value - - // This stores the number of pointers we've walked over, indexed by depth. - ps []int - - // If an interface is indirected by a pointer, we need to know the type of - // interface to create when creating the new value. 
Store the interface - // types here, indexed by both the walk depth and the number of pointers - // already seen at that depth. Use ifaceKey to calculate the proper uint64 - // value. - ifaceTypes map[uint64]reflect.Type - - // any locks we've taken, indexed by depth - locks []sync.Locker - // take locks while walking the structure - useLocks bool -} - -func (w *walker) Enter(l reflectwalk.Location) error { - w.depth++ - - // ensure we have enough elements to index via w.depth - for w.depth >= len(w.locks) { - w.locks = append(w.locks, nil) - } - - for len(w.ps) < w.depth+1 { - w.ps = append(w.ps, 0) - } - - return nil -} - -func (w *walker) Exit(l reflectwalk.Location) error { - locker := w.locks[w.depth] - w.locks[w.depth] = nil - if locker != nil { - defer locker.Unlock() - } - - // clear out pointers and interfaces as we exit the stack - w.ps[w.depth] = 0 - - for k := range w.ifaceTypes { - mask := uint64(^uint32(0)) - if k&mask == uint64(w.depth) { - delete(w.ifaceTypes, k) - } - } - - w.depth-- - if w.ignoreDepth > w.depth { - w.ignoreDepth = 0 - } - - if w.ignoring() { - return nil - } - - switch l { - case reflectwalk.Array: - fallthrough - case reflectwalk.Map: - fallthrough - case reflectwalk.Slice: - w.replacePointerMaybe() - - // Pop map off our container - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.MapValue: - // Pop off the key and value - mv := w.valPop() - mk := w.valPop() - m := w.cs[len(w.cs)-1] - - // If mv is the zero value, SetMapIndex deletes the key form the map, - // or in this case never adds it. We need to create a properly typed - // zero value so that this key can be set. - if !mv.IsValid() { - mv = reflect.Zero(m.Elem().Type().Elem()) - } - m.Elem().SetMapIndex(mk, mv) - case reflectwalk.ArrayElem: - // Pop off the value and the index and set it on the array - v := w.valPop() - i := w.valPop().Interface().(int) - if v.IsValid() { - a := w.cs[len(w.cs)-1] - ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call - if ae.CanSet() { - ae.Set(v) - } - } - case reflectwalk.SliceElem: - // Pop off the value and the index and set it on the slice - v := w.valPop() - i := w.valPop().Interface().(int) - if v.IsValid() { - s := w.cs[len(w.cs)-1] - se := s.Elem().Index(i) - if se.CanSet() { - se.Set(v) - } - } - case reflectwalk.Struct: - w.replacePointerMaybe() - - // Remove the struct from the container stack - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.StructField: - // Pop off the value and the field - v := w.valPop() - f := w.valPop().Interface().(reflect.StructField) - if v.IsValid() { - s := w.cs[len(w.cs)-1] - sf := reflect.Indirect(s).FieldByName(f.Name) - - if sf.CanSet() { - sf.Set(v) - } - } - case reflectwalk.WalkLoc: - // Clear out the slices for GC - w.cs = nil - w.vals = nil - } - - return nil -} - -func (w *walker) Map(m reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(m) - - // Create the map. 
If the map itself is nil, then just make a nil map - var newMap reflect.Value - if m.IsNil() { - newMap = reflect.New(m.Type()) - } else { - newMap = wrapPtr(reflect.MakeMap(m.Type())) - } - - w.cs = append(w.cs, newMap) - w.valPush(newMap) - return nil -} - -func (w *walker) MapElem(m, k, v reflect.Value) error { - return nil -} - -func (w *walker) PointerEnter(v bool) error { - if v { - w.ps[w.depth]++ - } - return nil -} - -func (w *walker) PointerExit(v bool) error { - if v { - w.ps[w.depth]-- - } - return nil -} - -func (w *walker) Interface(v reflect.Value) error { - if !v.IsValid() { - return nil - } - if w.ifaceTypes == nil { - w.ifaceTypes = make(map[uint64]reflect.Type) - } - - w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() - return nil -} - -func (w *walker) Primitive(v reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(v) - - // IsValid verifies the v is non-zero and CanInterface verifies - // that we're allowed to read this value (unexported fields). - var newV reflect.Value - if v.IsValid() && v.CanInterface() { - newV = reflect.New(v.Type()) - newV.Elem().Set(v) - } - - w.valPush(newV) - w.replacePointerMaybe() - return nil -} - -func (w *walker) Slice(s reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(s) - - var newS reflect.Value - if s.IsNil() { - newS = reflect.New(s.Type()) - } else { - newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) - } - - w.cs = append(w.cs, newS) - w.valPush(newS) - return nil -} - -func (w *walker) SliceElem(i int, elem reflect.Value) error { - if w.ignoring() { - return nil - } - - // We don't write the slice here because elem might still be - // arbitrarily complex. Just record the index and continue on. - w.valPush(reflect.ValueOf(i)) - - return nil -} - -func (w *walker) Array(a reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(a) - - newA := reflect.New(a.Type()) - - w.cs = append(w.cs, newA) - w.valPush(newA) - return nil -} - -func (w *walker) ArrayElem(i int, elem reflect.Value) error { - if w.ignoring() { - return nil - } - - // We don't write the array here because elem might still be - // arbitrarily complex. Just record the index and continue on. - w.valPush(reflect.ValueOf(i)) - - return nil -} - -func (w *walker) Struct(s reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(s) - - var v reflect.Value - if c, ok := Copiers[s.Type()]; ok { - // We have a Copier for this struct, so we use that copier to - // get the copy, and we ignore anything deeper than this. - w.ignoreDepth = w.depth - - dup, err := c(s.Interface()) - if err != nil { - return err - } - - // We need to put a pointer to the value on the value stack, - // so allocate a new pointer and set it. - v = reflect.New(s.Type()) - reflect.Indirect(v).Set(reflect.ValueOf(dup)) - } else { - // No copier, we copy ourselves and allow reflectwalk to guide - // us deeper into the structure for copying. - v = reflect.New(s.Type()) - } - - // Push the value onto the value stack for setting the struct field, - // and add the struct itself to the containers stack in case we walk - // deeper so that its own fields can be modified. - w.valPush(v) - w.cs = append(w.cs, v) - - return nil -} - -func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { - if w.ignoring() { - return nil - } - - // If PkgPath is non-empty, this is a private (unexported) field. - // We do not set this unexported since the Go runtime doesn't allow us. 
- if f.PkgPath != "" { - return reflectwalk.SkipEntry - } - - // Push the field onto the stack, we'll handle it when we exit - // the struct field in Exit... - w.valPush(reflect.ValueOf(f)) - return nil -} - -// ignore causes the walker to ignore any more values until we exit this on -func (w *walker) ignore() { - w.ignoreDepth = w.depth -} - -func (w *walker) ignoring() bool { - return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth -} - -func (w *walker) pointerPeek() bool { - return w.ps[w.depth] > 0 -} - -func (w *walker) valPop() reflect.Value { - result := w.vals[len(w.vals)-1] - w.vals = w.vals[:len(w.vals)-1] - - // If we're out of values, that means we popped everything off. In - // this case, we reset the result so the next pushed value becomes - // the result. - if len(w.vals) == 0 { - w.Result = nil - } - - return result -} - -func (w *walker) valPush(v reflect.Value) { - w.vals = append(w.vals, v) - - // If we haven't set the result yet, then this is the result since - // it is the first (outermost) value we're seeing. - if w.Result == nil && v.IsValid() { - w.Result = v.Interface() - } -} - -func (w *walker) replacePointerMaybe() { - // Determine the last pointer value. If it is NOT a pointer, then - // we need to push that onto the stack. - if !w.pointerPeek() { - w.valPush(reflect.Indirect(w.valPop())) - return - } - - v := w.valPop() - - // If the expected type is a pointer to an interface of any depth, - // such as *interface{}, **interface{}, etc., then we need to convert - // the value "v" from *CONCRETE to *interface{} so types match for - // Set. - // - // Example if v is type *Foo where Foo is a struct, v would become - // *interface{} instead. This only happens if we have an interface expectation - // at this depth. - // - // For more info, see GH-16 - if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { - y := reflect.New(iType) // Create *interface{} - y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) - v = y // v is now typed *interface{} (where *v = Foo) - } - - for i := 1; i < w.ps[w.depth]; i++ { - if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { - iface := reflect.New(iType).Elem() - iface.Set(v) - v = iface - } - - p := reflect.New(v.Type()) - p.Elem().Set(v) - v = p - } - - w.valPush(v) -} - -// if this value is a Locker, lock it and add it to the locks slice -func (w *walker) lock(v reflect.Value) { - if !w.useLocks { - return - } - - if !v.IsValid() || !v.CanInterface() { - return - } - - type rlocker interface { - RLocker() sync.Locker - } - - var locker sync.Locker - - // We can't call Interface() on a value directly, since that requires - // a copy. This is OK, since the pointer to a value which is a sync.Locker - // is also a sync.Locker. 
- if v.Kind() == reflect.Ptr { - switch l := v.Interface().(type) { - case rlocker: - // don't lock a mutex directly - if _, ok := l.(*sync.RWMutex); !ok { - locker = l.RLocker() - } - case sync.Locker: - locker = l - } - } else if v.CanAddr() { - switch l := v.Addr().Interface().(type) { - case rlocker: - // don't lock a mutex directly - if _, ok := l.(*sync.RWMutex); !ok { - locker = l.RLocker() - } - case sync.Locker: - locker = l - } - } - - // still no callable locker - if locker == nil { - return - } - - // don't lock a mutex directly - switch locker.(type) { - case *sync.Mutex, *sync.RWMutex: - return - } - - locker.Lock() - w.locks[w.depth] = locker -} - -// wrapPtr is a helper that takes v and always make it *v. copystructure -// stores things internally as pointers until the last moment before unwrapping -func wrapPtr(v reflect.Value) reflect.Value { - if !v.IsValid() { - return v - } - vPtr := reflect.New(v.Type()) - vPtr.Elem().Set(v) - return vPtr -} diff --git a/vendor/github.com/mitchellh/copystructure/go.mod b/vendor/github.com/mitchellh/copystructure/go.mod deleted file mode 100644 index d01864309..000000000 --- a/vendor/github.com/mitchellh/copystructure/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/mitchellh/copystructure - -require github.com/mitchellh/reflectwalk v1.0.0 diff --git a/vendor/github.com/mitchellh/copystructure/go.sum b/vendor/github.com/mitchellh/copystructure/go.sum deleted file mode 100644 index be5724561..000000000 --- a/vendor/github.com/mitchellh/copystructure/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml deleted file mode 100644 index 1689c7d73..000000000 --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - "1.11.x" - - tip - -script: - - go test diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md deleted file mode 100644 index 3b3cb723f..000000000 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ /dev/null @@ -1,21 +0,0 @@ -## 1.1.2 - -* Fix error when decode hook decodes interface implementation into interface - type. [GH-140] - -## 1.1.1 - -* Fix panic that can happen in `decodePtr` - -## 1.1.0 - -* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] -* Support struct to struct decoding [GH-137] -* If source map value is nil, then destination map value is nil (instead of empty) -* If source slice value is nil, then destination slice value is nil (instead of empty) -* If source pointer is nil, then destination pointer is set to nil (instead of - allocated zero value of type) - -## 1.0.0 - -* Initial tagged stable release. 
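A minimal sketch of the copystructure API removed above (Copy, Must), assuming the pre-removal vendored behaviour; the Profile type and its values are hypothetical:

```go
// Copy walks the value with reflectwalk and returns a deep copy, so mutating
// the copy's nested maps, slices, or pointers does not touch the original.
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

type Profile struct {
	Name string
	Tags map[string]string
}

func main() {
	orig := Profile{Name: "block-pool", Tags: map[string]string{"tier": "gold"}}

	// Copy returns interface{}, so the result is asserted back to the concrete type;
	// Must panics if the copy fails, which keeps initialization code short.
	dup := copystructure.Must(copystructure.Copy(orig)).(Profile)
	dup.Tags["tier"] = "silver" // does not affect orig.Tags

	fmt.Println(orig.Tags["tier"], dup.Tags["tier"]) // gold silver
}
```

Per the deleted Config docs, Config{Lock: true}.Copy(&v) can be used instead when the source contains sync.Locker values that should be held while walking, at the cost of requiring a pointer argument.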
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d9..000000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. 
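A minimal sketch of the usage pattern the deleted README describes: read the dynamic "type" key from a generic map first, then decode the rest into the matching Go struct. The Person type is an illustrative stand-in:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string `mapstructure:"name"`
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// Decode into a generic map because the concrete shape depends on "type".
	var generic map[string]interface{}
	if err := json.Unmarshal(raw, &generic); err != nil {
		log.Fatal(err)
	}

	if generic["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(generic, &p); err != nil {
			log.Fatal(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```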
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 1f0abc65a..000000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,217 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Type, to reflect.Type, - data interface{}) (interface{}, error) { - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from, to, data) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), data) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - var err error - for _, f1 := range fs { - data, err = DecodeHookExec(f1, f, t, data) - if err != nil { - return nil, err - } - - // Modify the from kind to be correct with the new data - f = nil - if val := reflect.ValueOf(data); val.IsValid() { - f = val.Type() - } - } - - return data, nil - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. 
-func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. -func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5af..000000000 --- a/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. 
-type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod deleted file mode 100644 index d2a712562..000000000 --- a/vendor/github.com/mitchellh/mapstructure/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/mapstructure diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index 256ee63fb..000000000 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,1149 +0,0 @@ -// Package mapstructure exposes functionality to convert an arbitrary -// map[string]interface{} into a native Go structure. -// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type should be DecodeHookFuncType or DecodeHookFuncKind. -// Either is accepted. Types are a superset of Kinds (Types can return -// Kinds) and are generally a richer thing to use, but Kinds are simpler -// if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. - // - // If an error is returned, the entire decode will fail with that - // error. 
- DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). - ErrorUnused bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. -func Decode(input interface{}, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// DecodeMetadata is the same as Decode, but is shorthand to -// enable metadata collection. See DecoderConfig for more info. 
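A minimal sketch of the WeaklyTypedInput and Metadata behaviour documented in the deleted DecoderConfig comments above; the Listener type and the input map are hypothetical:

```go
// WeaklyTypedInput lets "8080" decode into an int field and 1 into a bool
// field, and Metadata records which input keys were used or left unused.
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Listener struct {
	Port    int  `mapstructure:"port"`
	Verbose bool `mapstructure:"verbose"`
}

func main() {
	input := map[string]interface{}{
		"port":    "8080", // string, weakly decoded into int
		"verbose": 1,      // int, weakly decoded into bool
		"extra":   "ignored",
	}

	var out Listener
	var md mapstructure.Metadata
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true,
		Metadata:         &md,
		Result:           &out,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(input); err != nil {
		log.Fatal(err)
	}

	fmt.Println(out.Port, out.Verbose) // 8080 true
	fmt.Println(md.Unused)             // [extra]
}
```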
-func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecodeMetadata is the same as Decode, but is shorthand to -// enable both WeaklyTypedInput and metadata collection. See -// DecoderConfig for more info. -func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } - } - - if input == nil { - // If the data is nil, then we don't set anything, unless ZeroFields is set - // to true. - if d.config.ZeroFields { - outVal.Set(reflect.Zero(outVal.Type())) - - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - } - return nil - } - - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the input. 
- var err error - input, err = DecodeHookExec( - d.config.DecodeHook, - inputVal.Type(), outVal.Type(), input) - if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) - } - } - - var err error - outputKind := getKind(outVal) - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, outputKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metainput. - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. -func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - if val.IsValid() && val.Elem().IsValid() { - return d.decode(name, data, val.Elem()) - } - - dataVal := reflect.ValueOf(data) - - // If the input data is a pointer, and the assigned type is the dereference - // of that exact pointer, then indirect it so that we can assign it. 
- // Example: *string to string - if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { - dataVal = reflect.Indirect(dataVal) - } - - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput, - dataKind == reflect.Array && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch elemKind { - case reflect.Uint8: - var uints []uint8 - if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) - for i := range uints { - uints[i] = dataVal.Index(i).Interface().(uint8) - } - } else { - uints = dataVal.Interface().([]uint8) - } - val.SetString(string(uints)) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - 
val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type and based on the input type jump to the proper func - dataVal := 
reflect.Indirect(reflect.ValueOf(data)) - switch dataVal.Kind() { - case reflect.Map: - return d.decodeMapFromMap(name, dataVal, val, valMap) - - case reflect.Struct: - return d.decodeMapFromStruct(name, dataVal, val, valMap) - - case reflect.Array, reflect.Slice: - if d.config.WeaklyTypedInput { - return d.decodeMapFromSlice(name, dataVal, val, valMap) - } - - fallthrough - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - fmt.Sprintf("%s[%d]", name, i), - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // Accumulate errors - errors := make([]string, 0) - - // If the input data is empty, then we just match what the input data is. - if dataVal.Len() == 0 { - if dataVal.IsNil() { - if !val.IsNil() { - val.Set(dataVal) - } - } else { - // Set to empty allocated value - val.Set(valMap) - } - - return nil - } - - for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - typ := dataVal.Type() - for i := 0; i < typ.NumField(); i++ { - // Get the StructField first since this is a cheap operation. If the - // field is unexported, then ignore it. - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - // Next get the actual value of this field and verify it is assignable - // to the map value. - v := dataVal.Field(i) - if !v.Type().AssignableTo(valMap.Type().Elem()) { - return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) - } - - tagValue := f.Tag.Get(d.config.TagName) - tagParts := strings.Split(tagValue, ",") - - // Determine the name of the key in the map - keyName := f.Name - if tagParts[0] != "" { - if tagParts[0] == "-" { - continue - } - keyName = tagParts[0] - } - - // If "squash" is specified in the tag, we squash the field down. 
- squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - if squash && v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) - } - - switch v.Kind() { - // this is an embedded struct, so handle it differently - case reflect.Struct: - x := reflect.New(v.Type()) - x.Elem().Set(v) - - vType := valMap.Type() - vKeyType := vType.Key() - vElemType := vType.Elem() - mType := reflect.MapOf(vKeyType, vElemType) - vMap := reflect.MakeMap(mType) - - err := d.decode(keyName, x.Interface(), vMap) - if err != nil { - return err - } - - if squash { - for _, k := range vMap.MapKeys() { - valMap.SetMapIndex(k, vMap.MapIndex(k)) - } - } else { - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) - } - - default: - valMap.SetMapIndex(reflect.ValueOf(keyName), v) - } - } - - if val.CanAddr() { - val.Set(valMap) - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { - // If the input data is nil, then we want to just set the output - // pointer to be nil as well. - isNil := data == nil - if !isNil { - switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { - case reflect.Chan, - reflect.Func, - reflect.Interface, - reflect.Map, - reflect.Ptr, - reflect.Slice: - isNil = v.IsNil() - } - } - if isNil { - if !val.IsNil() && val.CanSet() { - nilValue := reflect.New(val.Type()).Elem() - val.Set(nilValue) - } - - return nil - } - - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - valType := val.Type() - valElemType := valType.Elem() - if val.CanSet() { - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err - } - - val.Set(realVal) - } else { - if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return err - } - } - return nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - if d.config.WeaklyTypedInput { - switch { - // Slice and array we use the normal logic - case dataValKind == reflect.Slice, dataValKind == reflect.Array: - break - - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. 
- default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - - // If the input value is empty, then don't allocate since non-nil != nil - if dataVal.Len() == 0 { - return nil - } - - // Make a new slice to hold our result, same size as the original data. - valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := fmt.Sprintf("%s[%d]", name, i) - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - arrayType := reflect.ArrayOf(valType.Len(), valElemType) - - valArray := val - - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty arrays - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.Zero(arrayType)) - return nil - } - - // All other types we try to convert to the array type - // and "lift" it into it. i.e. a string becomes a string array. - default: - // Just re-try this function with data as a slice. - return d.decodeArray(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - if dataVal.Len() > arrayType.Len() { - return fmt.Errorf( - "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - - } - - // Make a new array to hold our result, same size as the original data. - valArray = reflect.New(arrayType).Elem() - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valArray.Index(i) - - fieldName := fmt.Sprintf("%s[%d]", name, i) - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the array we built up - val.Set(valArray) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. 
- if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - switch dataValKind { - case reflect.Map: - return d.decodeStructFromMap(name, dataVal, val) - - case reflect.Struct: - // Not the most efficient way to do this but we can optimize later if - // we want to. To convert from struct to struct we go to map first - // as an intermediary. - m := make(map[string]interface{}) - mval := reflect.Indirect(reflect.ValueOf(&m)) - if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { - return err - } - - result := d.decodeStructFromMap(name, mval, val) - return result - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldKind := fieldType.Type.Kind() - - // If "squash" is specified in the tag, we squash the field down. - squash := false - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) - } else { - structs = append(structs, structVal.FieldByName(fieldType.Name)) - } - continue - } - - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) - } - } - - // for fieldType, field := range fields { - for _, f := range fields { - field, fieldValue := f.field, f.val - fieldName := field.Name - - tagValue := field.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if strings.EqualFold(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Just ignore. 
- continue - } - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. - if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - } - - if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) - } - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = fmt.Sprintf("%s.%s", name, key) - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - } - - return nil -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml deleted file mode 100644 index 4f2ee4d97..000000000 --- a/vendor/github.com/mitchellh/reflectwalk/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/reflectwalk/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md deleted file mode 100644 index ac82cd2e1..000000000 --- a/vendor/github.com/mitchellh/reflectwalk/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# reflectwalk - -reflectwalk is a Go library for "walking" a value in Go using reflection, -in the same way a directory tree can be "walked" on the filesystem. Walking -a complex structure can allow you to do manipulations on unknown structures -such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod deleted file mode 100644 index 52bb7c469..000000000 --- a/vendor/github.com/mitchellh/reflectwalk/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/reflectwalk diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go deleted file mode 100644 index 6a7f17611..000000000 --- a/vendor/github.com/mitchellh/reflectwalk/location.go +++ /dev/null @@ -1,19 +0,0 @@ -package reflectwalk - -//go:generate stringer -type=Location location.go - -type Location uint - -const ( - None Location = iota - Map - MapKey - MapValue - Slice - SliceElem - Array - ArrayElem - Struct - StructField - WalkLoc -) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go deleted file mode 100644 index 70760cf4c..000000000 --- a/vendor/github.com/mitchellh/reflectwalk/location_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. - -package reflectwalk - -import "fmt" - -const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" - -var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} - -func (i Location) String() string { - if i >= Location(len(_Location_index)-1) { - return fmt.Sprintf("Location(%d)", i) - } - return _Location_name[_Location_index[i]:_Location_index[i+1]] -} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go deleted file mode 100644 index d7ab7b6d7..000000000 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ /dev/null @@ -1,401 +0,0 @@ -// reflectwalk is a package that allows you to "walk" complex structures -// similar to how you may "walk" a filesystem: visiting every element one -// by one and calling callback functions allowing you to handle and manipulate -// those elements. -package reflectwalk - -import ( - "errors" - "reflect" -) - -// PrimitiveWalker implementations are able to handle primitive values -// within complex structures. Primitive values are numbers, strings, -// booleans, funcs, chans. -// -// These primitive values are often members of more complex -// structures (slices, maps, etc.) that are walkable by other interfaces. -type PrimitiveWalker interface { - Primitive(reflect.Value) error -} - -// InterfaceWalker implementations are able to handle interface values as they -// are encountered during the walk. -type InterfaceWalker interface { - Interface(reflect.Value) error -} - -// MapWalker implementations are able to handle individual elements -// found within a map structure. -type MapWalker interface { - Map(m reflect.Value) error - MapElem(m, k, v reflect.Value) error -} - -// SliceWalker implementations are able to handle slice elements found -// within complex structures. 
-type SliceWalker interface { - Slice(reflect.Value) error - SliceElem(int, reflect.Value) error -} - -// ArrayWalker implementations are able to handle array elements found -// within complex structures. -type ArrayWalker interface { - Array(reflect.Value) error - ArrayElem(int, reflect.Value) error -} - -// StructWalker is an interface that has methods that are called for -// structs when a Walk is done. -type StructWalker interface { - Struct(reflect.Value) error - StructField(reflect.StructField, reflect.Value) error -} - -// EnterExitWalker implementations are notified before and after -// they walk deeper into complex structures (into struct fields, -// into slice elements, etc.) -type EnterExitWalker interface { - Enter(Location) error - Exit(Location) error -} - -// PointerWalker implementations are notified when the value they're -// walking is a pointer or not. Pointer is called for _every_ value whether -// it is a pointer or not. -type PointerWalker interface { - PointerEnter(bool) error - PointerExit(bool) error -} - -// SkipEntry can be returned from walk functions to skip walking -// the value of this field. This is only valid in the following functions: -// -// - Struct: skips all fields from being walked -// - StructField: skips walking the struct value -// -var SkipEntry = errors.New("skip this entry") - -// Walk takes an arbitrary value and an interface and traverses the -// value, calling callbacks on the interface if they are supported. -// The interface should implement one or more of the walker interfaces -// in this package, such as PrimitiveWalker, StructWalker, etc. -func Walk(data, walker interface{}) (err error) { - v := reflect.ValueOf(data) - ew, ok := walker.(EnterExitWalker) - if ok { - err = ew.Enter(WalkLoc) - } - - if err == nil { - err = walk(v, walker) - } - - if ok && err == nil { - err = ew.Exit(WalkLoc) - } - - return -} - -func walk(v reflect.Value, w interface{}) (err error) { - // Determine if we're receiving a pointer and if so notify the walker. - // The logic here is convoluted but very important (tests will fail if - // almost any part is changed). I will try to explain here. - // - // First, we check if the value is an interface, if so, we really need - // to check the interface's VALUE to see whether it is a pointer. - // - // Check whether the value is then a pointer. If so, then set pointer - // to true to notify the user. - // - // If we still have a pointer or an interface after the indirections, then - // we unwrap another level - // - // At this time, we also set "v" to be the dereferenced value. This is - // because once we've unwrapped the pointer we want to use that value. - pointer := false - pointerV := v - - for { - if pointerV.Kind() == reflect.Interface { - if iw, ok := w.(InterfaceWalker); ok { - if err = iw.Interface(pointerV); err != nil { - return - } - } - - pointerV = pointerV.Elem() - } - - if pointerV.Kind() == reflect.Ptr { - pointer = true - v = reflect.Indirect(pointerV) - } - if pw, ok := w.(PointerWalker); ok { - if err = pw.PointerEnter(pointer); err != nil { - return - } - - defer func(pointer bool) { - if err != nil { - return - } - - err = pw.PointerExit(pointer) - }(pointer) - } - - if pointer { - pointerV = v - } - pointer = false - - // If we still have a pointer or interface we have to indirect another level. 
- switch pointerV.Kind() { - case reflect.Ptr, reflect.Interface: - continue - } - break - } - - // We preserve the original value here because if it is an interface - // type, we want to pass that directly into the walkPrimitive, so that - // we can set it. - originalV := v - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - k := v.Kind() - if k >= reflect.Int && k <= reflect.Complex128 { - k = reflect.Int - } - - switch k { - // Primitives - case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: - err = walkPrimitive(originalV, w) - return - case reflect.Map: - err = walkMap(v, w) - return - case reflect.Slice: - err = walkSlice(v, w) - return - case reflect.Struct: - err = walkStruct(v, w) - return - case reflect.Array: - err = walkArray(v, w) - return - default: - panic("unsupported type: " + k.String()) - } -} - -func walkMap(v reflect.Value, w interface{}) error { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Map) - } - - if mw, ok := w.(MapWalker); ok { - if err := mw.Map(v); err != nil { - return err - } - } - - for _, k := range v.MapKeys() { - kv := v.MapIndex(k) - - if mw, ok := w.(MapWalker); ok { - if err := mw.MapElem(v, k, kv); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(MapKey) - } - - if err := walk(k, w); err != nil { - return err - } - - if ok { - ew.Exit(MapKey) - ew.Enter(MapValue) - } - - if err := walk(kv, w); err != nil { - return err - } - - if ok { - ew.Exit(MapValue) - } - } - - if ewok { - ew.Exit(Map) - } - - return nil -} - -func walkPrimitive(v reflect.Value, w interface{}) error { - if pw, ok := w.(PrimitiveWalker); ok { - return pw.Primitive(v) - } - - return nil -} - -func walkSlice(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Slice) - } - - if sw, ok := w.(SliceWalker); ok { - if err := sw.Slice(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if sw, ok := w.(SliceWalker); ok { - if err := sw.SliceElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(SliceElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { - ew.Exit(SliceElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Slice) - } - - return nil -} - -func walkArray(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Array) - } - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.Array(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.ArrayElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(ArrayElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { - ew.Exit(ArrayElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Array) - } - - return nil -} - -func walkStruct(v reflect.Value, w interface{}) (err error) { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Struct) - } - - skip := false - if sw, ok := w.(StructWalker); ok { - err = sw.Struct(v) - if err == SkipEntry { - skip = true - err = nil - } - if err != nil { - return - } - } - - if !skip { - vt := v.Type() - for i := 0; i < vt.NumField(); i++ { - sf := vt.Field(i) - f := v.FieldByIndex([]int{i}) - - if sw, ok := w.(StructWalker); ok { - err = sw.StructField(sf, f) - - // SkipEntry just pretends this field doesn't 
even exist - if err == SkipEntry { - continue - } - - if err != nil { - return - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(StructField) - } - - err = walk(f, w) - if err != nil { - return - } - - if ok { - ew.Exit(StructField) - } - } - } - - if ewok { - ew.Exit(Struct) - } - - return nil -} diff --git a/vendor/github.com/mschoch/smat/.gitignore b/vendor/github.com/mschoch/smat/.gitignore deleted file mode 100644 index eee880759..000000000 --- a/vendor/github.com/mschoch/smat/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -#* -*.sublime-* -*~ -.#* -.project -.settings -**/.idea/ -**/*.iml -/examples/bolt/boltsmat-fuzz.zip -/examples/bolt/workdir/ -.DS_Store -coverage.out -*.test -tags diff --git a/vendor/github.com/mschoch/smat/.travis.yml b/vendor/github.com/mschoch/smat/.travis.yml deleted file mode 100644 index 3c9c34636..000000000 --- a/vendor/github.com/mschoch/smat/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -sudo: false -language: go -go: -- 1.6 -script: -- go get golang.org/x/tools/cmd/cover -- go get github.com/mattn/goveralls -- go get github.com/kisielk/errcheck -- go test -v -race -- go vet -- errcheck ./... -- go test -coverprofile=profile.out -covermode=count -- goveralls -service=travis-ci -coverprofile=profile.out -repotoken $COVERALLS -notifications: - email: - - marty.schoch@gmail.com diff --git a/vendor/github.com/mschoch/smat/LICENSE b/vendor/github.com/mschoch/smat/LICENSE deleted file mode 100644 index 7a4a3ea24..000000000 --- a/vendor/github.com/mschoch/smat/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/mschoch/smat/README.md b/vendor/github.com/mschoch/smat/README.md deleted file mode 100644 index f5ca1c544..000000000 --- a/vendor/github.com/mschoch/smat/README.md +++ /dev/null @@ -1,166 +0,0 @@ -# smat – State Machine Assisted Testing - -The concept is simple, describe valid uses of your library as states and actions. States describe which actions are possible, and with what probability they should occur. Actions mutate the context and transition to another state. - -By doing this, two things are possible: - -1. Use [go-fuzz](https://github.com/dvyukov/go-fuzz) to find/test interesting sequences of operations on your library. - -2. Automate longevity testing of your application by performing long sequences of valid operations. - -**NOTE**: both of these can also incorporate validation logic (not just failure detection by building validation into the state machine) - -## Status - -The API is still not stable. 
This is brand new and we'll probably change things we don't like... - -[![Build Status](https://travis-ci.org/mschoch/smat.svg?branch=master)](https://travis-ci.org/mschoch/smat) -[![Coverage Status](https://coveralls.io/repos/github/mschoch/smat/badge.svg?branch=master)](https://coveralls.io/github/mschoch/smat?branch=master) -[![GoDoc](https://godoc.org/github.com/mschoch/smat?status.svg)](https://godoc.org/github.com/mschoch/smat) -[![codebeat badge](https://codebeat.co/badges/c3ff6180-a241-4128-97f0-fa6bf6f48752)](https://codebeat.co/projects/github-com-mschoch-smat) -[![Go Report Card](https://goreportcard.com/badge/github.com/mschoch/smat)](https://goreportcard.com/report/github.com/mschoch/smat) - -## License - -Apache 2.0 - -## How do I use it? - -### smat.Context - -Choose a structure to keep track of any state. You pass in an instance of this when you start, and it will be passed to every action when it executes. The actions may mutate this context. - -For example, consider a database library, once you open a database handle, you need to use it inside of the other actions. So you might use a structure like: - -``` -type context struct { - db *DB -} -``` - -### smat.State - -A state represents a state that your application/library can be in, and the probabilities thats certain actions should be taken. - -For example, consider a database library, in a state where the database is open, there many things you can do. Let's consider just two right now, you can set a value, or you can delete a value. - -``` -func dbOpen(next byte) smat.ActionID { - return smat.PercentExecute(next, - smat.PercentAction{50, setValue}, - smat.PercentAction{50, deleteValue}, - ) -} -``` - -This says that in the open state, there are two valid actions, 50% of the time you should set a value and 50% of the time you should delete a value. **NOTE**: these percentages are just for characterizing the test workload. - -### smat.Action - -Actions are functions that do some work, optionally mutate the context, and indicate the next state to transition to. Below we see an example action to set value in a database. - -``` -func setValueFunc(ctx smat.Context) (next smat.State, err error) { - // type assert to our custom context type - context := ctx.(*context) - // perform the operation - err = context.db.Set("k", "v") - if err != nil { - return nil, err - } - // return the new state - return dbOpen, nil -} -``` - -### smat.ActionID and smat.ActionMap - -Actions are just functions, and since we can't compare functions in Go, we need to introduce an external identifier for them. This allows us to build a bi-directional mapping which we'll take advantage of later. - -``` -const ( - setup smat.ActionID = iota - teardown - setValue - deleteValue -) - -var actionMap = smat.ActionMap{ - setup: setupFunc, - teardown: teardownFunc, - setValue: setValueFunc, - deleteValue: deleteValueFunc, -} -``` - -### smat.ActionSeq - -A common way that many users think about a library is as a sequence of actions to be performed. Using the ActionID's that we've already seen we can build up sequences of operations. - -``` - actionSeq := smat.ActionSeq{ - open, - setValue, - setValue, - setValue, - } -``` - -Notice that we build these actions using the constants we defined above, and because of this we can have a bi-directional mapping between a stream of bytes (driving the state machine) and a sequence of actions to be performed. - -## Fuzzing - -We've built a lot of pieces, lets wire it up to go-fuzz. 
- -``` -func Fuzz(data []byte) int { - return smat.Fuzz(&context{}, setup, teardown, actionMap, data) -} -``` - -* The first argument is an instance of context structure. -* The second argument is the ActionID of our setup function. The setup function does not consume any of the input stream and is used to initialize the context and determine the start state. -* The third argument is the teardown function. This will be called unconditionally to clean up any resources associated with the test. -* The fourth argument is the actionMap which maps all ActionIDs to Actions. -* The fifth argument is the data passed in from the go-fuzz application. - -### Generating Initial go-fuzz Corpus - -Earlier we mentioned the bi-directional mapping between Actions and the byte stream driving the state machine. We can now leverage this to build the inital go-fuzz corpus. - -Using the `ActinSeq`s we learned about earlier we can build up a list of them as: - - var actionSeqs = []smat.ActionSeq{...} - -Then, we can write them out to disk using: - -``` -for i, actionSeq := range actionSeqs { - byteSequence, err := actionSeq.ByteEncoding(&context{}, setup, teardown, actionMap) - if err != nil { - // handle error - } - os.MkdirAll("workdir/corpus", 0700) - ioutil.WriteFile(fmt.Sprintf("workdir/corpus/%d", i), byteSequence, 0600) -} -``` - -You can then either put this into a test case or a main application depending on your needs. - -## Longevity Testing - -Fuzzing is great, but most of your corpus is likely to be shorter meaningful sequences. And go-fuzz works to find shortest sequences that cause problems, but sometimes you actually want to explore longer sequences that appear to go-fuzz as not triggering additional code coverage. - -For these cases we have another helper you can use: - -``` - Longevity(ctx, setup, teardown, actionMap, 0, closeChan) -``` - -The first four arguments are the same, the last two are: -* random seed used to ensure repeatable tests -* closeChan (chan struct{}) - close this channel if you want the function to stop and return ErrClosed, otherwise it will run forever - -## Examples - -See the examples directory for a working example that tests some BoltDB functionality. diff --git a/vendor/github.com/mschoch/smat/actionseq.go b/vendor/github.com/mschoch/smat/actionseq.go deleted file mode 100644 index 6c8297f89..000000000 --- a/vendor/github.com/mschoch/smat/actionseq.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2016 Marty Schoch - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the -// License. You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an "AS -// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language -// governing permissions and limitations under the License. 
- -package smat - -// ActionSeq represents a sequence of actions, used for populating a corpus -// of byte sequences for the corresponding fuzz tests -type ActionSeq []ActionID - -// ByteEncoding runs the FSM to produce a byte sequence to trigger the -// desired action -func (a ActionSeq) ByteEncoding(ctx Context, setup, teardown ActionID, actionMap ActionMap) ([]byte, error) { - setupFunc, teardownFunc, err := actionMap.findSetupTeardown(setup, teardown) - if err != nil { - return nil, err - } - state, err := setupFunc(ctx) - if err != nil { - return nil, err - } - defer func() { - _, _ = teardownFunc(ctx) - }() - - var rv []byte - for _, actionID := range a { - b, err := probeStateForAction(state, actionID) - if err != nil { - return nil, err - } - rv = append(rv, b) - action, ok := actionMap[actionID] - if !ok { - continue - } - state, err = action(ctx) - if err != nil { - return nil, err - } - } - return rv, nil -} - -func probeStateForAction(state State, actionID ActionID) (byte, error) { - for i := 0; i < 256; i++ { - nextActionID := state(byte(i)) - if nextActionID == actionID { - return byte(i), nil - } - } - return 0, ErrActionNotPossible -} diff --git a/vendor/github.com/mschoch/smat/smat.go b/vendor/github.com/mschoch/smat/smat.go deleted file mode 100644 index f6ea4975f..000000000 --- a/vendor/github.com/mschoch/smat/smat.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2016 Marty Schoch - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the -// License. You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an "AS -// IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language -// governing permissions and limitations under the License. - -package smat - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "math/rand" -) - -// Logger is a configurable logger used by this package -// by default output is discarded -var Logger = log.New(ioutil.Discard, "smat ", log.LstdFlags) - -// Context is a container for any user state -type Context interface{} - -// State is a function which describes which action to perform in the event -// that a particular byte is seen -type State func(next byte) ActionID - -// PercentAction describes the frequency with which an action should occur -// for example: Action{Percent:10, Action:DonateMoney} means that 10% of -// the time you should donate money. 
-type PercentAction struct { - Percent int - Action ActionID -} - -// Action is any function which returns the next state to transition to -// it can optionally mutate the provided context object -// if any error occurs, it may return an error which will abort execution -type Action func(Context) (State, error) - -// ActionID is a unique identifier for an action -type ActionID int - -// NopAction does nothing and simply continues to the next input -var NopAction ActionID = -1 - -// ActionMap is a mapping form ActionID to Action -type ActionMap map[ActionID]Action - -func (a ActionMap) findSetupTeardown(setup, teardown ActionID) (Action, Action, error) { - setupFunc, ok := a[setup] - if !ok { - return nil, nil, ErrSetupMissing - } - teardownFunc, ok := a[teardown] - if !ok { - return nil, nil, ErrTeardownMissing - } - return setupFunc, teardownFunc, nil -} - -// Fuzz runs the fuzzing state machine with the provided context -// first, the setup action is executed unconditionally -// the start state is determined by this action -// actionMap is a lookup table for all actions -// the data byte slice determines all future state transitions -// finally, the teardown action is executed unconditionally for cleanup -func Fuzz(ctx Context, setup, teardown ActionID, actionMap ActionMap, data []byte) int { - reader := bytes.NewReader(data) - err := runReader(ctx, setup, teardown, actionMap, reader, nil) - if err != nil { - panic(err) - } - return 1 -} - -// Longevity runs the state machine with the provided context -// first, the setup action is executed unconditionally -// the start state is determined by this action -// actionMap is a lookup table for all actions -// random bytes are generated to determine all future state transitions -// finally, the teardown action is executed unconditionally for cleanup -func Longevity(ctx Context, setup, teardown ActionID, actionMap ActionMap, seed int64, closeChan chan struct{}) error { - source := rand.NewSource(seed) - return runReader(ctx, setup, teardown, actionMap, rand.New(source), closeChan) -} - -var ( - // ErrSetupMissing is returned when the setup action cannot be found - ErrSetupMissing = fmt.Errorf("setup action missing") - // ErrTeardownMissing is returned when the teardown action cannot be found - ErrTeardownMissing = fmt.Errorf("teardown action missing") - // ErrClosed is returned when the closeChan was closed to cancel the op - ErrClosed = fmt.Errorf("closed") - // ErrActionNotPossible is returned when an action is encountered in a - // FuzzCase that is not possible in the current state - ErrActionNotPossible = fmt.Errorf("action not possible in state") -) - -func runReader(ctx Context, setup, teardown ActionID, actionMap ActionMap, r io.Reader, closeChan chan struct{}) error { - setupFunc, teardownFunc, err := actionMap.findSetupTeardown(setup, teardown) - if err != nil { - return err - } - Logger.Printf("invoking setup action") - state, err := setupFunc(ctx) - if err != nil { - return err - } - defer func() { - Logger.Printf("invoking teardown action") - _, _ = teardownFunc(ctx) - }() - - reader := bufio.NewReader(r) - for next, err := reader.ReadByte(); err == nil; next, err = reader.ReadByte() { - select { - case <-closeChan: - return ErrClosed - default: - actionID := state(next) - action, ok := actionMap[actionID] - if !ok { - Logger.Printf("no such action defined, continuing") - continue - } - Logger.Printf("invoking action - %d", actionID) - state, err = action(ctx) - if err != nil { - Logger.Printf("it was action %d that returned err 
%v", actionID, err) - return err - } - } - } - return err -} - -// PercentExecute interprets the next byte as a random value and normalizes it -// to values 0-99, it then looks to see which action should be execued based -// on the action distributions -func PercentExecute(next byte, pas ...PercentAction) ActionID { - percent := int(99 * int(next) / 255) - - sofar := 0 - for _, pa := range pas { - sofar = sofar + pa.Percent - if percent < sofar { - return pa.Action - } - - } - return NopAction -} diff --git a/vendor/github.com/netapp/trident/LICENSE b/vendor/github.com/netapp/trident/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/netapp/trident/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/netapp/trident/NOTICE.txt b/vendor/github.com/netapp/trident/NOTICE.txt deleted file mode 100644 index b26ff411c..000000000 --- a/vendor/github.com/netapp/trident/NOTICE.txt +++ /dev/null @@ -1,8921 +0,0 @@ -NetApp Notice Report - - - -Copyright 2019 - - - -About this document - -The following copyright statements and licenses apply to the software components that are distributed with the Trident version 19.10 product.This product does not necessarily use all the software components referred to below. - - - -Where required, source code is published at the following location. 
ftp://ftp.netapp.com/frm-ntap/opensource

Components:

alpine-baselayout 3.1.1 : GNU General Public License v2.0 only
alpine-keys 2.1 : MIT License
Animal Sniffer Annotations 1.17 : MIT License
Apache Thrift 20180806-snapshot-6e67faa9 : Apache License 2.0
Azure-azure-sdk-for-go v32.4.0 : Apache License 2.0
Azure-go-autorest v12.3.0 : Apache License 2.0
Azure-go-autorest v12.4.1 : Apache License 2.0
Bash 5.0.0 : GNU General Public License v3.0 or later
beorn7-perks 20180510-snapshot-3a771d99 : MIT License
BusyBox 1_31_0 : GNU General Public License v2.0 or later
cenkalti/backoff v3.0.0 : MIT License
census-instrumentation/opencensus-go 0.20.2 : Apache License 2.0
census-instrumentation/opencensus-go 20190515-snapshot-aab39bd6 : Apache License 2.0
census-instrumentation/opencensus-go v0.19.3 : Apache License 2.0
census-instrumentation/opencensus-go v0.22.0 : Apache License 2.0
Checker Qual 2.5.2 : MIT License
client-go kubernetes-1.16.0 : Apache License 2.0
client_golang 0.8.0 : Apache License 2.0
code.googlesource.com/google-api-go-client v0.4.0 : BSD 3-clause "New" or "Revised" License
com.google.api.grpc:proto-google-common-protos 1.12.0 : Apache License 2.0
container-storage-interface/spec v1.1.0 : Apache License 2.0
contrib.go.opencensus.io/exporter/ocagent v0.4.12 : Apache License 2.0
contrib.go.opencensus.io/exporter/ocagent v0.4.6 : Apache License 2.0
contrib.go.opencensus.io/exporter/ocagent v0.6.0 : Apache License 2.0
dimchansky/utfbom v1.1.0 : Apache License 2.0
dnaeon/go-vcr 20190620-snapshot-ac890611 : BSD 2-clause "Simplified" License
docker-cli 20180417-snapshot : (Educational Community License v2.0 AND MIT License AND BSD 2-clause "Simplified" License AND ISC License AND Apache License 2.0 AND BSD 3-clause "New" or "Revised" License)
docker-go-plugins-helpers 20181025-snapshot-1e6269c3 : Apache License 2.0
Docker-org 20190221-snapshot : MIT License
e2fsprogs 1.45.4 : GNU General Public License v2.0 or later
error-prone annotations 2.2.0 : Apache License 2.0
etcd 20180415-snapshot-b7770cd9 : Apache License 2.0
etcd 20180521-snapshot : Apache License 2.0
etcd 3.3.13 : Apache License 2.0
etcd-io/etcd 3.3.13 : Apache License 2.0
etcdctl 20190122-snapshot : Apache License 2.0
eudev 3.2.8 : GNU General Public License v2.0 only
evanphx/json-patch v4.2.0 : BSD 3-clause "New" or "Revised" License
FindBugs jsr305 3.0.2 : Apache License 2.0
gengo 0 : Apache License 2.0
gitea.com/xgo/appengine v1.5.0 : Apache License 2.0
github.com/konsorten/go-windows-terminal-sequences v1.0.2 : MIT License
github.com/microsoft/go-winio v0.4.14 : MIT License
glycerine/go-unsnap-stream 0.0~git20180323.9f0cb55 : Expat License
GNU Core Utilities v8.31 : GNU General Public License v3.0 or later
GNU Ncurses 6.1_p20190518 : MIT License
go humanize v1.0.0 : MIT License
Go Logrus v1.4.2 : MIT License
Go programming language 1.10.8 : BSD 3-clause "New" or "Revised" License
Go programming language 20171111-snapshot : BSD 3-clause "New" or "Revised" License
Go programming language 20180728-snapshot-614d502a : BSD 3-clause "New" or "Revised" License
Go Testify 20190313-snapshot-34c6fa2d : MIT License
Go Testify v1.3.0 : MIT License
go-check-check 20180628-snapshot-788fd784 : BSD 2-clause "Simplified" License
go-inf-inf v0.9.1 : BSD 3-clause "New" or "Revised" License
go-logfmt-logfmt v0.4.0 : MIT License
go-spew 1.1.0 : ISC License
go-spew v1.1.1 : ISC License
go-systemd v17 : Apache License 2.0
go.uuid v1.2.0 : MIT License
GoDoc Text v0.1.0 : MIT License
gogo-protobuf v1.3.0 : BSD 3-clause "New" or "Revised" License
golang protobuf v1.2.0 : BSD 3-clause "New" or "Revised" License
golang protobuf v1.3.1 : BSD 3-clause "New" or "Revised" License
golang protobuf v1.3.2 : BSD 3-clause "New" or "Revised" License
golang-github-docker-go-connections-dev 20181130-snapshot : (Educational Community License v2.0 AND Apache License 2.0)
golang-github-docker-spdystream-dev 0.0~git20151103.0.4d80814 : Apache License 2.0
golang-github-ghodss-yaml-dev 20190128-snapshot : (MIT License AND BSD 3-clause "New" or "Revised" License)
golang-github-spf13-pflag-dev 1.0.3 : BSD 3-clause "New" or "Revised" License
golang-github-spf13-pflag-dev v1.0.5 : BSD 3-clause "New" or "Revised" License
golang-github-stretchr-testify 1.4.0 : MIT License
golang-gogoprotobuf-dev 1.2.1+git20190611.dadb6258 : BSD 3-clause "New" or "Revised" License
golang-golang-x-oauth2 0.0~git20190604.0f29369 : BSD 3-clause "New" or "Revised" License
golang-golangorg-crypto-devel 0 : BSD 3-clause "New" or "Revised" License
golang-google-appengine 1.4.0 : Apache License 2.0
golang-google-genproto-dev 0.0~git20190111.db91494 : Apache License 2.0
golang-google-grpc 1.22.1 : BSD 3-clause "New" or "Revised" License
golang-snappy-go-dev v0.0.1 : BSD 3-clause "New" or "Revised" License
golang-x-text-dev 0.3.2 : BSD 3-clause "New" or "Revised" License
golang.org/x/crypto 20180704-snapshot : BSD 3-clause "New" or "Revised" License
golang.org/x/crypto 20180901-snapshot-182538f8 : BSD 3-clause "New" or "Revised" License
golang.org/x/crypto 20180925-snapshot : BSD 3-clause "New" or "Revised" License
golang.org/x/crypto 20181025-snapshot-74cb1d3d : BSD 3-clause "New" or "Revised" License
golang.org/x/net 20180925-snapshot : BSD 3-clause "New" or "Revised" License
golang.org/x/net 20181115-snapshot-adae6a3d : BSD 3-clause "New" or "Revised" License
golang.org/x/oauth2 20190130-snapshot-99b60b75 : BSD 3-clause "New" or "Revised" License
golang.org/x/sync 20181108-snapshot-42b31787 : BSD 3-clause "New" or "Revised" License
golang.org/x/sys 20190212-snapshot-3b520910 : BSD 3-clause "New" or "Revised" License
golang.org/x/text 20171230-snapshot : BSD 3-clause "New" or "Revised" License
golang.org/x/time 20180126-snapshot-f51c1270 : BSD 3-clause "New" or "Revised" License
golang/appengine v1.5.0 : Apache License 2.0
golang/oauth2 0 : BSD 3-clause "New" or "Revised" License
golang/sync 20181108-snapshot-42b31787 : BSD 3-clause "New" or "Revised" License
golang/text 20181210-snapshot-17bcc049 : BSD 3-clause "New" or "Revised" License
google-api-go-client v0.4.0 : BSD 3-clause "New" or "Revised" License
google-cloud-go v0.38.0 : Apache License 2.0
google-gofuzz v1.0.0 : Apache License 2.0
google-gson 2.7 : Apache License 2.0
google/go-cmp v0.3.0 : BSD 3-clause "New" or "Revised" License
google/go-genproto 0 : Apache License 2.0
google/go-genproto 20180817-snapshot-c66870c0 : Apache License 2.0
google/go-genproto 20180831-snapshot-11092d34 : Apache License 2.0
google/go-genproto 20190729-snapshot-c506a9f9 : Apache License 2.0
google/uuid v1.1.1 : BSD 3-clause "New" or "Revised" License
googleapis/google-api-go-client v0.4.0 : BSD 3-clause "New" or "Revised" License
googleapis/google-cloud-go v0.38.0 : Apache License 2.0
Gorilla Web Toolkit v1.7.3 : BSD 3-clause "New" or "Revised" License
groupcache 20170207-snapshot : Apache License 2.0
grpc-gateway 20180820-snapshot : BSD 3-clause "New" or "Revised" License
grpc-gateway v1.5.0 : BSD 3-clause "New" or "Revised" License
grpc-gateway v1.9.5 : BSD 3-clause "New" or "Revised" License
grpc-go v1.14.0 : Apache License 2.0
grpc-go v1.15.0 : Apache License 2.0
grpc-go v1.19.0 : Apache License 2.0
grpc-go v1.22.1 : Apache License 2.0
Guava: Google Core Libraries for Java 26.0-android : Apache License 2.0
hashicorp-golang-lru v0.5.0 : Mozilla Public License 2.0
inconshreveable/mousetrap v1.0 : Apache License 2.0
io.grpc:grpc-context 1.19.0 : Apache License 2.0
io.grpc:grpc-core 1.19.0 : Apache License 2.0
io.grpc:grpc-protobuf 1.19.0 : Apache License 2.0
io.grpc:grpc-protobuf-lite 1.19.0 : Apache License 2.0
io.grpc:grpc-stub 1.19.0 : Apache License 2.0
J2ObjC Annotations 1.1 : Apache License 2.0
javax.annotation API 1.3.2 : Common Development and Distribution License 1.1
jsoniter-go v1.1.7 : MIT License
jwt-go v3.2.0 : MIT License
k8s.io/code-generator kubernetes-1.16.0-beta.1 : Apache License 2.0
k8s.io/klog v0.4.0 : Apache License 2.0
k8s.io/kube-openapi 20190226-snapshot-ea82251f : Apache License 2.0
k8s.io/utils 0 : Apache License 2.0
Kerberos 1.17 : MIT License
keyutils 1.6 : (GNU Library General Public License v2 or later AND GNU General Public License v2.0 or later)
kr/pretty 0.1.0 : MIT License
kubernetes/api kubernetes-1.16.0 : Apache License 2.0
kubernetes/apiextensions-apiserver kubernetes-1.16.0 : Apache License 2.0
kubernetes/apimachinery kubernetes-1.16.0-rc.2 : Apache License 2.0
libcap-doc 2.27 : GNU General Public License v3.0 or later
libcomerr2 1.45.2 : BSD 3-clause "New" or "Revised" License
libevent - an event notification library 2.1.10 : BSD 3-clause "New" or "Revised" License
libnfsidmap 2.3.4 : BSD 3-clause "New" or "Revised" License
LibreSSL Portable Security Libraries 2.9.1 : (SSLeay License AND OpenSSL License)
libverto 0.3.1 : MIT License
Linux Extended Attributes 2.2.52 : (GNU Lesser General Public License v2.1 or later AND GNU General Public License v2.0 or later)
logfmt 0 : MIT License
logfmt 20140226-snapshot-b84e30ac : MIT License
LVM2 2.02.184 : (GNU Library General Public License v2 or later AND BSD 2-clause "Simplified" License AND GNU General Public License v2.0 or later)
Masterminds-semver v1.4.2 : MIT License
matttproud-golang_protobuf_extensions v1.0.1 : Apache License 2.0
mergo v0.3.5 : BSD 3-clause "New" or "Revised" License
mitchellh-copystructure v1.0.0 : MIT License
mitchellh-go-homedir v1.1.0 : MIT License
mitchellh-hashstructure v1.0.0 : MIT License
mitchellh-reflectwalk v1.0.1 : MIT License
modern-go/concurrent 1.0.0 : Apache License 2.0
modern-go/concurrent 20180511-snapshot : Apache License 2.0
modern-go/reflect2 1.0.1 : Apache License 2.0
modern-go/reflect2 20190422-snapshot : Apache License 2.0
mschoch/smat 20170712-snapshot : Apache License 2.0
multipath-tools 0.8.1 : GNU General Public License v3.0 or later
musl 1.1.22 : MIT License
NetApp/trident v19.07.0-alpha.1 : Apache License 2.0
olekukonko-tablewriter v0.0.1 : MIT License
OpenCensus 0.19.2 : Apache License 2.0
OpenCensus 0.2.1 : Apache License 2.0
OpenCensus 20181214-snapshot-ba49f567 : Apache License 2.0
OpenSSL 1.1.1d : Apache License 2.0
openzipkin/zipkin-go v0.1.1 : Apache License 2.0
philhofer-fwd v1.0.0 : MIT License
pkg/errors v0.8.1 : BSD 2-clause "Simplified" License
pmezard-go-difflib 1.0.0 : BSD 3-clause "New" or "Revised" License
prometheus-client_model 20180714-snapshot-5c3871d8 : Apache License 2.0
prometheus-common 20180808-snapshot-c7de2306 : Apache License 2.0
prometheus-procfs 20180808-snapshot-05ee40e3 : Apache License 2.0
Protocol Buffer Java API 3.7.0 : BSD 3-clause "New" or "Revised" License
raymondjacobson/mgo 20181107-snapshot-eeefdecb : BSD 2-clause "Simplified" License
RoaringBitmap-roaring v0.4.20 : Apache License 2.0
rs-xid v1.2.1 : MIT License
shopspring-decimal 1.1.0 : MIT License
sigs.k8s.io/yaml v1.1.0 : (MIT License AND BSD 3-clause "New" or "Revised" License)
spf13-cobra 0.0.5 : Apache License 2.0
stretchr/objx v0.1 : MIT License
test-repo-billy/azure-sdk-for-go v32.0.0 : Apache License 2.0
tinylib-msgp 1.0.2 : MIT License
TransmogrifAI 0.3.4 : BSD 3-clause "New" or "Revised" License
Userspace RCU 0.11.0 : GNU Library General Public License v2 or later
util-linux 2.33.2 : GNU General Public License v2.0 or later
yaml for Go v2.2.2 : (MIT License AND Apache License 2.0)

Licenses:

Apache License 2.0
(Apache Thrift 20180806-snapshot-6e67faa9, Azure-azure-sdk-for-go v32.4.0, Azure-go-autorest v12.3.0, Azure-go-autorest v12.4.1, census-instrumentation/opencensus-go 0.20.2, census-instrumentation/opencensus-go 20190515-snapshot-aab39bd6, census-instrumentation/opencensus-go v0.19.3, census-instrumentation/opencensus-go v0.22.0, client-go kubernetes-1.16.0, client_golang 0.8.0, com.google.api.grpc:proto-google-common-protos 1.12.0, container-storage-interface/spec v1.1.0, contrib.go.opencensus.io/exporter/ocagent v0.4.12, contrib.go.opencensus.io/exporter/ocagent v0.4.6, contrib.go.opencensus.io/exporter/ocagent v0.6.0, dimchansky/utfbom v1.1.0, docker-cli 20180417-snapshot, docker-go-plugins-helpers 20181025-snapshot-1e6269c3, error-prone annotations 2.2.0, etcd 20180415-snapshot-b7770cd9, etcd 20180521-snapshot, etcd 3.3.13, etcd-io/etcd 3.3.13, etcdctl 20190122-snapshot, FindBugs jsr305 3.0.2, gengo 0, gitea.com/xgo/appengine v1.5.0, go-systemd v17, golang-github-docker-go-connections-dev 20181130-snapshot, golang-github-docker-spdystream-dev 0.0~git20151103.0.4d80814, golang-google-appengine 1.4.0, golang-google-genproto-dev 0.0~git20190111.db91494, golang/appengine v1.5.0, google-cloud-go v0.38.0, google-gofuzz v1.0.0, google-gson 2.7, google/go-genproto 0, google/go-genproto 20180817-snapshot-c66870c0, google/go-genproto 20180831-snapshot-11092d34, google/go-genproto 20190729-snapshot-c506a9f9, googleapis/google-cloud-go v0.38.0, groupcache 20170207-snapshot, grpc-go v1.14.0, grpc-go v1.15.0, grpc-go v1.19.0, grpc-go v1.22.1, Guava: Google Core Libraries for Java 26.0-android, inconshreveable/mousetrap v1.0, io.grpc:grpc-context 1.19.0, io.grpc:grpc-core 1.19.0, io.grpc:grpc-protobuf 1.19.0, io.grpc:grpc-protobuf-lite 1.19.0, io.grpc:grpc-stub 1.19.0, J2ObjC Annotations 1.1, k8s.io/code-generator kubernetes-1.16.0-beta.1, k8s.io/klog v0.4.0, k8s.io/kube-openapi 20190226-snapshot-ea82251f, k8s.io/utils 0, kubernetes/api kubernetes-1.16.0, kubernetes/apiextensions-apiserver kubernetes-1.16.0, kubernetes/apimachinery kubernetes-1.16.0-rc.2, matttproud-golang_protobuf_extensions v1.0.1, modern-go/concurrent 1.0.0, modern-go/concurrent 20180511-snapshot, modern-go/reflect2 1.0.1, modern-go/reflect2 20190422-snapshot, mschoch/smat 20170712-snapshot, NetApp/trident v19.07.0-alpha.1,
OpenCensus 0.19.2, OpenCensus 0.2.1, OpenCensus 20181214-snapshot-ba49f567, OpenSSL 1.1.1d, openzipkin/zipkin-go v0.1.1, prometheus-client_model 20180714-snapshot-5c3871d8, prometheus-common 20180808-snapshot-c7de2306, prometheus-procfs 20180808-snapshot-05ee40e3, RoaringBitmap-roaring v0.4.20, spf13-cobra 0.0.5, test-repo-billy/azure-sdk-for-go v32.0.0, yaml for Go v2.2.2) - - - -Apache License - -Version 2.0, January 2004 - -========================= - - - - - -http://www.apache.org/licenses/ - - - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - - -1. Definitions. - - - -"License" shall mean the terms and conditions for use, reproduction, and - -distribution as defined by Sections 1 through 9 of this document. - - - -"Licensor" shall mean the copyright owner or entity authorized by the copyright - -owner that is granting the License. - - - -"Legal Entity" shall mean the union of the acting entity and all other entities - -that control, are controlled by, or are under common control with that entity. - -For the purposes of this definition, "control" means (i) the power, direct or - -indirect, to cause the direction or management of such entity, whether by - -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the - -outstanding shares, or (iii) beneficial ownership of such entity. - - - -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions - -granted by this License. - - - -"Source" form shall mean the preferred form for making modifications, including - -but not limited to software source code, documentation source, and configuration - -files. - - - -"Object" form shall mean any form resulting from mechanical transformation or - -translation of a Source form, including but not limited to compiled object code, - -generated documentation, and conversions to other media types. - - - -"Work" shall mean the work of authorship, whether in Source or Object form, made - -available under the License, as indicated by a copyright notice that is included - -in or attached to the work (an example is provided in the Appendix below). - - - -"Derivative Works" shall mean any work, whether in Source or Object form, that is - -based on (or derived from) the Work and for which the editorial revisions, - -annotations, elaborations, or other modifications represent, as a whole, an - -original work of authorship. For the purposes of this License, Derivative Works - -shall not include works that remain separable from, or merely link (or bind by - -name) to the interfaces of, the Work and Derivative Works thereof. - - - -"Contribution" shall mean any work of authorship, including the original version - -of the Work and any modifications or additions to that Work or Derivative Works - -thereof, that is intentionally submitted to Licensor for inclusion in the Work by - -the copyright owner or by an individual or Legal Entity authorized to submit on - -behalf of the copyright owner. For the purposes of this definition, "submitted" - -means any form of electronic, verbal, or written communication sent to the - -Licensor or its representatives, including but not limited to communication on - -electronic mailing lists, source code control systems, and issue tracking systems - -that are managed by, or on behalf of, the Licensor for the purpose of discussing - -and improving the Work, but excluding communication that is conspicuously marked - -or otherwise designated in writing by the copyright owner as "Not a - -Contribution." 
- - - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of - -whom a Contribution has been received by Licensor and subsequently incorporated - -within the Work. - - - -2. Grant of Copyright License. Subject to the terms and conditions of this - -License, each Contributor hereby grants to You a perpetual, worldwide, - -non-exclusive, no-charge, royalty-free, irrevocable copyright license to - -reproduce, prepare Derivative Works of, publicly display, publicly perform, - -sublicense, and distribute the Work and such Derivative Works in Source or Object - -form. - - - -3. Grant of Patent License. Subject to the terms and conditions of this License, - -each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, - -no-charge, royalty-free, irrevocable (except as stated in this section) patent - -license to make, have made, use, offer to sell, sell, import, and otherwise - -transfer the Work, where such license applies only to those patent claims - -licensable by such Contributor that are necessarily infringed by their - -Contribution(s) alone or by combination of their Contribution(s) with the Work to - -which such Contribution(s) was submitted. If You institute patent litigation - -against any entity (including a cross-claim or counterclaim in a lawsuit) - -alleging that the Work or a Contribution incorporated within the Work constitutes - -direct or contributory patent infringement, then any patent licenses granted to - -You under this License for that Work shall terminate as of the date such - -litigation is filed. - - - -4. Redistribution. You may reproduce and distribute copies of the Work or - -Derivative Works thereof in any medium, with or without modifications, and in - -Source or Object form, provided that You meet the following conditions: - - - - a. You must give any other recipients of the Work or Derivative Works a copy of - - this License; and - - - - b. You must cause any modified files to carry prominent notices stating that - - You changed the files; and - - - - c. You must retain, in the Source form of any Derivative Works that You - - distribute, all copyright, patent, trademark, and attribution notices from - - the Source form of the Work, excluding those notices that do not pertain to - - any part of the Derivative Works; and - - - - d. If the Work includes a "NOTICE" text file as part of its distribution, then - - any Derivative Works that You distribute must include a readable copy of the - - attribution notices contained within such NOTICE file, excluding those - - notices that do not pertain to any part of the Derivative Works, in at least - - one of the following places: within a NOTICE text file distributed as part of - - the Derivative Works; within the Source form or documentation, if provided - - along with the Derivative Works; or, within a display generated by the - - Derivative Works, if and wherever such third-party notices normally appear. - - The contents of the NOTICE file are for informational purposes only and do - - not modify the License. You may add Your own attribution notices within - - Derivative Works that You distribute, alongside or as an addendum to the - - NOTICE text from the Work, provided that such additional attribution notices - - cannot be construed as modifying the License. 
- - - -You may add Your own copyright statement to Your modifications and may provide - -additional or different license terms and conditions for use, reproduction, or - -distribution of Your modifications, or for any such Derivative Works as a whole, - -provided Your use, reproduction, and distribution of the Work otherwise complies - -with the conditions stated in this License. - - - -5. Submission of Contributions. Unless You explicitly state otherwise, any - -Contribution intentionally submitted for inclusion in the Work by You to the - -Licensor shall be under the terms and conditions of this License, without any - -additional terms or conditions. Notwithstanding the above, nothing herein shall - -supersede or modify the terms of any separate license agreement you may have - -executed with Licensor regarding such Contributions. - - - -6. Trademarks. This License does not grant permission to use the trade names, - -trademarks, service marks, or product names of the Licensor, except as required - -for reasonable and customary use in describing the origin of the Work and - -reproducing the content of the NOTICE file. - - - -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in - -writing, Licensor provides the Work (and each Contributor provides its - -Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - -either express or implied, including, without limitation, any warranties or - -conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - -PARTICULAR PURPOSE. You are solely responsible for determining the - -appropriateness of using or redistributing the Work and assume any risks - -associated with Your exercise of permissions under this License. - - - -8. Limitation of Liability. In no event and under no legal theory, whether in - -tort (including negligence), contract, or otherwise, unless required by - -applicable law (such as deliberate and grossly negligent acts) or agreed to in - -writing, shall any Contributor be liable to You for damages, including any - -direct, indirect, special, incidental, or consequential damages of any character - -arising as a result of this License or out of the use or inability to use the - -Work (including but not limited to damages for loss of goodwill, work stoppage, - -computer failure or malfunction, or any and all other commercial damages or - -losses), even if such Contributor has been advised of the possibility of such - -damages. - - - -9. Accepting Warranty or Additional Liability. While redistributing the Work or - -Derivative Works thereof, You may choose to offer, and charge a fee for, - -acceptance of support, warranty, indemnity, or other liability obligations and/or - -rights consistent with this License. However, in accepting such obligations, You - -may act only on Your own behalf and on Your sole responsibility, not on behalf of - -any other Contributor, and only if You agree to indemnify, defend, and hold each - -Contributor harmless for any liability incurred by, or claims asserted against, - -such Contributor by reason of your accepting any such warranty or additional - -liability. - - - -END OF TERMS AND CONDITIONS - - - -APPENDIX: How to apply the Apache License to your work - - - -To apply the Apache License to your work, attach the following boilerplate - -notice, with the fields enclosed by brackets "[]" replaced with your own - -identifying information. (Don't include the brackets!) 
The text should be - -enclosed in the appropriate comment syntax for the file format. We also recommend - -that a file or class name and description of purpose be included on the same - -"printed page" as the copyright notice for easier identification within - -third-party archives. - - - - Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, - - Version 2.0 (the "License"); you may not use this file except in compliance - - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law - - or agreed to in writing, software distributed under the License is - - distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - - KIND, either express or implied. See the License for the specific language - - governing permissions and limitations under the License. - - - ---- - - - -BSD 2-clause "Simplified" License - -(dnaeon/go-vcr 20190620-snapshot-ac890611, docker-cli 20180417-snapshot, LVM2 2.02.184, pkg/errors v0.8.1, raymondjacobson/mgo 20181107-snapshot-eeefdecb) - - - -BSD Two Clause License - -====================== - - - -Redistribution and use in source and binary forms, with or without modification, - -are permitted provided that the following conditions are met: - - - - 1. Redistributions of source code must retain the above copyright notice, this - - list of conditions and the following disclaimer. - - - - 2. Redistributions in binary form must reproduce the above copyright notice, - - this list of conditions and the following disclaimer in the documentation - - and/or other materials provided with the distribution. - - - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED - -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - -SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT - -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH - -DAMAGE. - - - ---- - - - -BSD 2-clause "Simplified" License - -(go-check-check 20180628-snapshot-788fd784) - - - -Copyright (c) 2010-2013 Gustavo Niemeyer - - - -All rights reserved. - - - -Redistribution and use in source and binary forms, with or without - -modification, are permitted provided that the following conditions are met: - - - -1. Redistributions of source code must retain the above copyright notice, this - - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - - this list of conditions and the following disclaimer in the documentation - - and/or other materials provided with the distribution. - - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE - - - ---- - - - -BSD 3-clause "New" or "Revised" License - -(Go programming language 20180728-snapshot-614d502a, golang-golangorg-crypto-devel 0, golang.org/x/crypto 20180704-snapshot, golang.org/x/crypto 20180901-snapshot-182538f8, golang.org/x/crypto 20180925-snapshot, golang.org/x/net 20180925-snapshot, golang.org/x/time 20180126-snapshot-f51c1270) - - - -Copyright (c) 2009 The Go Authors. All rights reserved. - - - -Redistribution and use in source and binary forms, with or without - -modification, are permitted provided that the following conditions are - -met: - - - - * Redistributions of source code must retain the above copyright - -notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - -copyright notice, this list of conditions and the following disclaimer - -in the documentation and/or other materials provided with the - -distribution. - - * Neither the name of Google Inc. nor the names of its - -contributors may be used to endorse or promote products derived from - -this software without specific prior written permission. - - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE - - - ---- - - - -BSD 3-clause "New" or "Revised" License - -(golang-github-spf13-pflag-dev 1.0.3) - - - -Copyright (c) 2012 Alex Ogier. All rights reserved. - -Copyright (c) 2012 The Go Authors. All rights reserved. - - - -Redistribution and use in source and binary forms, with or without - -modification, are permitted provided that the following conditions are - -met: - - - - * Redistributions of source code must retain the above copyright - -notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - -copyright notice, this list of conditions and the following disclaimer - -in the documentation and/or other materials provided with the - -distribution. - - * Neither the name of Google Inc. nor the names of its - -contributors may be used to endorse or promote products derived from - -this software without specific prior written permission. 
- - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE - - - ---- - - - -BSD 3-clause "New" or "Revised" License - -(go-inf-inf v0.9.1) - - - -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go - -Authors. All rights reserved. - - - -Redistribution and use in source and binary forms, with or without - -modification, are permitted provided that the following conditions are - -met: - - - - * Redistributions of source code must retain the above copyright - -notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - -copyright notice, this list of conditions and the following disclaimer - -in the documentation and/or other materials provided with the - -distribution. - - * Neither the name of Google Inc. nor the names of its - -contributors may be used to endorse or promote products derived from - -this software without specific prior written permission. - - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE - - - ---- - - - -BSD 3-clause "New" or "Revised" License - -(mergo v0.3.5) - - - -Copyright (c) 2013 Dario Castañé. All rights reserved. - -Copyright (c) 2012 The Go Authors. All rights reserved. - - - -Redistribution and use in source and binary forms, with or without - -modification, are permitted provided that the following conditions are - -met: - - - - * Redistributions of source code must retain the above copyright - -notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - -copyright notice, this list of conditions and the following disclaimer - -in the documentation and/or other materials provided with the - -distribution. - - * Neither the name of Google Inc. nor the names of its - -contributors may be used to endorse or promote products derived from - -this software without specific prior written permission. 
- - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE - - - ---- - - - -BSD 3-clause "New" or "Revised" License - -(grpc-gateway 20180820-snapshot, grpc-gateway v1.5.0) - - - -Copyright (c) 2015, Gengo, Inc. - -All rights reserved. - - - -Redistribution and use in source and binary forms, with or without modification, - -are permitted provided that the following conditions are met: - - - - * Redistributions of source code must retain the above copyright notice, - - this list of conditions and the following disclaimer. - - - - * Redistributions in binary form must reproduce the above copyright notice, - - this list of conditions and the following disclaimer in the documentation - - and/or other materials provided with the distribution. - - - - * Neither the name of Gengo, Inc. nor the names of its - - contributors may be used to endorse or promote products derived from this - - software without specific prior written permission. - - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE - - - ---- - - - -BSD 3-clause "New" or "Revised" License - -(code.googlesource.com/google-api-go-client v0.4.0, docker-cli 20180417-snapshot, evanphx/json-patch v4.2.0, Go programming language 1.10.8, Go programming language 20171111-snapshot, gogo-protobuf v1.3.0, golang protobuf v1.3.1, golang protobuf v1.3.2, golang-github-ghodss-yaml-dev 20190128-snapshot, golang-github-spf13-pflag-dev v1.0.5, golang-gogoprotobuf-dev 1.2.1+git20190611.dadb6258, golang-golang-x-oauth2 0.0~git20190604.0f29369, golang-google-grpc 1.22.1, golang-snappy-go-dev v0.0.1, golang-x-text-dev 0.3.2, golang.org/x/crypto 20181025-snapshot-74cb1d3d, golang.org/x/net 20181115-snapshot-adae6a3d, golang.org/x/oauth2 20190130-snapshot-99b60b75, golang.org/x/sync 20181108-snapshot-42b31787, golang.org/x/sys 20190212-snapshot-3b520910, golang.org/x/text 20171230-snapshot, golang/oauth2 0, golang/sync 20181108-snapshot-42b31787, golang/text 20181210-snapshot-17bcc049, google-api-go-client v0.4.0, google/go-cmp v0.3.0, google/uuid v1.1.1, googleapis/google-api-go-client v0.4.0, Gorilla Web Toolkit v1.7.3, grpc-gateway v1.9.5, libcomerr2 1.45.2, libevent - an event notification library 2.1.10, libnfsidmap 2.3.4, Protocol Buffer Java API 3.7.0, sigs.k8s.io/yaml v1.1.0, TransmogrifAI 0.3.4) - - - -Copyright (c) , - -All rights reserved. - - - -Redistribution and use in source and binary forms, with or without modification, - -are permitted provided that the following conditions are met: - - - - * Redistributions of source code must retain the above copyright notice, this - - list of conditions and the following disclaimer. - - - - * Redistributions in binary form must reproduce the above copyright notice, - - this list of conditions and the following disclaimer in the documentation - - and/or other materials provided with the distribution. - - - - * Neither the name of the nor the names of its contributors may - - be used to endorse or promote products derived from this software without - - specific prior written permission. - - - - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS - -OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - ---- - - - -BSD 3-clause "New" or "Revised" License - -(golang protobuf v1.2.0) - - - -Copyright 2010 The Go Authors. All rights reserved. 
- - - -Redistribution and use in source and binary forms, with or without - -modification, are permitted provided that the following conditions are - -met: - - - - * Redistributions of source code must retain the above copyright - -notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - -copyright notice, this list of conditions and the following disclaimer - -in the documentation and/or other materials provided with the - -distribution. - - * Neither the name of Google Inc. nor the names of its - -contributors may be used to endorse or promote products derived from - -this software without specific prior written permission. - - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE - - - ---- - - - -BSD 3-clause "New" or "Revised" License - -(pmezard-go-difflib 1.0.0) - - - -Source: https://github.com/pmezard/go-difflib - - - -Files: * - -Copyright: 2013 Patrick Mézard - -License: BSD-3-clause - - - -Files: debian/* - -Copyright: 2016 Dmitry Smirnov - -License: BSD-3-clause - - - -License: BSD-3-clause - - - -Redistribution and use in source and binary forms, with or without - - modification, are permitted provided that the following conditions are - - met: - - . - - Redistributions of source code must retain the above copyright - - notice, this list of conditions and the following disclaimer. - - . - - Redistributions in binary form must reproduce the above copyright - - notice, this list of conditions and the following disclaimer in the - - documentation and/or other materials provided with the distribution. - - . - - The names of its contributors may not be used to endorse or promote - - products derived from this software without specific prior written - - permission. - - . - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS - - IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - - TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - - PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - - HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - - TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE - - - ---- - - - -Common Development and Distribution License 1.1 - -(javax.annotation API 1.3.2) - - - -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 - -============================================================== - - - -1. 
Definitions. - - - - 1.1. “Contributor” means each individual or entity that creates or contributes - - to the creation of Modifications. - - - - 1.2. “Contributor Version” means the combination of the Original Software, - - prior Modifications used by a Contributor (if any), and the Modifications made - - by that particular Contributor. - - - - 1.3. “Covered Software” means (a) the Original Software, or (b) Modifications, - - or (c) the combination of files containing Original Software with files - - containing Modifications, in each case including portions thereof. - - - - 1.4. “Executable” means the Covered Software in any form other than Source - - Code. - - - - 1.5. “Initial Developer” means the individual or entity that first makes - - Original Software available under this License. - - - - 1.6. “Larger Work” means a work which combines Covered Software or portions - - thereof with code not governed by the terms of this License. - - - - 1.7. “License” means this document. - - - - 1.8. “Licensable” means having the right to grant, to the maximum extent - - possible, whether at the time of the initial grant or subsequently acquired, - - any and all of the rights conveyed herein. - - - - 1.9. “Modifications” means the Source Code and Executable form of any of the - - following: - - - - A. Any file that results from an addition to, deletion from or modification - - of the contents of a file containing Original Software or previous - - Modifications; - - - - B. Any new file that contains any part of the Original Software or previous - - Modification; or - - - - C. Any new file that is contributed or otherwise made available under the - - terms of this License. - - - - - - 1.10. “Original Software” means the Source Code and Executable form of computer - - software code that is originally released under this License. - - - - 1.11. “Patent Claims” means any patent claim(s), now owned or hereafter - - acquired, including without limitation, method, process, and apparatus claims, - - in any patent Licensable by grantor. - - - - 1.12. “Source Code” means (a) the common form of computer software code in - - which modifications are made and (b) associated documentation included in or - - with such code. - - - - 1.13. “You” (or “Your”) means an individual or a legal entity exercising rights - - under, and complying with all of the terms of, this License. For legal - - entities, “You” includes any entity which controls, is controlled by, or is - - under common control with You. For purposes of this definition, “control” means - - (a) the power, direct or indirect, to cause the direction or management of such - - entity, whether by contract or otherwise, or (b) ownership of more than fifty - - percent (50%) of the outstanding shares or beneficial ownership of such entity. - - - -2. License Grants. - - - - 2.1. The Initial Developer Grant. 
- - - - Conditioned upon Your compliance with Section 3.1 below and subject to third - - party intellectual property claims, the Initial Developer hereby grants You a - - world-wide, royalty-free, non-exclusive license: - - - - (a) under intellectual property rights (other than patent or trademark) - - Licensable by Initial Developer, to use, reproduce, modify, display, perform, - - sublicense and distribute the Original Software (or portions thereof), with - - or without Modifications, and/or as part of a Larger Work; and - - - - (b) under Patent Claims infringed by the making, using or selling of Original - - Software, to make, have made, use, practice, sell, and offer for sale, and/or - - otherwise dispose of the Original Software (or portions thereof). - - - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date - - Initial Developer first distributes or otherwise makes the Original Software - - available to a third party under the terms of this License. - - - - (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) - - for code that You delete from the Original Software, or (2) for infringements - - caused by: (i) the modification of the Original Software, or (ii) the - - combination of the Original Software with other software or devices. - - - - - - 2.2. Contributor Grant. - - - - Conditioned upon Your compliance with Section 3.1 below and subject to third - - party intellectual property claims, each Contributor hereby grants You a - - world-wide, royalty-free, non-exclusive license: - - - - (a) under intellectual property rights (other than patent or trademark) - - Licensable by Contributor to use, reproduce, modify, display, perform, - - sublicense and distribute the Modifications created by such Contributor (or - - portions thereof), either on an unmodified basis, with other Modifications, - - as Covered Software and/or as part of a Larger Work; and - - - - (b) under Patent Claims infringed by the making, using, or selling of - - Modifications made by that Contributor either alone and/or in combination - - with its Contributor Version (or portions of such combination), to make, use, - - sell, offer for sale, have made, and/or otherwise dispose of: (1) - - Modifications made by that Contributor (or portions thereof); and (2) the - - combination of Modifications made by that Contributor with its Contributor - - Version (or portions of such combination). - - - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the - - date Contributor first distributes or otherwise makes the Modifications - - available to a third party. - - - - (d) Notwithstanding Section 2.2(b) above, no patent license is granted: - - - - (1) for any code that Contributor has deleted from the Contributor Version; - - - - (2) for infringements caused by: (i) third party modifications of - - Contributor Version, or (ii) the combination of Modifications made by that - - Contributor with other software (except as part of the Contributor Version) - - or other devices; or - - - - (3) under Patent Claims infringed by Covered Software in the absence of - - Modifications made by that Contributor. - - - - - - 3. Distribution Obligations. - - - - 3.1. Availability of Source Code. - - - - Any Covered Software that You distribute or otherwise make available in - - Executable form must also be made available in Source Code form and that - - Source Code form must be distributed only under the terms of this License. 
- - You must include a copy of this License with every copy of the Source Code - - form of the Covered Software You distribute or otherwise make available. You - - must inform recipients of any such Covered Software in Executable form as to - - how they can obtain such Covered Software in Source Code form in a reasonable - - manner on or through a medium customarily used for software exchange. - - - - 3.2. Modifications. - - - - The Modifications that You create or to which You contribute are governed by - - the terms of this License. You represent that You believe Your Modifications - - are Your original creation(s) and/or You have sufficient rights to grant the - - rights conveyed by this License. - - - - 3.3. Required Notices. - - - - You must include a notice in each of Your Modifications that identifies You - - as the Contributor of the Modification. You may not remove or alter any - - copyright, patent or trademark notices contained within the Covered Software, - - or any notices of licensing or any descriptive text giving attribution to any - - Contributor or the Initial Developer. - - - - 3.4. Application of Additional Terms. - - - - You may not offer or impose any terms on any Covered Software in Source Code - - form that alters or restricts the applicable version of this License or the - - recipients' rights hereunder. You may choose to offer, and to charge a fee - - for, warranty, support, indemnity or liability obligations to one or more - - recipients of Covered Software. However, you may do so only on Your own - - behalf, and not on behalf of the Initial Developer or any Contributor. You - - must make it absolutely clear that any such warranty, support, indemnity or - - liability obligation is offered by You alone, and You hereby agree to - - indemnify the Initial Developer and every Contributor for any liability - - incurred by the Initial Developer or such Contributor as a result of - - warranty, support, indemnity or liability terms You offer. - - - - 3.5. Distribution of Executable Versions. - - - - You may distribute the Executable form of the Covered Software under the - - terms of this License or under the terms of a license of Your choice, which - - may contain terms different from this License, provided that You are in - - compliance with the terms of this License and that the license for the - - Executable form does not attempt to limit or alter the recipient's rights in - - the Source Code form from the rights set forth in this License. If You - - distribute the Covered Software in Executable form under a different license, - - You must make it absolutely clear that any terms which differ from this - - License are offered by You alone, not by the Initial Developer or - - Contributor. You hereby agree to indemnify the Initial Developer and every - - Contributor for any liability incurred by the Initial Developer or such - - Contributor as a result of any such terms You offer. - - - - 3.6. Larger Works. - - - - You may create a Larger Work by combining Covered Software with other code - - not governed by the terms of this License and distribute the Larger Work as a - - single product. In such a case, You must make sure the requirements of this - - License are fulfilled for the Covered Software. - - - - - - 4. Versions of the License. - - - - 4.1. New Versions. - - - - Oracle is the initial license steward and may publish revised and/or new - - versions of this License from time to time. Each version will be given a - - distinguishing version number. 
Except as provided in Section 4.3, no one - - other than the license steward has the right to modify this License. - - - - 4.2. Effect of New Versions. - - - - You may always continue to use, distribute or otherwise make the Covered - - Software available under the terms of the version of the License under which - - You originally received the Covered Software. If the Initial Developer - - includes a notice in the Original Software prohibiting it from being - - distributed or otherwise made available under any subsequent version of the - - License, You must distribute and make the Covered Software available under - - the terms of the version of the License under which You originally received - - the Covered Software. Otherwise, You may also choose to use, distribute or - - otherwise make the Covered Software available under the terms of any - - subsequent version of the License published by the license steward. - - - - 4.3. Modified Versions. - - - - When You are an Initial Developer and You want to create a new license for - - Your Original Software, You may create and use a modified version of this - - License if You: (a) rename the license and remove any references to the name - - of the license steward (except to note that the license differs from this - - License); and (b) otherwise make it clear that the license contains terms - - which differ from this License. - - - - - - 5. DISCLAIMER OF WARRANTY. - - - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN “AS IS” BASIS, WITHOUT - - WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT - - LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, - - MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK - - AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD - - ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL - - DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, - - REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART - - OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT - - UNDER THIS DISCLAIMER. - - - - - - - - 6. TERMINATION. - - - - 6.1. This License and the rights granted hereunder will terminate - - automatically if You fail to comply with terms herein and fail to cure such - - breach within 30 days of becoming aware of the breach. Provisions which, by - - their nature, must remain in effect beyond the termination of this License - - shall survive. - - - - 6.2. 
If You assert a patent infringement claim (excluding declaratory - - judgment actions) against Initial Developer or a Contributor (the Initial - - Developer or Contributor against whom You assert such claim is referred to as - - “Participant”) alleging that the Participant Software (meaning the - - Contributor Version where the Participant is a Contributor or the Original - - Software where the Participant is the Initial Developer) directly or - - indirectly infringes any patent, then any and all rights granted directly or - - indirectly to You by such Participant, the Initial Developer (if the Initial - - Developer is not the Participant) and all Contributors under Sections 2.1 - - and/or 2.2 of this License shall, upon 60 days notice from Participant - - terminate prospectively and automatically at the expiration of such 60 day - - notice period, unless if within such 60 day period You withdraw Your claim - - with respect to the Participant Software against such Participant either - - unilaterally or pursuant to a written agreement with Participant. - - - - 6.3. If You assert a patent infringement claim against Participant alleging - - that the Participant Software directly or indirectly infringes any patent - - where such claim is resolved (such as by license or settlement) prior to the - - initiation of patent infringement litigation, then the reasonable value of - - the licenses granted by such Participant under Sections 2.1 or 2.2 shall be - - taken into account in determining the amount or value of any payment or - - license. - - - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, all end - - user licenses that have been validly granted by You or any distributor - - hereunder prior to termination (excluding licenses granted to You by any - - distributor) shall survive termination. - - - - - - 7. LIMITATION OF LIABILITY. - - - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING - - NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY - - OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF - - ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, - - INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - - LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR - - MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH - - PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS - - LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL - - INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW - - PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR - - LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND - - LIMITATION MAY NOT APPLY TO YOU. - - - - - - - - 8. U.S. GOVERNMENT END USERS. - - - - The Covered Software is a “commercial item,” as that term is defined in 48 - - C.F.R. 2.101 (Oct. 1995), consisting of “commercial computer software” (as that - - term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and “commercial computer - - software documentation” as such terms are used in 48 C.F.R. 12.212 (Sept. - - 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through - - 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software - - with only those rights set forth herein. This U.S. 
Government Rights clause is - - in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision - - that addresses Government rights in computer software under this License. - - - - - - - - 9. MISCELLANEOUS. - - - - This License represents the complete agreement concerning subject matter - - hereof. If any provision of this License is held to be unenforceable, such - - provision shall be reformed only to the extent necessary to make it - - enforceable. This License shall be governed by the law of the jurisdiction - - specified in a notice contained within the Original Software (except to the - - extent applicable law, if any, provides otherwise), excluding such - - jurisdiction's conflict-of-law provisions. Any litigation relating to this - - License shall be subject to the jurisdiction of the courts located in the - - jurisdiction and venue specified in a notice contained within the Original - - Software, with the losing party responsible for costs, including, without - - limitation, court costs and reasonable attorneys' fees and expenses. The - - application of the United Nations Convention on Contracts for the International - - Sale of Goods is expressly excluded. Any law or regulation which provides that - - the language of a contract shall be construed against the drafter shall not - - apply to this License. You agree that You alone are responsible for compliance - - with the United States export administration regulations (and the export - - control laws and regulation of any other countries) when You use, distribute or - - otherwise make available any Covered Software. - - - - - - - - 10. RESPONSIBILITY FOR CLAIMS. - - - - As between Initial Developer and the Contributors, each party is responsible - - for claims and damages arising, directly or indirectly, out of its utilization - - of rights under this License and You agree to work with Initial Developer and - - Contributors to distribute such responsibility on an equitable basis. Nothing - - herein is intended or shall be deemed to constitute any admission of liability. - - - - - - - - ------------------------------------------------------------------------------ - - - - NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE - - (CDDL) - - - - The code released under the CDDL shall be governed by the laws of the State of - - California (excluding conflict-of-law provisions). Any litigation relating to - - this License shall be subject to the jurisdiction of the Federal Courts of the - - Northern District of California and the state courts of the State of - - California, with venue lying in Santa Clara County, California. - - - ---- - - - -Educational Community License v2.0 - -(docker-cli 20180417-snapshot, golang-github-docker-go-connections-dev 20181130-snapshot) - - - -Educational Community License - -Version 2.0, April 2007 - -============================= - - - -http://www.osedu.org/licenses/ - - - -The Educational Community License version 2.0 ("ECL") consists of the Apache 2.0 - -license, modified to change the scope of the patent grant in section 3 to be - -specific to the needs of the education communities using this license. The - -original Apache 2.0 license can be found at: - -http://www.apache.org/licenses/LICENSE-2.0 - - - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - - -1. Definitions. - - - -"License" shall mean the terms and conditions for use, reproduction, and - -distribution as defined by Sections 1 through 9 of this document. 
- - - -"Licensor" shall mean the copyright owner or entity authorized by the copyright - -owner that is granting the License. - - - -"Legal Entity" shall mean the union of the acting entity and all other entities - -that control, are controlled by, or are under common control with that entity. - -For the purposes of this definition, "control" means - - - - i. the power, direct or indirect, to cause the direction or management of such - - entity, whether by contract or otherwise, or - - - - ii. ownership of fifty percent (50%) or more of the outstanding shares, or - - - - iii. beneficial ownership of such entity. - - - -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions - -granted by this License. - - - -"Source" form shall mean the preferred form for making modifications, including - -but not limited to software source code, documentation source, and configuration - -files. - - - -"Object" form shall mean any form resulting from mechanical transformation or - -translation of a Source form, including but not limited to compiled object code, - -generated documentation, and conversions to other media types. - - - -"Work" shall mean the work of authorship, whether in Source or Object form, made - -available under the License, as indicated by a copyright notice that is included - -in or attached to the work (an example is provided in the Appendix below). - - - -"Derivative Works" shall mean any work, whether in Source or Object form, that is - -based on (or derived from) the Work and for which the editorial revisions, - -annotations, elaborations, or other modifications represent, as a whole, an - -original work of authorship. For the purposes of this License, Derivative Works - -shall not include works that remain separable from, or merely link (or bind by - -name) to the interfaces of, the Work and Derivative Works thereof. - - - -"Contribution" shall mean any work of authorship, including the original version - -of the Work and any modifications or additions to that Work or Derivative Works - -thereof, that is intentionally submitted to Licensor for inclusion in the Work by - -the copyright owner or by an individual or Legal Entity authorized to submit on - -behalf of the copyright owner. For the purposes of this definition, "submitted" - -means any form of electronic, verbal, or written communication sent to the - -Licensor or its representatives, including but not limited to communication on - -electronic mailing lists, source code control systems, and issue tracking systems - -that are managed by, or on behalf of, the Licensor for the purpose of discussing - -and improving the Work, but excluding communication that is conspicuously marked - -or otherwise designated in writing by the copyright owner as "Not a - -Contribution." - - - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of - -whom a Contribution has been received by Licensor and subsequently incorporated - -within the Work. - - - -2. Grant of Copyright License. - - - -Subject to the terms and conditions of this License, each Contributor hereby - -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, - -irrevocable copyright license to reproduce, prepare Derivative Works of, publicly - -display, publicly perform, sublicense, and distribute the Work and such - -Derivative Works in Source or Object form. - - - -3. Grant of Patent License. 
- - - -Subject to the terms and conditions of this License, each Contributor hereby - -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, - -irrevocable (except as stated in this section) patent license to make, have made, - -use, offer to sell, sell, import, and otherwise transfer the Work, where such - -license applies only to those patent claims licensable by such Contributor that - -are necessarily infringed by their Contribution(s) alone or by combination of - -their Contribution(s) with the Work to which such Contribution(s) was submitted. - -If You institute patent litigation against any entity (including a cross-claim or - -counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated - -within the Work constitutes direct or contributory patent infringement, then any - -patent licenses granted to You under this License for that Work shall terminate - -as of the date such litigation is filed. Any patent license granted hereby with - -respect to contributions by an individual employed by an institution or - -organization is limited to patent claims where the individual that is the author - -of the Work is also the inventor of the patent claims licensed, and where the - -organization or institution has the right to grant such license under applicable - -grant and research funding agreements. No other express or implied licenses are - -granted. - - - -4. Redistribution. - - - -You may reproduce and distribute copies of the Work or Derivative Works thereof - -in any medium, with or without modifications, and in Source or Object form, - -provided that You meet the following conditions: - - - - 1. You must give any other recipients of the Work or Derivative Works a copy of - - this License; and - - - - 2. You must cause any modified files to carry prominent notices stating that - - You changed the files; and - - - - 3. You must retain, in the Source form of any Derivative Works that You - - distribute, all copyright, patent, trademark, and attribution notices from - - the Source form of the Work, excluding those notices that do not pertain to - - any part of the Derivative Works; and - - - - 4. If the Work includes a "NOTICE" text file as part of its distribution, then - - any Derivative Works that You distribute must include a readable copy of the - - attribution notices contained within such NOTICE file, excluding those - - notices that do not pertain to any part of the Derivative Works, in at least - - one of the following places: within a NOTICE text file distributed as part of - - the Derivative Works; within the Source form or documentation, if provided - - along with the Derivative Works; or, within a display generated by the - - Derivative Works, if and wherever such third-party notices normally appear. - - The contents of the NOTICE file are for informational purposes only and do - - not modify the License. You may add Your own attribution notices within - - Derivative Works that You distribute, alongside or as an addendum to the - - NOTICE text from the Work, provided that such additional attribution notices - - cannot be construed as modifying the License. - - - -You may add Your own copyright statement to Your modifications and may provide - -additional or different license terms and conditions for use, reproduction, or - -distribution of Your modifications, or for any such Derivative Works as a whole, - -provided Your use, reproduction, and distribution of the Work otherwise complies - -with the conditions stated in this License. 
- - - -5. Submission of Contributions. - - - -Unless You explicitly state otherwise, any Contribution intentionally submitted - -for inclusion in the Work by You to the Licensor shall be under the terms and - -conditions of this License, without any additional terms or conditions. - -Notwithstanding the above, nothing herein shall supersede or modify the terms of - -any separate license agreement you may have executed with Licensor regarding such - -Contributions. - - - -6. Trademarks. - - - -This License does not grant permission to use the trade names, trademarks, - -service marks, or product names of the Licensor, except as required for - -reasonable and customary use in describing the origin of the Work and reproducing - -the content of the NOTICE file. - - - -7. Disclaimer of Warranty. - - - -Unless required by applicable law or agreed to in writing, Licensor provides the - -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, - -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, - -including, without limitation, any warranties or conditions of TITLE, - -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are - -solely responsible for determining the appropriateness of using or redistributing - -the Work and assume any risks associated with Your exercise of permissions under - -this License. - - - -8. Limitation of Liability. - - - -In no event and under no legal theory, whether in tort (including negligence), - -contract, or otherwise, unless required by applicable law (such as deliberate and - -grossly negligent acts) or agreed to in writing, shall any Contributor be liable - -to You for damages, including any direct, indirect, special, incidental, or - -consequential damages of any character arising as a result of this License or out - -of the use or inability to use the Work (including but not limited to damages for - -loss of goodwill, work stoppage, computer failure or malfunction, or any and all - -other commercial damages or losses), even if such Contributor has been advised of - -the possibility of such damages. - - - -9. Accepting Warranty or Additional Liability. - - - -While redistributing the Work or Derivative Works thereof, You may choose to - -offer, and charge a fee for, acceptance of support, warranty, indemnity, or other - -liability obligations and/or rights consistent with this License. However, in - -accepting such obligations, You may act only on Your own behalf and on Your sole - -responsibility, not on behalf of any other Contributor, and only if You agree to - -indemnify, defend, and hold each Contributor harmless for any liability incurred - -by, or claims asserted against, such Contributor by reason of your accepting any - -such warranty or additional liability. - - - -END OF TERMS AND CONDITIONS - - - -APPENDIX: How to apply the Educational Community License to your work - - - -To apply the Educational Community License to your work, attach the following - -boilerplate notice, with the fields enclosed by brackets "[]" replaced with your - -own identifying information. (Don't include the brackets!) The text should be - -enclosed in the appropriate comment syntax for the file format. We also recommend - -that a file or class name and description of purpose be included on the same - -"printed page" as the copyright notice for easier identification within - -third-party archives. 
- - - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Educational Community License, Version 2.0 (the "License"); - - you may not use this file except in compliance with the License. You may obtain - - a copy of the License at - - http://www.osedu.org/licenses/ECL-2.0 - - - - Unless required by applicable law or agreed to in writing, software distributed - - under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - - CONDITIONS OF ANY KIND, either express or implied. See the License for the - - specific language governing permissions and limitations under the License. - - - ---- - - - -Expat License - -(glycerine/go-unsnap-stream 0.0~git20180323.9f0cb55) - - - -Expat License - -============= - - - -Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd - -                               and Clark Cooper - -Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Expat maintainers. - - - -Permission is hereby granted, free of charge, to any person obtaining a copy of - -this software and associated documentation files (the "Software"), to deal in the - -Software without restriction, including without limitation the rights to use, - -copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the - -Software, and to permit persons to whom the Software is furnished to do so, - -subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in all - -copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS - -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN - -AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - ---- - - - -GNU General Public License v2.0 only - -(alpine-baselayout 3.1.1, eudev 3.2.8) - - - -The GNU General Public License (GPL) - -==================================== - - - - - -Version 2, June 1991 - --------------------- - - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. - -59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - - -Everyone is permitted to copy and distribute verbatim copies - -of this license document, but changing it is not allowed. - - - -Preamble - - - -The licenses for most software are designed to take away your freedom to share - -and change it. By contrast, the GNU General Public License is intended to - -guarantee your freedom to share and change free software--to make sure the - -software is free for all its users. This General Public License applies to most - -of the Free Software Foundation's software and to any other program whose authors - -commit to using it. (Some other Free Software Foundation software is covered by - -the GNU Library General Public License instead.) You can apply it to your - -programs, too. - - - -When we speak of free software, we are referring to freedom, not price. Our - -General Public Licenses are designed to make sure that you have the freedom to - -distribute copies of free software (and charge for this service if you wish), - -that you receive source code or can get it if you want it, that you can change - -the software or use pieces of it in new free programs; and that you know you can - -do these things. 
- - - -To protect your rights, we need to make restrictions that forbid anyone to deny - -you these rights or to ask you to surrender the rights. These restrictions - -translate to certain responsibilities for you if you distribute copies of the - -software, or if you modify it. - - - -For example, if you distribute copies of such a program, whether gratis or for a - -fee, you must give the recipients all the rights that you have. You must make - -sure that they, too, receive or can get the source code. And you must show them - -these terms so they know their rights. - - - -We protect your rights with two steps: (1) copyright the software, and (2) offer - -you this license which gives you legal permission to copy, distribute and/or - -modify the software. - - - -Also, for each author's protection and ours, we want to make certain that - -everyone understands that there is no warranty for this free software. If the - -software is modified by someone else and passed on, we want its recipients to - -know that what they have is not the original, so that any problems introduced by - -others will not reflect on the original authors' reputations. - - - -Finally, any free program is threatened constantly by software patents. We wish - -to avoid the danger that redistributors of a free program will individually - -obtain patent licenses, in effect making the program proprietary. To prevent - -this, we have made it clear that any patent must be licensed for everyone's free - -use or not licensed at all. - - - -The precise terms and conditions for copying, distribution and modification - -follow. - - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - - -0. This License applies to any program or other work which contains a notice - -placed by the copyright holder saying it may be distributed under the terms of - -this General Public License. The "Program", below, refers to any such program or - -work, and a "work based on the Program" means either the Program or any - -derivative work under copyright law: that is to say, a work containing the - -Program or a portion of it, either verbatim or with modifications and/or - -translated into another language. (Hereinafter, translation is included without - -limitation in the term "modification".) Each licensee is addressed as "you". - - - -Activities other than copying, distribution and modification are not covered by - -this License; they are outside its scope. The act of running the Program is not - -restricted, and the output from the Program is covered only if its contents - -constitute a work based on the Program (independent of having been made by - -running the Program). Whether that is true depends on what the Program does. - - - -1. You may copy and distribute verbatim copies of the Program's source code as - -you receive it, in any medium, provided that you conspicuously and appropriately - -publish on each copy an appropriate copyright notice and disclaimer of warranty; - -keep intact all the notices that refer to this License and to the absence of any - -warranty; and give any other recipients of the Program a copy of this License - -along with the Program. - - - -You may charge a fee for the physical act of transferring a copy, and you may at - -your option offer warranty protection in exchange for a fee. - - - -2. 
You may modify your copy or copies of the Program or any portion of it, thus - -forming a work based on the Program, and copy and distribute such modifications - -or work under the terms of Section 1 above, provided that you also meet all of - -these conditions: - - - - a) You must cause the modified files to carry prominent notices stating - - that you changed the files and the date of any change. - - - - b) You must cause any work that you distribute or publish, that in whole or - - in part contains or is derived from the Program or any part thereof, to be - - licensed as a whole at no charge to all third parties under the terms of - - this License. - - - - c) If the modified program normally reads commands interactively when run, - - you must cause it, when started running for such interactive use in the - - most ordinary way, to print or display an announcement including an - - appropriate copyright notice and a notice that there is no warranty (or - - else, saying that you provide a warranty) and that users may redistribute - - the program under these conditions, and telling the user how to view a copy - - of this License. (Exception: if the Program itself is interactive but does - - not normally print such an announcement, your work based on the Program is - - not required to print an announcement.) - - - -These requirements apply to the modified work as a whole. If identifiable - -sections of that work are not derived from the Program, and can be reasonably - -considered independent and separate works in themselves, then this License, and - -its terms, do not apply to those sections when you distribute them as separate - -works. But when you distribute the same sections as part of a whole which is a - -work based on the Program, the distribution of the whole must be on the terms of - -this License, whose permissions for other licensees extend to the entire whole, - -and thus to each and every part regardless of who wrote it. - - - -Thus, it is not the intent of this section to claim rights or contest your rights - -to work written entirely by you; rather, the intent is to exercise the right to - -control the distribution of derivative or collective works based on the Program. - - - -In addition, mere aggregation of another work not based on the Program with the - -Program (or with a work based on the Program) on a volume of a storage or - -distribution medium does not bring the other work under the scope of this - -License. - - - -3. You may copy and distribute the Program (or a work based on it, under Section - -2) in object code or executable form under the terms of Sections 1 and 2 above - -provided that you also do one of the following: - - - - a) Accompany it with the complete corresponding machine-readable source - - code, which must be distributed under the terms of Sections 1 and 2 above - - on a medium customarily used for software interchange; or, - - - - b) Accompany it with a written offer, valid for at least three years, to - - give any third party, for a charge no more than your cost of physically - - performing source distribution, a complete machine-readable copy of the - - corresponding source code, to be distributed under the terms of Sections 1 - - and 2 above on a medium customarily used for software interchange; or, - - - - c) Accompany it with the information you received as to the offer to - - distribute corresponding source code. 
(This alternative is allowed only for - - noncommercial distribution - - - -and only if you received the program in object code or executable form with such - -an offer, in accord with Subsection b above.) - - - -The source code for a work means the preferred form of the work for making - -modifications to it. For an executable work, complete source code means all the - -source code for all modules it contains, plus any associated interface definition - -files, plus the scripts used to control compilation and installation of the - -executable. However, as a special exception, the source code distributed need not - -include anything that is normally distributed (in either source or binary form) - -with the major components (compiler, kernel, and so on) of the operating system - -on which the executable runs, unless that component itself accompanies the - -executable. - - - -If distribution of executable or object code is made by offering access to copy - -from a designated place, then offering equivalent access to copy the source code - -from the same place counts as distribution of the source code, even though third - -parties are not compelled to copy the source along with the object code. - - - -4. You may not copy, modify, sublicense, or distribute the Program except as - -expressly provided under this License. Any attempt otherwise to copy, modify, - -sublicense or distribute the Program is void, and will automatically terminate - -your rights under this License. However, parties who have received copies, or - -rights, from you under this License will not have their licenses terminated so - -long as such parties remain in full compliance. - - - -5. You are not required to accept this License, since you have not signed it. - -However, nothing else grants you permission to modify or distribute the Program - -or its derivative works. These actions are prohibited by law if you do not accept - -this License. Therefore, by modifying or distributing the Program (or any work - -based on the Program), you indicate your acceptance of this License to do so, and - -all its terms and conditions for copying, distributing or modifying the Program - -or works based on it. - - - -6. Each time you redistribute the Program (or any work based on the Program), the - -recipient automatically receives a license from the original licensor to copy, - -distribute or modify the Program subject to these terms and conditions. You may - -not impose any further restrictions on the recipients' exercise of the rights - -granted herein. You are not responsible for enforcing compliance by third parties - -to this License. - - - -7. If, as a consequence of a court judgment or allegation of patent infringement - -or for any other reason (not limited to patent issues), conditions are imposed on - -you (whether by court order, agreement or otherwise) that contradict the - -conditions of this License, they do not excuse you from the conditions of this - -License. If you cannot distribute so as to satisfy simultaneously your - -obligations under this License and any other pertinent obligations, then as a - -consequence you may not distribute the Program at all. For example, if a patent - -license would not permit royalty-free redistribution of the Program by all those - -who receive copies directly or indirectly through you, then the only way you - -could satisfy both it and this License would be to refrain entirely from - -distribution of the Program. 
- - - -If any portion of this section is held invalid or unenforceable under any - -particular circumstance, the balance of the section is intended to apply and the - -section as a whole is intended to apply in other circumstances. - - - -It is not the purpose of this section to induce you to infringe any patents or - -other property right claims or to contest validity of any such claims; this - -section has the sole purpose of protecting the integrity of the free software - -distribution system, which is implemented by public license practices. Many - -people have made generous contributions to the wide range of software distributed - -through that system in reliance on consistent application of that system; it is - -up to the author/donor to decide if he or she is willing to distribute software - -through any other system and a licensee cannot impose that choice. - - - -This section is intended to make thoroughly clear what is believed to be a - -consequence of the rest of this License. - - - -8. If the distribution and/or use of the Program is restricted in certain - -countries either by patents or by copyrighted interfaces, the original copyright - -holder who places the Program under this License may add an explicit geographical - -distribution limitation excluding those countries, so that distribution is - -permitted only in or among countries not thus excluded. In such case, this - -License incorporates the limitation as if written in the body of this License. - - - -9. The Free Software Foundation may publish revised and/or new versions of the - -General Public License from time to time. Such new versions will be similar in - -spirit to the present version, but may differ in detail to address new problems - -or concerns. - - - -Each version is given a distinguishing version number. If the Program specifies a - -version number of this License which applies to it and "any later version", you - -have the option of following the terms and conditions either of that version or - -of any later version published by the Free Software Foundation. If the Program - -does not specify a version number of this License, you may choose any version - -ever published by the Free Software Foundation. - - - -10. If you wish to incorporate parts of the Program into other free programs - -whose distribution conditions are different, write to the author to ask for - -permission. For software which is copyrighted by the Free Software Foundation, - -write to the Free Software Foundation; we sometimes make exceptions for this. Our - -decision will be guided by the two goals of preserving the free status of all - -derivatives of our free software and of promoting the sharing and reuse of - -software generally. - - - -NO WARRANTY - - - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE - -PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED - -IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" - -WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT - -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - -PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE - -PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF - -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - - -12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL - -ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE - -PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, - -SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY - -TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING - -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF - -THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER - -PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - - -END OF TERMS AND CONDITIONS - - - ---- - - - -GNU General Public License v2.0 or later - -(BusyBox 1_31_0, e2fsprogs 1.45.4, keyutils 1.6, Linux Extended Attributes 2.2.52, LVM2 2.02.184, util-linux 2.33.2) - - - -The GNU General Public License (GPL) - -==================================== - - - - - -Version 2, June 1991 - --------------------- - - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. - -59 Temple Place, Suite 330, Boston, MA 02111-1307 USAEveryone is permitted to copy and distribute verbatim copies - -of this license document, but changing it is not allowed. - - - -Preamble - - - -The licenses for most software are designed to take away your freedom to share - -and change it. By contrast, the GNU General Public License is intended to - -guarantee your freedom to share and change free software--to make sure the - -software is free for all its users. This General Public License applies to most - -of the Free Software Foundation's software and to any other program whose authors - -commit to using it. (Some other Free Software Foundation software is covered by - -the GNU Library General Public License instead.) You can apply it to your - -programs, too. - - - -When we speak of free software, we are referring to freedom, not price. Our - -General Public Licenses are designed to make sure that you have the freedom to - -distribute copies of free software (and charge for this service if you wish), - -that you receive source code or can get it if you want it, that you can change - -the software or use pieces of it in new free programs; and that you know you can - -do these things. - - - -To protect your rights, we need to make restrictions that forbid anyone to deny - -you these rights or to ask you to surrender the rights. These restrictions - -translate to certain responsibilities for you if you distribute copies of the - -software, or if you modify it. - - - -For example, if you distribute copies of such a program, whether gratis or for a - -fee, you must give the recipients all the rights that you have. You must make - -sure that they, too, receive or can get the source code. And you must show them - -these terms so they know their rights. - - - -We protect your rights with two steps: (1) copyright the software, and (2) offer - -you this license which gives you legal permission to copy, distribute and/or - -modify the software. - - - -Also, for each author's protection and ours, we want to make certain that - -everyone understands that there is no warranty for this free software. If the - -software is modified by someone else and passed on, we want its recipients to - -know that what they have is not the original, so that any problems introduced by - -others will not reflect on the original authors' reputations. - - - -Finally, any free program is threatened constantly by software patents. 
We wish - -to avoid the danger that redistributors of a free program will individually - -obtain patent licenses, in effect making the program proprietary. To prevent - -this, we have made it clear that any patent must be licensed for everyone's free - -use or not licensed at all. - - - -The precise terms and conditions for copying, distribution and modification - -follow. - - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - - - 1. This License applies to any program or other work which contains a notice - - placed by the copyright holder saying it may be distributed under the terms - - of this General Public License. The "Program", below, refers to any such - - program or work, and a "work based on the Program" means either the Program - - or any derivative work under copyright law: that is to say, a work containing - - the Program or a portion of it, either verbatim or with modifications and/or - - translated into another language. (Hereinafter, translation is included - - without limitation in the term "modification".) Each licensee is addressed as - - "you". - - - - Activities other than copying, distribution and modification are not covered - - by this License; they are outside its scope. The act of running the Program - - is not restricted, and the output from the Program is covered only if its - - contents constitute a work based on the Program (independent of having been - - made by running the Program). Whether that is true depends on what the - - Program does. - - - - 2. You may copy and distribute verbatim copies of the Program's source code as - - you receive it, in any medium, provided that you conspicuously and - - appropriately publish on each copy an appropriate copyright notice and - - disclaimer of warranty; keep intact all the notices that refer to this - - License and to the absence of any warranty; and give any other recipients of - - the Program a copy of this License along with the Program. - - - - You may charge a fee for the physical act of transferring a copy, and you may - - at your option offer warranty protection in exchange for a fee. - - - - 3. You may modify your copy or copies of the Program or any portion of it, thus - - forming a work based on the Program, and copy and distribute such - - modifications or work under the terms of Section 1 above, provided that you - - also meet all of these conditions: - - - - a. You must cause the modified files to carry prominent notices stating - - that you changed the files and the date of any change. - - - - b. You must cause any work that you distribute or publish, that in whole or - - in part contains or is derived from the Program or any part thereof, to - - be licensed as a whole at no charge to all third parties under the terms - - of this License. - - - - c. If the modified program normally reads commands interactively when run, - - you must cause it, when started running for such interactive use in the - - most ordinary way, to print or display an announcement including an - - appropriate copyright notice and a notice that there is no warranty (or - - else, saying that you provide a warranty) and that users may redistribute - - the program under these conditions, and telling the user how to view a - - copy of this License. (Exception: if the Program itself is interactive - - but does not normally print such an announcement, your work based on the - - Program is not required to print an announcement.) - - - - These requirements apply to the modified work as a whole. 
If identifiable - - sections of that work are not derived from the Program, and can be reasonably - - considered independent and separate works in themselves, then this License, - - and its terms, do not apply to those sections when you distribute them as - - separate works. But when you distribute the same sections as part of a whole - - which is a work based on the Program, the distribution of the whole must be - - on the terms of this License, whose permissions for other licensees extend to - - the entire whole, and thus to each and every part regardless of who wrote it. - - - - Thus, it is not the intent of this section to claim rights or contest your - - rights to work written entirely by you; rather, the intent is to exercise the - - right to control the distribution of derivative or collective works based on - - the Program. - - - - In addition, mere aggregation of another work not based on the Program with - - the Program (or with a work based on the Program) on a volume of a storage or - - distribution medium does not bring the other work under the scope of this - - License. - - - - 4. You may copy and distribute the Program (or a work based on it, under - - Section 2) in object code or executable form under the terms of Sections 1 - - and 2 above provided that you also do one of the following: - - - - a. Accompany it with the complete corresponding machine-readable source - - code, which must be distributed under the terms of Sections 1 and 2 above - - on a medium customarily used for software interchange; or, - - - - b. Accompany it with a written offer, valid for at least three years, to - - give any third party, for a charge no more than your cost of physically - - performing source distribution, a complete machine-readable copy of the - - corresponding source code, to be distributed under the terms of Sections - - 1 and 2 above on a medium customarily used for software interchange; or, - - - - c. Accompany it with the information you received as to the offer to - - distribute corresponding source code. (This alternative is allowed only - - for noncommercial distribution and only if you received the program in - - object code or executable form with such an offer, in accord with - - Subsection b above.) - - - - The source code for a work means the preferred form of the work for making - - modifications to it. For an executable work, complete source code means all - - the source code for all modules it contains, plus any associated interface - - definition files, plus the scripts used to control compilation and - - installation of the executable. However, as a special exception, the source - - code distributed need not include anything that is normally distributed (in - - either source or binary form) with the major components (compiler, kernel, - - and so on) of the operating system on which the executable runs, unless that - - component itself accompanies the executable. - - - - If distribution of executable or object code is made by offering access to - - copy from a designated place, then offering equivalent access to copy the - - source code from the same place counts as distribution of the source code, - - even though third parties are not compelled to copy the source along with the - - object code. - - - - 5. You may not copy, modify, sublicense, or distribute the Program except as - - expressly provided under this License. 
Any attempt otherwise to copy, modify, - - sublicense or distribute the Program is void, and will automatically - - terminate your rights under this License. However, parties who have received - - copies, or rights, from you under this License will not have their licenses - - terminated so long as such parties remain in full compliance. - - - - 6. You are not required to accept this License, since you have not signed it. - - However, nothing else grants you permission to modify or distribute the - - Program or its derivative works. These actions are prohibited by law if you - - do not accept this License. Therefore, by modifying or distributing the - - Program (or any work based on the Program), you indicate your acceptance of - - this License to do so, and all its terms and conditions for copying, - - distributing or modifying the Program or works based on it. - - - - 7. Each time you redistribute the Program (or any work based on the Program), - - the recipient automatically receives a license from the original licensor to - - copy, distribute or modify the Program subject to these terms and conditions. - - You may not impose any further restrictions on the recipients' exercise of - - the rights granted herein. You are not responsible for enforcing compliance - - by third parties to this License. - - - - 8. If, as a consequence of a court judgment or allegation of patent - - infringement or for any other reason (not limited to patent issues), - - conditions are imposed on you (whether by court order, agreement or - - otherwise) that contradict the conditions of this License, they do not excuse - - you from the conditions of this License. If you cannot distribute so as to - - satisfy simultaneously your obligations under this License and any other - - pertinent obligations, then as a consequence you may not distribute the - - Program at all. For example, if a patent license would not permit - - royalty-free redistribution of the Program by all those who receive copies - - directly or indirectly through you, then the only way you could satisfy both - - it and this License would be to refrain entirely from distribution of the - - Program. - - - - If any portion of this section is held invalid or unenforceable under any - - particular circumstance, the balance of the section is intended to apply and - - the section as a whole is intended to apply in other circumstances. - - - - It is not the purpose of this section to induce you to infringe any patents - - or other property right claims or to contest validity of any such claims; - - this section has the sole purpose of protecting the integrity of the free - - software distribution system, which is implemented by public license - - practices. Many people have made generous contributions to the wide range of - - software distributed through that system in reliance on consistent - - application of that system; it is up to the author/donor to decide if he or - - she is willing to distribute software through any other system and a licensee - - cannot impose that choice. - - - - This section is intended to make thoroughly clear what is believed to be a - - consequence of the rest of this License. - - - - 9. 
If the distribution and/or use of the Program is restricted in certain - - countries either by patents or by copyrighted interfaces, the original - - copyright holder who places the Program under this License may add an - - explicit geographical distribution limitation excluding those countries, so - - that distribution is permitted only in or among countries not thus excluded. - - In such case, this License incorporates the limitation as if written in the - - body of this License. - - - - 10. The Free Software Foundation may publish revised and/or new versions of the - - General Public License from time to time. Such new versions will be similar - - in spirit to the present version, but may differ in detail to address new - - problems or concerns. - - - - Each version is given a distinguishing version number. If the Program - - specifies a version number of this License which applies to it and "any later - - version", you have the option of following the terms and conditions either of - - that version or of any later version published by the Free Software - - Foundation. If the Program does not specify a version number of this License, - - you may choose any version ever published by the Free Software Foundation. - - - - 11. If you wish to incorporate parts of the Program into other free programs - - whose distribution conditions are different, write to the author to ask for - - permission. For software which is copyrighted by the Free Software - - Foundation, write to the Free Software Foundation; we sometimes make - - exceptions for this. Our decision will be guided by the two goals of - - preserving the free status of all derivatives of our free software and of - - promoting the sharing and reuse of software generally. - - - - NO WARRANTY - - - - 12. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR - - THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE - - STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE - - PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - - INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - - FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND - - PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, - - YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - - - 13. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL - - ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE - - THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY - - GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE - - OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR - - DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR - - A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH - - HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - - - END OF TERMS AND CONDITIONS - - - -How to Apply These Terms to Your New Programs - - - -If you develop a new program, and you want it to be of the greatest possible use - -to the public, the best way to achieve this is to make it free software which - -everyone can redistribute and change under these terms. - - - -To do so, attach the following notices to the program. 
It is safest to attach - -them to the start of each source file to most effectively convey the exclusion of - -warranty; and each file should have at least the "copyright" line and a pointer - -to where the full notice is found. - - - -one line to give the program's name and a brief idea of what it does.Copyright (C) - - - -This program is free software; you can redistribute it and/or - -modify it under the terms of the GNU General Public License - -as published by the Free Software Foundation; either version 2 - -of the License, or (at your option) any later version. - - - -This program is distributed in the hope that it will be useful, - -but WITHOUT ANY WARRANTY; without even the implied warranty of - -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - -GNU General Public License for more details. - - - -You should have received a copy of the GNU General Public License - -along with this program; if not, write to the Free Software - -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - - -Also add information on how to contact you by electronic and paper mail. - - - -If the program is interactive, make it output a short notice like this when it - -starts in an interactive mode: - - - -Gnomovision version 69, Copyright (C) year name of author - -Gnomovision comes with ABSOLUTELY NO WARRANTY; for details - -type `show w'. This is free software, and you are welcome - -to redistribute it under certain conditions; type `show c' - -for details. - - - -The hypothetical commands `show w' and `show c' should show the appropriate parts - -of the General Public License. Of course, the commands you use may be called - -something other than `show w' and `show c'; they could even be mouse-clicks or - -menu items--whatever suits your program. - - - -You should also get your employer (if you work as a programmer) or your school, - -if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a - -sample; alter the names: - - - -Yoyodyne, Inc., hereby disclaims all copyright - -interest in the program `Gnomovision' - -(which makes passes at compilers) written - -by James Hacker. - - - -signature of Ty Coon, 1 April 1989 - -Ty Coon, President of Vice - - - -This General Public License does not permit incorporating your program into - -proprietary programs. If your program is a subroutine library, you may consider - -it more useful to permit linking proprietary applications with the library. If - -this is what you want to do, use the GNU Library General Public License instead - -of this License. - - - ---- - - - -GNU General Public License v3.0 or later - -(Bash 5.0.0, GNU Core Utilities v8.31, libcap-doc 2.27, multipath-tools 0.8.1) - - - -GNU GENERAL PUBLIC LICENSE - -========================== - - - -Version 3,  29 June 2007 - - - -Copyright (C) 2007 Free Software Foundation, Inc. - - - -Everyone is permitted to copy and distribute verbatim copies of this license - -document, but changing it is not allowed. - - - - - -Preamble - - - -The GNU General Public License is a free, copyleft license for software and other - -kinds of works. - - - -The licenses for most software and other practical works are designed to take - -away your freedom to share and change the works. By contrast, the GNU General - -Public License is intended to guarantee your freedom to share and change all - -versions of a program--to make sure it remains free software for all its users. 
- -We, the Free Software Foundation, use the GNU General Public License for most of - -our software; it applies also to any other work released this way by its authors. - -You can apply it to your programs, too. - - - -When we speak of free software, we are referring to freedom, not price. Our - -General Public Licenses are designed to make sure that you have the freedom to - -distribute copies of free software (and charge for them if you wish), that you - -receive source code or can get it if you want it, that you can change the - -software or use pieces of it in new free programs, and that you know you can do - -these things. - - - -To protect your rights, we need to prevent others from denying you these rights - -or asking you to surrender the rights. Therefore, you have certain - -responsibilities if you distribute copies of the software, or if you modify it: - -responsibilities to respect the freedom of others. - - - -For example, if you distribute copies of such a program, whether gratis or for a - -fee, you must pass on to the recipients the same freedoms that you received. You - -must make sure that they, too, receive or can get the source code. And you must - -show them these terms so they know their rights. - - - -Developers that use the GNU GPL protect your rights with two steps: (1) assert - -copyright on the software, and (2) offer you this License giving you legal - -permission to copy, distribute and/or modify it. - - - -For the developers' and authors' protection, the GPL clearly explains that there - -is no warranty for this free software. For both users' and authors' sake, the GPL - -requires that modified versions be marked as changed, so that their problems will - -not be attributed erroneously to authors of previous versions. - - - -Some devices are designed to deny users access to install or run modified - -versions of the software inside them, although the manufacturer can do so. This - -is fundamentally incompatible with the aim of protecting users' freedom to change - -the software. The systematic pattern of such abuse occurs in the area of products - -for individuals to use, which is precisely where it is most unacceptable. - -Therefore, we have designed this version of the GPL to prohibit the practice for - -those products. If such problems arise substantially in other domains, we stand - -ready to extend this provision to those domains in future versions of the GPL, as - -needed to protect the freedom of users. - - - -Finally, every program is threatened constantly by software patents. States - -should not allow patents to restrict development and use of software on - -general-purpose computers, but in those that do, we wish to avoid the special - -danger that patents applied to a free program could make it effectively - -proprietary. To prevent this, the GPL assures that patents cannot be used to - -render the program non-free. - - - -The precise terms and conditions for copying, distribution and modification - -follow. - - - - - -TERMS AND CONDITIONS - - - -0. Definitions. - - - -“This License” refers to version 3 of the GNU General Public License. - - - -“Copyright” also means copyright-like laws that apply to other kinds of works, - -such as semiconductor masks. - - - -“The Program” refers to any copyrightable work licensed under this License. Each - -licensee is addressed as “you”. “Licensees” and “recipients” may be individuals - -or organizations. 
- - - -To “modify” a work means to copy from or adapt all or part of the work in a - -fashion requiring copyright permission, other than the making of an exact copy. - -The resulting work is called a “modified version” of the earlier work or a work - -“based on” the earlier work. - - - -A “covered work” means either the unmodified Program or a work based on the - -Program. - - - -To “propagate” a work means to do anything with it that, without permission, - -would make you directly or secondarily liable for infringement under applicable - -copyright law, except executing it on a computer or modifying a private copy. - -Propagation includes copying, distribution (with or without modification), making - -available to the public, and in some countries other activities as well. - - - -To “convey” a work means any kind of propagation that enables other parties to - -make or receive copies. Mere interaction with a user through a computer network, - -with no transfer of a copy, is not conveying. - - - -An interactive user interface displays “Appropriate Legal Notices” to the extent - -that it includes a convenient and prominently visible feature that (1) displays - -an appropriate copyright notice, and (2) tells the user that there is no warranty - -for the work (except to the extent that warranties are provided), that licensees - -may convey the work under this License, and how to view a copy of this License. - -If the interface presents a list of user commands or options, such as a menu, a - -prominent item in the list meets this criterion. - - - -1. Source Code. - - - -The “source code” for a work means the preferred form of the work for making - -modifications to it. “Object code” means any non-source form of a work. - - - -A “Standard Interface” means an interface that either is an official standard - -defined by a recognized standards body, or, in the case of interfaces specified - -for a particular programming language, one that is widely used among developers - -working in that language. - - - -The “System Libraries” of an executable work include anything, other than the - -work as a whole, that (a) is included in the normal form of packaging a Major - -Component, but which is not part of that Major Component, and (b) serves only to - -enable use of the work with that Major Component, or to implement a Standard - -Interface for which an implementation is available to the public in source code - -form. A “Major Component”, in this context, means a major essential component - -(kernel, window system, and so on) of the specific operating system (if any) on - -which the executable work runs, or a compiler used to produce the work, or an - -object code interpreter used to run it. - - - -The “Corresponding Source” for a work in object code form means all the source - -code needed to generate, install, and (for an executable work) run the object - -code and to modify the work, including scripts to control those activities. - -However, it does not include the work's System Libraries, or general-purpose - -tools or generally available free programs which are used unmodified in - -performing those activities but which are not part of the work. 
For example, - -Corresponding Source includes interface definition files associated with source - -files for the work, and the source code for shared libraries and dynamically - -linked subprograms that the work is specifically designed to require, such as by - -intimate data communication or control flow between those subprograms and other - -parts of the work. - - - -The Corresponding Source need not include anything that users can regenerate - -automatically from other parts of the Corresponding Source. - - - -The Corresponding Source for a work in source code form is that same work. - - - -2. Basic Permissions. - - - -All rights granted under this License are granted for the term of copyright on - -the Program, and are irrevocable provided the stated conditions are met. This - -License explicitly affirms your unlimited permission to run the unmodified - -Program. The output from running a covered work is covered by this License only - -if the output, given its content, constitutes a covered work. This License - -acknowledges your rights of fair use or other equivalent, as provided by - -copyright law. - - - -You may make, run and propagate covered works that you do not convey, without - -conditions so long as your license otherwise remains in force. You may convey - -covered works to others for the sole purpose of having them make modifications - -exclusively for you, or provide you with facilities for running those works, - -provided that you comply with the terms of this License in conveying all material - -for which you do not control copyright. Those thus making or running the covered - -works for you must do so exclusively on your behalf, under your direction and - -control, on terms that prohibit them from making any copies of your copyrighted - -material outside their relationship with you. - - - -Conveying under any other circumstances is permitted solely under the conditions - -stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - - - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - - -No covered work shall be deemed part of an effective technological measure under - -any applicable law fulfilling obligations under article 11 of the WIPO copyright - -treaty adopted on 20 December 1996, or similar laws prohibiting or restricting - -circumvention of such measures. - - - -When you convey a covered work, you waive any legal power to forbid circumvention - -of technological measures to the extent such circumvention is effected by - -exercising rights under this License with respect to the covered work, and you - -disclaim any intention to limit operation or modification of the work as a means - -of enforcing, against the work's users, your or third parties' legal rights to - -forbid circumvention of technological measures. - - - -4. Conveying Verbatim Copies. - - - -You may convey verbatim copies of the Program's source code as you receive it, in - -any medium, provided that you conspicuously and appropriately publish on each - -copy an appropriate copyright notice; keep intact all notices stating that this - -License and any non-permissive terms added in accord with section 7 apply to the - -code; keep intact all notices of the absence of any warranty; and give all - -recipients a copy of this License along with the Program. - - - -You may charge any price or no price for each copy that you convey, and you may - -offer support or warranty protection for a fee. - - - -5. Conveying Modified Source Versions. 
- - - -You may convey a work based on the Program, or the modifications to produce it - -from the Program, in the form of source code under the terms of section 4, - -provided that you also meet all of these conditions: - - - - * a) The work must carry prominent notices stating that you modified it, and - - giving a relevant date. - - - - * b) The work must carry prominent notices stating that it is released under - - this License and any conditions added under section 7. This requirement - - modifies the requirement in section 4 to “keep intact all notices”. - - - - * c) You must license the entire work, as a whole, under this License to anyone - - who comes into possession of a copy. This License will therefore apply, along - - with any applicable section 7 additional terms, to the whole of the work, and - - all its parts, regardless of how they are packaged. This License gives no - - permission to license the work in any other way, but it does not invalidate - - such permission if you have separately received it. - - - - * d) If the work has interactive user interfaces, each must display Appropriate - - Legal Notices; however, if the Program has interactive interfaces that do not - - display Appropriate Legal Notices, your work need not make them do so. - - - -A compilation of a covered work with other separate and independent works, which - -are not by their nature extensions of the covered work, and which are not - -combined with it such as to form a larger program, in or on a volume of a storage - -or distribution medium, is called an “aggregate” if the compilation and its - -resulting copyright are not used to limit the access or legal rights of the - -compilation's users beyond what the individual works permit. Inclusion of a - -covered work in an aggregate does not cause this License to apply to the other - -parts of the aggregate. - - - -6. Conveying Non-Source Forms. - - - -You may convey a covered work in object code form under the terms of sections 4 - -and 5, provided that you also convey the machine-readable Corresponding Source - -under the terms of this License, in one of these ways: - - - - * a) Convey the object code in, or embodied in, a physical product (including a - - physical distribution medium), accompanied by the Corresponding Source fixed - - on a durable physical medium customarily used for software interchange. - - - - * b) Convey the object code in, or embodied in, a physical product (including a - - physical distribution medium), accompanied by a written offer, valid for at - - least three years and valid for as long as you offer spare parts or customer - - support for that product model, to give anyone who possesses the object code - - either (1) a copy of the Corresponding Source for all the software in the - - product that is covered by this License, on a durable physical medium - - customarily used for software interchange, for a price no more than your - - reasonable cost of physically performing this conveying of source, or (2) - - access to copy the Corresponding Source from a network server at no charge. - - - - * c) Convey individual copies of the object code with a copy of the written - - offer to provide the Corresponding Source. This alternative is allowed only - - occasionally and noncommercially, and only if you received the object code - - with such an offer, in accord with subsection 6b. 
- - - - * d) Convey the object code by offering access from a designated place (gratis - - or for a charge), and offer equivalent access to the Corresponding Source in - - the same way through the same place at no further charge. You need not - - require recipients to copy the Corresponding Source along with the object - - code. If the place to copy the object code is a network server, the - - Corresponding Source may be on a different server (operated by you or a third - - party) that supports equivalent copying facilities, provided you maintain - - clear directions next to the object code saying where to find the - - Corresponding Source. Regardless of what server hosts the Corresponding - - Source, you remain obligated to ensure that it is available for as long as - - needed to satisfy these requirements. - - - - * e) Convey the object code using peer-to-peer transmission, provided you - - inform other peers where the object code and Corresponding Source of the work - - are being offered to the general public at no charge under subsection 6d. - - - -A separable portion of the object code, whose source code is excluded from the - -Corresponding Source as a System Library, need not be included in conveying the - -object code work. - - - -A “User Product” is either (1) a “consumer product”, which means any tangible - -personal property which is normally used for personal, family, or household - -purposes, or (2) anything designed or sold for incorporation into a dwelling. In - -determining whether a product is a consumer product, doubtful cases shall be - -resolved in favor of coverage. For a particular product received by a particular - -user, “normally used” refers to a typical or common use of that class of product, - -regardless of the status of the particular user or of the way in which the - -particular user actually uses, or expects or is expected to use, the product. A - -product is a consumer product regardless of whether the product has substantial - -commercial, industrial or non-consumer uses, unless such uses represent the only - -significant mode of use of the product. - - - -“Installation Information” for a User Product means any methods, procedures, - -authorization keys, or other information required to install and execute modified - -versions of a covered work in that User Product from a modified version of its - -Corresponding Source. The information must suffice to ensure that the continued - -functioning of the modified object code is in no case prevented or interfered - -with solely because modification has been made. - - - -If you convey an object code work under this section in, or with, or specifically - -for use in, a User Product, and the conveying occurs as part of a transaction in - -which the right of possession and use of the User Product is transferred to the - -recipient in perpetuity or for a fixed term (regardless of how the transaction is - -characterized), the Corresponding Source conveyed under this section must be - -accompanied by the Installation Information. But this requirement does not apply - -if neither you nor any third party retains the ability to install modified object - -code on the User Product (for example, the work has been installed in ROM). - - - -The requirement to provide Installation Information does not include a - -requirement to continue to provide support service, warranty, or updates for a - -work that has been modified or installed by the recipient, or for the User - -Product in which it has been modified or installed. 
Access to a network may be - -denied when the modification itself materially and adversely affects the - -operation of the network or violates the rules and protocols for communication - -across the network. - - - -Corresponding Source conveyed, and Installation Information provided, in accord - -with this section must be in a format that is publicly documented (and with an - -implementation available to the public in source code form), and must require no - -special password or key for unpacking, reading or copying. - - - -7. Additional Terms. - - - -“Additional permissions” are terms that supplement the terms of this License by - -making exceptions from one or more of its conditions. Additional permissions that - -are applicable to the entire Program shall be treated as though they were - -included in this License, to the extent that they are valid under applicable law. - -If additional permissions apply only to part of the Program, that part may be - -used separately under those permissions, but the entire Program remains governed - -by this License without regard to the additional permissions. - - - -When you convey a copy of a covered work, you may at your option remove any - -additional permissions from that copy, or from any part of it. (Additional - -permissions may be written to require their own removal in certain cases when you - -modify the work.) You may place additional permissions on material, added by you - -to a covered work, for which you have or can give appropriate copyright - -permission. - - - -Notwithstanding any other provision of this License, for material you add to a - -covered work, you may (if authorized by the copyright holders of that material) - -supplement the terms of this License with terms: - - - - * a) Disclaiming warranty or limiting liability differently from the terms of - - sections 15 and 16 of this License; or - - - - * b) Requiring preservation of specified reasonable legal notices or author - - attributions in that material or in the Appropriate Legal Notices displayed - - by works containing it; or - - - - * c) Prohibiting misrepresentation of the origin of that material, or requiring - - that modified versions of such material be marked in reasonable ways as - - different from the original version; or - - - - * d) Limiting the use for publicity purposes of names of licensors or authors - - of the material; or - - - - * e) Declining to grant rights under trademark law for use of some trade names, - - trademarks, or service marks; or - - - - * f) Requiring indemnification of licensors and authors of that material by - - anyone who conveys the material (or modified versions of it) with contractual - - assumptions of liability to the recipient, for any liability that these - - contractual assumptions directly impose on those licensors and authors. - - - -All other non-permissive additional terms are considered “further restrictions” - -within the meaning of section 10. If the Program as you received it, or any part - -of it, contains a notice stating that it is governed by this License along with a - -term that is a further restriction, you may remove that term. If a license - -document contains a further restriction but permits relicensing or conveying - -under this License, you may add to a covered work material governed by the terms - -of that license document, provided that the further restriction does not survive - -such relicensing or conveying. 
- - - -If you add terms to a covered work in accord with this section, you must place, - -in the relevant source files, a statement of the additional terms that apply to - -those files, or a notice indicating where to find the applicable terms. - - - -Additional terms, permissive or non-permissive, may be stated in the form of a - -separately written license, or stated as exceptions; the above requirements apply - -either way. - - - -8. Termination. - - - -You may not propagate or modify a covered work except as expressly provided under - -this License. Any attempt otherwise to propagate or modify it is void, and will - -automatically terminate your rights under this License (including any patent - -licenses granted under the third paragraph of section 11). - - - -However, if you cease all violation of this License, then your license from a - -particular copyright holder is reinstated (a) provisionally, unless and until the - -copyright holder explicitly and finally terminates your license, and (b) - -permanently, if the copyright holder fails to notify you of the violation by some - -reasonable means prior to 60 days after the cessation. - - - -Moreover, your license from a particular copyright holder is reinstated - -permanently if the copyright holder notifies you of the violation by some - -reasonable means, this is the first time you have received notice of violation of - -this License (for any work) from that copyright holder, and you cure the - -violation prior to 30 days after your receipt of the notice. - - - -Termination of your rights under this section does not terminate the licenses of - -parties who have received copies or rights from you under this License. If your - -rights have been terminated and not permanently reinstated, you do not qualify to - -receive new licenses for the same material under section 10. - - - -9. Acceptance Not Required for Having Copies. - - - -You are not required to accept this License in order to receive or run a copy of - -the Program. Ancillary propagation of a covered work occurring solely as a - -consequence of using peer-to-peer transmission to receive a copy likewise does - -not require acceptance. However, nothing other than this License grants you - -permission to propagate or modify any covered work. These actions infringe - -copyright if you do not accept this License. Therefore, by modifying or - -propagating a covered work, you indicate your acceptance of this License to do - -so. - - - -10. Automatic Licensing of Downstream Recipients. - - - -Each time you convey a covered work, the recipient automatically receives a - -license from the original licensors, to run, modify and propagate that work, - -subject to this License. You are not responsible for enforcing compliance by - -third parties with this License. - - - -An “entity transaction” is a transaction transferring control of an organization, - -or substantially all assets of one, or subdividing an organization, or merging - -organizations. If propagation of a covered work results from an entity - -transaction, each party to that transaction who receives a copy of the work also - -receives whatever licenses to the work the party's predecessor in interest had or - -could give under the previous paragraph, plus a right to possession of the - -Corresponding Source of the work from the predecessor in interest, if the - -predecessor has it or can get it with reasonable efforts. 
- - - -You may not impose any further restrictions on the exercise of the rights granted - -or affirmed under this License. For example, you may not impose a license fee, - -royalty, or other charge for exercise of rights granted under this License, and - -you may not initiate litigation (including a cross-claim or counterclaim in a - -lawsuit) alleging that any patent claim is infringed by making, using, selling, - -offering for sale, or importing the Program or any portion of it. - - - -11. Patents. - - - -A “contributor” is a copyright holder who authorizes use under this License of - -the Program or a work on which the Program is based. The work thus licensed is - -called the contributor's “contributor version”. - - - -A contributor's “essential patent claims” are all patent claims owned or - -controlled by the contributor, whether already acquired or hereafter acquired, - -that would be infringed by some manner, permitted by this License, of making, - -using, or selling its contributor version, but do not include claims that would - -be infringed only as a consequence of further modification of the contributor - -version. For purposes of this definition, “control” includes the right to grant - -patent sublicenses in a manner consistent with the requirements of this License. - - - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent - -license under the contributor's essential patent claims, to make, use, sell, - -offer for sale, import and otherwise run, modify and propagate the contents of - -its contributor version. - - - -In the following three paragraphs, a “patent license” is any express agreement or - -commitment, however denominated, not to enforce a patent (such as an express - -permission to practice a patent or covenant not to sue for patent infringement). - -To “grant” such a patent license to a party means to make such an agreement or - -commitment not to enforce a patent against the party. - - - -If you convey a covered work, knowingly relying on a patent license, and the - -Corresponding Source of the work is not available for anyone to copy, free of - -charge and under the terms of this License, through a publicly available network - -server or other readily accessible means, then you must either (1) cause the - -Corresponding Source to be so available, or (2) arrange to deprive yourself of - -the benefit of the patent license for this particular work, or (3) arrange, in a - -manner consistent with the requirements of this License, to extend the patent - -license to downstream recipients. “Knowingly relying” means you have actual - -knowledge that, but for the patent license, your conveying the covered work in a - -country, or your recipient's use of the covered work in a country, would infringe - -one or more identifiable patents in that country that you have reason to believe - -are valid. - - - -If, pursuant to or in connection with a single transaction or arrangement, you - -convey, or propagate by procuring conveyance of, a covered work, and grant a - -patent license to some of the parties receiving the covered work authorizing them - -to use, propagate, modify or convey a specific copy of the covered work, then the - -patent license you grant is automatically extended to all recipients of the - -covered work and works based on it. 
- - - -A patent license is “discriminatory” if it does not include within the scope of - -its coverage, prohibits the exercise of, or is conditioned on the non-exercise of - -one or more of the rights that are specifically granted under this License. You - -may not convey a covered work if you are a party to an arrangement with a third - -party that is in the business of distributing software, under which you make - -payment to the third party based on the extent of your activity of conveying the - -work, and under which the third party grants, to any of the parties who would - -receive the covered work from you, a discriminatory patent license (a) in - -connection with copies of the covered work conveyed by you (or copies made from - -those copies), or (b) primarily for and in connection with specific products or - -compilations that contain the covered work, unless you entered into that - -arrangement, or that patent license was granted, prior to 28 March 2007. - - - -Nothing in this License shall be construed as excluding or limiting any implied - -license or other defenses to infringement that may otherwise be available to you - -under applicable patent law. - - - -12. No Surrender of Others' Freedom. - - - -If conditions are imposed on you (whether by court order, agreement or otherwise) - -that contradict the conditions of this License, they do not excuse you from the - -conditions of this License. If you cannot convey a covered work so as to satisfy - -simultaneously your obligations under this License and any other pertinent - -obligations, then as a consequence you may not convey it at all. For example, if - -you agree to terms that obligate you to collect a royalty for further conveying - -from those to whom you convey the Program, the only way you could satisfy both - -those terms and this License would be to refrain entirely from conveying the - -Program. - - - -13. Use with the GNU Affero General Public License. - - - -Notwithstanding any other provision of this License, you have permission to link - -or combine any covered work with a work licensed under version 3 of the GNU - -Affero General Public License into a single combined work, and to convey the - -resulting work. The terms of this License will continue to apply to the part - -which is the covered work, but the special requirements of the GNU Affero General - -Public License, section 13, concerning interaction through a network will apply - -to the combination as such. - - - -14. Revised Versions of this License. - - - -The Free Software Foundation may publish revised and/or new versions of the GNU - -General Public License from time to time. Such new versions will be similar in - -spirit to the present version, but may differ in detail to address new problems - -or concerns. - - - -Each version is given a distinguishing version number. If the Program specifies - -that a certain numbered version of the GNU General Public License “or any later - -version” applies to it, you have the option of following the terms and conditions - -either of that numbered version or of any later version published by the Free - -Software Foundation. If the Program does not specify a version number of the GNU - -General Public License, you may choose any version ever published by the Free - -Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU
General Public License can be used, that proxy's public statement of acceptance of a
version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no
additional obligations are imposed on any author or copyright holder as a result of
your choosing to follow a later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR
IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU
ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY
COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS
PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE
OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE
WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be
given local legal effect according to their terms, reviewing courts shall apply
local law that most closely approximates an absolute waiver of all civil liability
in connection with the Program, unless a warranty or assumption of liability
accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS


How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to
the public, the best way to achieve this is to make it free software which everyone
can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them
to the start of each source file to most effectively state the exclusion of
warranty; and each file should have at least the “copyright” line and a pointer to
where the full notice is found.

    Copyright (C)

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see .

Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short notice like this
when it starts in an interactive mode:

    Copyright (C)

    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of
the General Public License. Of course, your program's commands might be different;
for a GUI interface, you would use an “about box”.

You should also get your employer (if you work as a programmer) or school, if any,
to sign a “copyright disclaimer” for the program, if necessary. For more information
on this, and how to apply and follow the GNU GPL, see .

The GNU General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may consider it
more useful to permit linking proprietary applications with the library. If this is
what you want to do, use the GNU Lesser General Public License instead of this
License. But first, please read .

---

GNU Lesser General Public License v2.1 or later
(Linux Extended Attributes 2.2.52)

GNU Lesser General Public License
=================================

Version 2.1, February 1999

    Copyright (C) 1991, 1999 Free Software Foundation, Inc.

    59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

    Everyone is permitted to copy and distribute verbatim copies
    of this license document, but changing it is not allowed.

    [This is the first released version of the Lesser GPL. It also counts
    as the successor of the GNU Library Public License, version 2, hence
    the version number 2.1.]


Preamble
--------

The licenses for most software are designed to take away your freedom to share and
change it. By contrast, the GNU General Public Licenses are intended to guarantee
your freedom to share and change free software--to make sure the software is free
for all its users.

This license, the Lesser General Public License, applies to some specially
designated software packages--typically libraries--of the Free Software Foundation
and other authors who decide to use it. You can use it too, but we suggest you first
think carefully about whether this license or the ordinary General Public License is
the better strategy to use in any particular case, based on the explanations below.

When we speak of free software, we are referring to freedom of use, not price. Our
General Public Licenses are designed to make sure that you have the freedom to
distribute copies of free software (and charge for this service if you wish); that
you receive source code or can get it if you want it; that you can change the
software and use pieces of it in new free programs; and that you are informed that
you can do these things.

To protect your rights, we need to make restrictions that forbid distributors to
deny you these rights or to ask you to surrender these rights. These restrictions
translate to certain responsibilities for you if you distribute copies of the
library or if you modify it.

For example, if you distribute copies of the library, whether gratis or for a fee,
you must give the recipients all the rights that we gave you.
You must make - -sure that they, too, receive or can get the source code. If you link other code - -with the library, you must provide complete object files to the recipients, so - -that they can relink them with the library after making changes to the library - -and recompiling it. And you must show them these terms so they know their rights. - - - -We protect your rights with a two-step method: (1) we copyright the library, and - -(2) we offer you this license, which gives you legal permission to copy, - -distribute and/or modify the library. - - - -To protect each distributor, we want to make it very clear that there is no - -warranty for the free library. Also, if the library is modified by someone else - -and passed on, the recipients should know that what they have is not the original - -version, so that the original author's reputation will not be affected by - -problems that might be introduced by others. - - - -Finally, software patents pose a constant threat to the existence of any free - -program. We wish to make sure that a company cannot effectively restrict the - -users of a free program by obtaining a restrictive license from a patent holder. - -Therefore, we insist that any patent license obtained for a version of the - -library must be consistent with the full freedom of use specified in this - -license. - - - -Most GNU software, including some libraries, is covered by the ordinary GNU - -General Public License. This license, the GNU Lesser General Public License, - -applies to certain designated libraries, and is quite different from the ordinary - -General Public License. We use this license for certain libraries in order to - -permit linking those libraries into non-free programs. - - - -When a program is linked with a library, whether statically or using a shared - -library, the combination of the two is legally speaking a combined work, a - -derivative of the original library. The ordinary General Public License therefore - -permits such linking only if the entire combination fits its criteria of freedom. - -The Lesser General Public License permits more lax criteria for linking other - -code with the library. - - - -We call this license the "Lesser" General Public License because it does Less to - -protect the user's freedom than the ordinary General Public License. It also - -provides other free software developers Less of an advantage over competing - -non-free programs. These disadvantages are the reason we use the ordinary General - -Public License for many libraries. However, the Lesser license provides - -advantages in certain special circumstances. - - - -For example, on rare occasions, there may be a special need to encourage the - -widest possible use of a certain library, so that it becomes a de-facto standard. - -To achieve this, non-free programs must be allowed to use the library. A more - -frequent case is that a free library does the same job as widely used non-free - -libraries. In this case, there is little to gain by limiting the free library to - -free software only, so we use the Lesser General Public License. - - - -In other cases, permission to use a particular library in non-free programs - -enables a greater number of people to use a large body of free software. For - -example, permission to use the GNU C Library in non-free programs enables many - -more people to use the whole GNU operating system, as well as its variant, the - -GNU/Linux operating system. 
- - - -Although the Lesser General Public License is Less protective of the users' - -freedom, it does ensure that the user of a program that is linked with the - -Library has the freedom and the wherewithal to run that program using a modified - -version of the Library. - - - -The precise terms and conditions for copying, distribution and modification - -follow. Pay close attention to the difference between a "work based on the - -library" and a "work that uses the library". The former contains code derived - -from the library, whereas the latter must be combined with the library in order - -to run. - - - - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - ---------------------------------------------------------------- - - - -0. This License Agreement applies to any software library or other program which - -contains a notice placed by the copyright holder or other authorized party saying - -it may be distributed under the terms of this Lesser General Public License (also - -called "this License"). Each licensee is addressed as "you". - - - -A "library" means a collection of software functions and/or data prepared so as - -to be conveniently linked with application programs (which use some of those - -functions and data) to form executables. - - - -The "Library", below, refers to any such software library or work which has been - -distributed under these terms. A "work based on the Library" means either the - -Library or any derivative work under copyright law: that is to say, a work - -containing the Library or a portion of it, either verbatim or with modifications - -and/or translated straightforwardly into another language. (Hereinafter, - -translation is included without limitation in the term "modification".) - - - -"Source code" for a work means the preferred form of the work for making - -modifications to it. For a library, complete source code means all the source - -code for all modules it contains, plus any associated interface definition files, - -plus the scripts used to control compilation and installation of the library. - - - -Activities other than copying, distribution and modification are not covered by - -this License; they are outside its scope. The act of running a program using the - -Library is not restricted, and output from such a program is covered only if its - -contents constitute a work based on the Library (independent of the use of the - -Library in a tool for writing it). Whether that is true depends on what the - -Library does and what the program that uses the Library does. - - - -1. You may copy and distribute verbatim copies of the Library's complete source - -code as you receive it, in any medium, provided that you conspicuously and - -appropriately publish on each copy an appropriate copyright notice and disclaimer - -of warranty; keep intact all the notices that refer to this License and to the - -absence of any warranty; and distribute a copy of this License along with the - -Library. - - - -You may charge a fee for the physical act of transferring a copy, and you may at - -your option offer warranty protection in exchange for a fee. - - - -2. You may modify your copy or copies of the Library or any portion of it, thus - -forming a work based on the Library, and copy and distribute such modifications - -or work under the terms of Section 1 above, provided that you also meet all of - -these conditions: - - - - a) The modified work must itself be a software library. 
- - - - b) You must cause the files modified to carry prominent notices stating - - that you changed the files and the date of any change. - - - - c) You must cause the whole of the work to be licensed at no charge to all - - third parties under the terms of this License. - - - - d) If a facility in the modified Library refers to a function or a table of - - data to be supplied by an application program that uses the facility, other - - than as an argument passed when the facility is invoked, then you must make - - a good faith effort to ensure that, in the event an application does not - - supply such function or table, the facility still operates, and performs - - whatever part of its purpose remains meaningful. - - - - (For example, a function in a library to compute square roots has a purpose - - that is entirely well-defined independent of the application. Therefore, - - Subsection 2d requires that any application-supplied function or table used - - by this function must be optional: if the application does not supply it, - - the square root function must still compute square roots.) - - - - These requirements apply to the modified work as a whole. If identifiable - - sections of that work are not derived from the Library, and can be - - reasonably considered independent and separate works in themselves, then - - this License, and its terms, do not apply to those sections when you - - distribute them as separate works. But when you distribute the same - - sections as part of a whole which is a work based on the Library, the - - distribution of the whole must be on the terms of this License, whose - - permissions for other licensees extend to the entire whole, and thus to - - each and every part regardless of who wrote it. - - - - Thus, it is not the intent of this section to claim rights or contest your - - rights to work written entirely by you; rather, the intent is to exercise - - the right to control the distribution of derivative or collective works - - based on the Library. - - - - In addition, mere aggregation of another work not based on the Library with - - the Library (or with a work based on the Library) on a volume of a storage - - or distribution medium does not bring the other work under the scope of - - this License. - - - -3. You may opt to apply the terms of the ordinary GNU General Public License - -instead of this License to a given copy of the Library. To do this, you must - -alter all the notices that refer to this License, so that they refer to the - -ordinary GNU General Public License, version 2, instead of to this License. (If a - -newer version than version 2 of the ordinary GNU General Public License has - -appeared, then you can specify that version instead if you wish.) Do not make any - -other change in these notices. - - - -Once this change is made in a given copy, it is irreversible for that copy, so - -the ordinary GNU General Public License applies to all subsequent copies and - -derivative works made from that copy. - - - -This option is useful when you wish to copy part of the code of the Library into - -a program that is not a library. - - - -4. You may copy and distribute the Library (or a portion or derivative of it, - -under Section 2) in object code or executable form under the terms of Sections 1 - -and 2 above provided that you accompany it with the complete corresponding - -machine-readable source code, which must be distributed under the terms of - -Sections 1 and 2 above on a medium customarily used for software interchange. 
- - - -If distribution of object code is made by offering access to copy from a - -designated place, then offering equivalent access to copy the source code from - -the same place satisfies the requirement to distribute the source code, even - -though third parties are not compelled to copy the source along with the object - -code. - - - -5. A program that contains no derivative of any portion of the Library, but is - -designed to work with the Library by being compiled or linked with it, is called - -a "work that uses the Library". Such a work, in isolation, is not a derivative - -work of the Library, and therefore falls outside the scope of this License. - - - -However, linking a "work that uses the Library" with the Library creates an - -executable that is a derivative of the Library (because it contains portions of - -the Library), rather than a "work that uses the library". The executable is - -therefore covered by this License. Section 6 states terms for distribution of - -such executables. - - - -When a "work that uses the Library" uses material from a header file that is part - -of the Library, the object code for the work may be a derivative work of the - -Library even though the source code is not. Whether this is true is especially - -significant if the work can be linked without the Library, or if the work is - -itself a library. The threshold for this to be true is not precisely defined by - -law. - - - -If such an object file uses only numerical parameters, data structure layouts and - -accessors, and small macros and small inline functions (ten lines or less in - -length), then the use of the object file is unrestricted, regardless of whether - -it is legally a derivative work. (Executables containing this object code plus - -portions of the Library will still fall under Section 6.) - - - -Otherwise, if the work is a derivative of the Library, you may distribute the - -object code for the work under the terms of Section 6. Any executables containing - -that work also fall under Section 6, whether or not they are linked directly with - -the Library itself. - - - -6. As an exception to the Sections above, you may also combine or link a "work - -that uses the Library" with the Library to produce a work containing portions of - -the Library, and distribute that work under terms of your choice, provided that - -the terms permit modification of the work for the customer's own use and reverse - -engineering for debugging such modifications. - - - -You must give prominent notice with each copy of the work that the Library is - -used in it and that the Library and its use are covered by this License. You must - -supply a copy of this License. If the work during execution displays copyright - -notices, you must include the copyright notice for the Library among them, as - -well as a reference directing the user to the copy of this License. Also, you - -must do one of these things: - - - - a) Accompany the work with the complete corresponding machine-readable - - source code for the Library including whatever changes were used in the - - work (which must be distributed under Sections 1 and 2 above); and, if the - - work is an executable linked with the Library, with the complete - - machine-readable "work that uses the Library", as object code and/or source - - code, so that the user can modify the Library and then relink to produce a - - modified executable containing the modified Library. 
(It is understood that - - the user who changes the contents of definitions files in the Library will - - not necessarily be able to recompile the application to use the modified - - definitions.) - - - - b) Use a suitable shared library mechanism for linking with the Library. A - - suitable mechanism is one that (1) uses at run time a copy of the library - - already present on the user's computer system, rather than copying library - - functions into the executable, and (2) will operate properly with a - - modified version of the library, if the user installs one, as long as the - - modified version is interface-compatible with the version that the work was - - made with. - - - - c) Accompany the work with a written offer, valid for at least three years, - - to give the same user the materials specified in Subsection 6a, above, for - - a charge no more than the cost of performing this distribution. - - - - d) If distribution of the work is made by offering access to copy from a - - designated place, offer equivalent access to copy the above specified - - materials from the same place. - - - - e) Verify that the user has already received a copy of these materials or - - that you have already sent this user a copy. - - - -For an executable, the required form of the "work that uses the Library" must - -include any data and utility programs needed for reproducing the executable from - -it. However, as a special exception, the materials to be distributed need not - -include anything that is normally distributed (in either source or binary form) - -with the major components (compiler, kernel, and so on) of the operating system - -on which the executable runs, unless that component itself accompanies the - -executable. - - - -It may happen that this requirement contradicts the license restrictions of other - -proprietary libraries that do not normally accompany the operating system. Such a - -contradiction means you cannot use both them and the Library together in an - -executable that you distribute. - - - -7. You may place library facilities that are a work based on the Library - -side-by-side in a single library together with other library facilities not - -covered by this License, and distribute such a combined library, provided that - -the separate distribution of the work based on the Library and of the other - -library facilities is otherwise permitted, and provided that you do these two - -things: - - - - a) Accompany the combined library with a copy of the same work based on the - - Library, uncombined with any other library facilities. This must be - - distributed under the terms of the Sections above. - - - - b) Give prominent notice with the combined library of the fact that part of - - it is a work based on the Library, and explaining where to find the - - accompanying uncombined form of the same work. - - - -8. You may not copy, modify, sublicense, link with, or distribute the Library - -except as expressly provided under this License. Any attempt otherwise to copy, - -modify, sublicense, link with, or distribute the Library is void, and will - -automatically terminate your rights under this License. However, parties who have - -received copies, or rights, from you under this License will not have their - -licenses terminated so long as such parties remain in full compliance. - - - -9. You are not required to accept this License, since you have not signed it. - -However, nothing else grants you permission to modify or distribute the Library - -or its derivative works. 
These actions are prohibited by law if you do not accept - -this License. Therefore, by modifying or distributing the Library (or any work - -based on the Library), you indicate your acceptance of this License to do so, and - -all its terms and conditions for copying, distributing or modifying the Library - -or works based on it. - - - -10. Each time you redistribute the Library (or any work based on the Library), - -the recipient automatically receives a license from the original licensor to - -copy, distribute, link with or modify the Library subject to these terms and - -conditions. You may not impose any further restrictions on the recipients' - -exercise of the rights granted herein. You are not responsible for enforcing - -compliance by third parties with this License. - - - -11. If, as a consequence of a court judgment or allegation of patent infringement - -or for any other reason (not limited to patent issues), conditions are imposed on - -you (whether by court order, agreement or otherwise) that contradict the - -conditions of this License, they do not excuse you from the conditions of this - -License. If you cannot distribute so as to satisfy simultaneously your - -obligations under this License and any other pertinent obligations, then as a - -consequence you may not distribute the Library at all. For example, if a patent - -license would not permit royalty-free redistribution of the Library by all those - -who receive copies directly or indirectly through you, then the only way you - -could satisfy both it and this License would be to refrain entirely from - -distribution of the Library. - - - -If any portion of this section is held invalid or unenforceable under any - -particular circumstance, the balance of the section is intended to apply, and the - -section as a whole is intended to apply in other circumstances. - - - -It is not the purpose of this section to induce you to infringe any patents or - -other property right claims or to contest validity of any such claims; this - -section has the sole purpose of protecting the integrity of the free software - -distribution system which is implemented by public license practices. Many people - -have made generous contributions to the wide range of software distributed - -through that system in reliance on consistent application of that system; it is - -up to the author/donor to decide if he or she is willing to distribute software - -through any other system and a licensee cannot impose that choice. - - - -This section is intended to make thoroughly clear what is believed to be a - -consequence of the rest of this License. - - - -12. If the distribution and/or use of the Library is restricted in certain - -countries either by patents or by copyrighted interfaces, the original copyright - -holder who places the Library under this License may add an explicit geographical - -distribution limitation excluding those countries, so that distribution is - -permitted only in or among countries not thus excluded. In such case, this - -License incorporates the limitation as if written in the body of this License. - - - -13. The Free Software Foundation may publish revised and/or new versions of the - -Lesser General Public License from time to time. Such new versions will be - -similar in spirit to the present version, but may differ in detail to address new - -problems or concerns. - - - -Each version is given a distinguishing version number. 
If the Library specifies a - -version number of this License which applies to it and "any later version", you - -have the option of following the terms and conditions either of that version or - -of any later version published by the Free Software Foundation. If the Library - -does not specify a license version number, you may choose any version ever - -published by the Free Software Foundation. - - - -14. If you wish to incorporate parts of the Library into other free programs - -whose distribution conditions are incompatible with these, write to the author to - -ask for permission. For software which is copyrighted by the Free Software - -Foundation, write to the Free Software Foundation; we sometimes make exceptions - -for this. Our decision will be guided by the two goals of preserving the free - -status of all derivatives of our free software and of promoting the sharing and - -reuse of software generally. - - - -NO WARRANTY - - - -15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE - -LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED - -IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" - -WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT - -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - -PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE - -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF - -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - - -16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL - -ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE - -LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, - -SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY - -TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING - -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF - -THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER - -PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - - - - -END OF TERMS AND CONDITIONS - - - - - -How to Apply These Terms to Your New Libraries - ----------------------------------------------- - - - -If you develop a new library, and you want it to be of the greatest possible use - -to the public, we recommend making it free software that everyone can - -redistribute and change. You can do so by permitting redistribution under these - -terms (or, alternatively, under the terms of the ordinary General Public - -License). - - - -To apply these terms, attach the following notices to the library. It is safest - -to attach them to the start of each source file to most effectively convey the - -exclusion of warranty; and each file should have at least the "copyright" line - -and a pointer to where the full notice is found. - - - - one line to give the library's name and an idea of what it does. - - - - Copyright (C) year name of author - - - - This library is free software; you can redistribute it and/or - - - - modify it under the terms of the GNU Lesser General Public - - - - License as published by the Free Software Foundation; either - - - - version 2.1 of the License, or (at your option) any later version. 
    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

Also add information on how to contact you by electronic and paper mail.

You should also get your employer (if you work as a programmer) or your school, if
any, to sign a "copyright disclaimer" for the library, if necessary. Here is a
sample; alter the names:

    Yoyodyne, Inc., hereby disclaims all copyright interest in
    the library `Frob' (a library for tweaking knobs) written
    by James Random Hacker.

    signature of Ty Coon, 1 April 1990

    Ty Coon, President of Vice

That's all there is to it!

---

GNU Library General Public License v2 or later
(keyutils 1.6, LVM2 2.02.184, Userspace RCU 0.11.0)

GNU Library General Public License
==================================

Version 2, June 1991

    Copyright (C) 1991 Free Software Foundation, Inc.

    59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

    Everyone is permitted to copy and distribute verbatim copies
    of this license document, but changing it is not allowed.

    [This is the first released version of the Library GPL. It is numbered 2
    because it goes with version 2 of the ordinary GPL.]


Preamble
--------

The licenses for most software are designed to take away your freedom to share and
change it. By contrast, the GNU General Public Licenses are intended to guarantee
your freedom to share and change free software--to make sure the software is free
for all its users.

This license, the Library General Public License, applies to some specially
designated Free Software Foundation software, and to other libraries whose authors
decide to use it. You can use it for your libraries too.

When we speak of free software, we are referring to freedom, not price. Our General
Public Licenses are designed to make sure that you have the freedom to distribute
copies of free software (and charge for this service if you wish), that you receive
source code or can get it if you want it, that you can change the software or use
pieces of it in new free programs; and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid anyone to deny you
these rights or to ask you to surrender the rights. These restrictions translate to
certain responsibilities for you if you distribute copies of the library, or if you
modify it.

For example, if you distribute copies of the library, whether gratis or for a fee,
you must give the recipients all the rights that we gave you. You must make sure
that they, too, receive or can get the source code. If you link a program with the
library, you must provide complete object files to the recipients so that they can
relink them with the library after making changes to the library and recompiling it.
And you must show them these terms so they know their rights.
- - - -Our method of protecting your rights has two steps: (1) copyright the library, - -and (2) offer you this license, which gives you legal permission to copy, - -distribute and/or modify the library. - - - -Also, for each distributor's protection, we want to make certain that everyone - -understands that there is no warranty for this free library. If the library is - -modified by someone else and passed on, we want its recipients to know that what - -they have is not the original version, so that any problems introduced by others - -will not reflect on the original authors' reputations. - - - -Finally, any free program is threatened constantly by software patents. We wish - -to avoid the danger that companies distributing free software will individually - -obtain patent licenses, thus in effect transforming the program into proprietary - -software. To prevent this, we have made it clear that any patent must be licensed - -for everyone's free use or not licensed at all. - - - -Most GNU software, including some libraries, is covered by the ordinary GNU - -General Public License, which was designed for utility programs. This license, - -the GNU Library General Public License, applies to certain designated libraries. - -This license is quite different from the ordinary one; be sure to read it in - -full, and don't assume that anything in it is the same as in the ordinary - -license. - - - -The reason we have a separate public license for some libraries is that they blur - -the distinction we usually make between modifying or adding to a program and - -simply using it. Linking a program with a library, without changing the library, - -is in some sense simply using the library, and is analogous to running a utility - -program or application program. However, in a textual and legal sense, the linked - -executable is a combined work, a derivative of the original library, and the - -ordinary General Public License treats it as such. - - - -Because of this blurred distinction, using the ordinary General Public License - -for libraries did not effectively promote software sharing, because most - -developers did not use the libraries. We concluded that weaker conditions might - -promote sharing better. - - - -However, unrestricted linking of non-free programs would deprive the users of - -those programs of all benefit from the free status of the libraries themselves. - -This Library General Public License is intended to permit developers of non-free - -programs to use free libraries, while preserving your freedom as a user of such - -programs to change the free libraries that are incorporated in them. (We have not - -seen how to achieve this as regards changes in header files, but we have achieved - -it as regards changes in the actual functions of the Library.) The hope is that - -this will lead to faster development of free libraries. - - - -The precise terms and conditions for copying, distribution and modification - -follow. Pay close attention to the difference between a "work based on the - -library" and a "work that uses the library". The former contains code derived - -from the library, while the latter only works together with the library. - - - -Note that it is possible for a library to be covered by the ordinary General - -Public License rather than by this special one. - - - - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - ---------------------------------------------------------------- - - - -0. 
This License Agreement applies to any software library which contains a notice - -placed by the copyright holder or other authorized party saying it may be - -distributed under the terms of this Library General Public License (also called - -"this License"). Each licensee is addressed as "you". - - - -A "library" means a collection of software functions and/or data prepared so as - -to be conveniently linked with application programs (which use some of those - -functions and data) to form executables. - - - -The "Library", below, refers to any such software library or work which has been - -distributed under these terms. A "work based on the Library" means either the - -Library or any derivative work under copyright law: that is to say, a work - -containing the Library or a portion of it, either verbatim or with modifications - -and/or translated straightforwardly into another language. (Hereinafter, - -translation is included without limitation in the term "modification".) - - - -"Source code" for a work means the preferred form of the work for making - -modifications to it. For a library, complete source code means all the source - -code for all modules it contains, plus any associated interface definition files, - -plus the scripts used to control compilation and installation of the library. - - - -Activities other than copying, distribution and modification are not covered by - -this License; they are outside its scope. The act of running a program using the - -Library is not restricted, and output from such a program is covered only if its - -contents constitute a work based on the Library (independent of the use of the - -Library in a tool for writing it). Whether that is true depends on what the - -Library does and what the program that uses the Library does. - - - -1. You may copy and distribute verbatim copies of the Library's complete source - -code as you receive it, in any medium, provided that you conspicuously and - -appropriately publish on each copy an appropriate copyright notice and disclaimer - -of warranty; keep intact all the notices that refer to this License and to the - -absence of any warranty; and distribute a copy of this License along with the - -Library. - - - -You may charge a fee for the physical act of transferring a copy, and you may at - -your option offer warranty protection in exchange for a fee. - - - -2. You may modify your copy or copies of the Library or any portion of it, thus - -forming a work based on the Library, and copy and distribute such modifications - -or work under the terms of Section 1 above, provided that you also meet all of - -these conditions: - - - - a) The modified work must itself be a software library. - - - - b) You must cause the files modified to carry prominent notices stating - - that you changed the files and the date of any change. - - - - c) You must cause the whole of the work to be licensed at no charge to all - - third parties under the terms of this License. - - - - d) If a facility in the modified Library refers to a function or a table of - - data to be supplied by an application program that uses the facility, other - - than as an argument passed when the facility is invoked, then you must make - - a good faith effort to ensure that, in the event an application does not - - supply such function or table, the facility still operates, and performs - - whatever part of its purpose remains meaningful. 
- - - - (For example, a function in a library to compute square roots has a purpose - - that is entirely well-defined independent of the application. Therefore, - - Subsection 2d requires that any application-supplied function or table used - - by this function must be optional: if the application does not supply it, - - the square root function must still compute square roots.) - - - - These requirements apply to the modified work as a whole. If identifiable - - sections of that work are not derived from the Library, and can be - - reasonably considered independent and separate works in themselves, then - - this License, and its terms, do not apply to those sections when you - - distribute them as separate works. But when you distribute the same - - sections as part of a whole which is a work based on the Library, the - - distribution of the whole must be on the terms of this License, whose - - permissions for other licensees extend to the entire whole, and thus to - - each and every part regardless of who wrote it. - - - - Thus, it is not the intent of this section to claim rights or contest your - - rights to work written entirely by you; rather, the intent is to exercise - - the right to control the distribution of derivative or collective works - - based on the Library. - - - - In addition, mere aggregation of another work not based on the Library with - - the Library (or with a work based on the Library) on a volume of a storage - - or distribution medium does not bring the other work under the scope of - - this License. - - - -3. You may opt to apply the terms of the ordinary GNU General Public License - -instead of this License to a given copy of the Library. To do this, you must - -alter all the notices that refer to this License, so that they refer to the - -ordinary GNU General Public License, version 2, instead of to this License. (If a - -newer version than version 2 of the ordinary GNU General Public License has - -appeared, then you can specify that version instead if you wish.) Do not make any - -other change in these notices. - - - -Once this change is made in a given copy, it is irreversible for that copy, so - -the ordinary GNU General Public License applies to all subsequent copies and - -derivative works made from that copy. - - - -This option is useful when you wish to copy part of the code of the Library into - -a program that is not a library. - - - -4. You may copy and distribute the Library (or a portion or derivative of it, - -under Section 2) in object code or executable form under the terms of Sections 1 - -and 2 above provided that you accompany it with the complete corresponding - -machine-readable source code, which must be distributed under the terms of - -Sections 1 and 2 above on a medium customarily used for software interchange. - - - -If distribution of object code is made by offering access to copy from a - -designated place, then offering equivalent access to copy the source code from - -the same place satisfies the requirement to distribute the source code, even - -though third parties are not compelled to copy the source along with the object - -code. - - - -5. A program that contains no derivative of any portion of the Library, but is - -designed to work with the Library by being compiled or linked with it, is called - -a "work that uses the Library". Such a work, in isolation, is not a derivative - -work of the Library, and therefore falls outside the scope of this License. 
- - - -However, linking a "work that uses the Library" with the Library creates an - -executable that is a derivative of the Library (because it contains portions of - -the Library), rather than a "work that uses the library". The executable is - -therefore covered by this License. Section 6 states terms for distribution of - -such executables. - - - -When a "work that uses the Library" uses material from a header file that is part - -of the Library, the object code for the work may be a derivative work of the - -Library even though the source code is not. Whether this is true is especially - -significant if the work can be linked without the Library, or if the work is - -itself a library. The threshold for this to be true is not precisely defined by - -law. - - - -If such an object file uses only numerical parameters, data structure layouts and - -accessors, and small macros and small inline functions (ten lines or less in - -length), then the use of the object file is unrestricted, regardless of whether - -it is legally a derivative work. (Executables containing this object code plus - -portions of the Library will still fall under Section 6.) - - - -Otherwise, if the work is a derivative of the Library, you may distribute the - -object code for the work under the terms of Section 6. Any executables containing - -that work also fall under Section 6, whether or not they are linked directly with - -the Library itself. - - - -6. As an exception to the Sections above, you may also compile or link a "work - -that uses the Library" with the Library to produce a work containing portions of - -the Library, and distribute that work under terms of your choice, provided that - -the terms permit modification of the work for the customer's own use and reverse - -engineering for debugging such modifications. - - - -You must give prominent notice with each copy of the work that the Library is - -used in it and that the Library and its use are covered by this License. You must - -supply a copy of this License. If the work during execution displays copyright - -notices, you must include the copyright notice for the Library among them, as - -well as a reference directing the user to the copy of this License. Also, you - -must do one of these things: - - - - a) Accompany the work with the complete corresponding machine-readable - - source code for the Library including whatever changes were used in the - - work (which must be distributed under Sections 1 and 2 above); and, if the - - work is an executable linked with the Library, with the complete - - machine-readable "work that uses the Library", as object code and/or source - - code, so that the user can modify the Library and then relink to produce a - - modified executable containing the modified Library. (It is understood that - - the user who changes the contents of definitions files in the Library will - - not necessarily be able to recompile the application to use the modified - - definitions.) - - - - b) Accompany the work with a written offer, valid for at least three years, - - to give the same user the materials specified in Subsection 6a, above, for - - a charge no more than the cost of performing this distribution. - - - - c) If distribution of the work is made by offering access to copy from a - - designated place, offer equivalent access to copy the above specified - - materials from the same place. - - - - d) Verify that the user has already received a copy of these materials or - - that you have already sent this user a copy. 
- - - -For an executable, the required form of the "work that uses the Library" must - -include any data and utility programs needed for reproducing the executable from - -it. However, as a special exception, the source code distributed need not include - -anything that is normally distributed (in either source or binary form) with the - -major components (compiler, kernel, and so on) of the operating system on which - -the executable runs, unless that component itself accompanies the executable. - - - -It may happen that this requirement contradicts the license restrictions of other - -proprietary libraries that do not normally accompany the operating system. Such a - -contradiction means you cannot use both them and the Library together in an - -executable that you distribute. - - - -7. You may place library facilities that are a work based on the Library - -side-by-side in a single library together with other library facilities not - -covered by this License, and distribute such a combined library, provided that - -the separate distribution of the work based on the Library and of the other - -library facilities is otherwise permitted, and provided that you do these two - -things: - - - - a) Accompany the combined library with a copy of the same work based on the - - Library, uncombined with any other library facilities. This must be - - distributed under the terms of the Sections above. - - - - b) Give prominent notice with the combined library of the fact that part of - - it is a work based on the Library, and explaining where to find the - - accompanying uncombined form of the same work. - - - -8. You may not copy, modify, sublicense, link with, or distribute the Library - -except as expressly provided under this License. Any attempt otherwise to copy, - -modify, sublicense, link with, or distribute the Library is void, and will - -automatically terminate your rights under this License. However, parties who have - -received copies, or rights, from you under this License will not have their - -licenses terminated so long as such parties remain in full compliance. - - - -9. You are not required to accept this License, since you have not signed it. - -However, nothing else grants you permission to modify or distribute the Library - -or its derivative works. These actions are prohibited by law if you do not accept - -this License. Therefore, by modifying or distributing the Library (or any work - -based on the Library), you indicate your acceptance of this License to do so, and - -all its terms and conditions for copying, distributing or modifying the Library - -or works based on it. - - - -10. Each time you redistribute the Library (or any work based on the Library), - -the recipient automatically receives a license from the original licensor to - -copy, distribute, link with or modify the Library subject to these terms and - -conditions. You may not impose any further restrictions on the recipients' - -exercise of the rights granted herein. You are not responsible for enforcing - -compliance by third parties to this License. - - - -11. If, as a consequence of a court judgment or allegation of patent infringement - -or for any other reason (not limited to patent issues), conditions are imposed on - -you (whether by court order, agreement or otherwise) that contradict the - -conditions of this License, they do not excuse you from the conditions of this - -License. 
If you cannot distribute so as to satisfy simultaneously your - -obligations under this License and any other pertinent obligations, then as a - -consequence you may not distribute the Library at all. For example, if a patent - -license would not permit royalty-free redistribution of the Library by all those - -who receive copies directly or indirectly through you, then the only way you - -could satisfy both it and this License would be to refrain entirely from - -distribution of the Library. - - - -If any portion of this section is held invalid or unenforceable under any - -particular circumstance, the balance of the section is intended to apply, and the - -section as a whole is intended to apply in other circumstances. - - - -It is not the purpose of this section to induce you to infringe any patents or - -other property right claims or to contest validity of any such claims; this - -section has the sole purpose of protecting the integrity of the free software - -distribution system which is implemented by public license practices. Many people - -have made generous contributions to the wide range of software distributed - -through that system in reliance on consistent application of that system; it is - -up to the author/donor to decide if he or she is willing to distribute software - -through any other system and a licensee cannot impose that choice. - - - -This section is intended to make thoroughly clear what is believed to be a - -consequence of the rest of this License. - - - -12. If the distribution and/or use of the Library is restricted in certain - -countries either by patents or by copyrighted interfaces, the original copyright - -holder who places the Library under this License may add an explicit geographical - -distribution limitation excluding those countries, so that distribution is - -permitted only in or among countries not thus excluded. In such case, this - -License incorporates the limitation as if written in the body of this License. - - - -13. The Free Software Foundation may publish revised and/or new versions of the - -Library General Public License from time to time. Such new versions will be - -similar in spirit to the present version, but may differ in detail to address new - -problems or concerns. - - - -Each version is given a distinguishing version number. If the Library specifies a - -version number of this License which applies to it and "any later version", you - -have the option of following the terms and conditions either of that version or - -of any later version published by the Free Software Foundation. If the Library - -does not specify a license version number, you may choose any version ever - -published by the Free Software Foundation. - - - -14. If you wish to incorporate parts of the Library into other free programs - -whose distribution conditions are incompatible with these, write to the author to - -ask for permission. For software which is copyrighted by the Free Software - -Foundation, write to the Free Software Foundation; we sometimes make exceptions - -for this. Our decision will be guided by the two goals of preserving the free - -status of all derivatives of our free software and of promoting the sharing and - -reuse of software generally. - - - -NO WARRANTY - - - -15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE - -LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED - -IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" - -WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT - -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - -PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE - -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF - -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - - -16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL - -ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE - -LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, - -SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY - -TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING - -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF - -THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER - -PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - - - - -END OF TERMS AND CONDITIONS - - - - - -How to Apply These Terms to Your New Libraries - ----------------------------------------------- - - - -If you develop a new library, and you want it to be of the greatest possible use - -to the public, we recommend making it free software that everyone can - -redistribute and change. You can do so by permitting redistribution under these - -terms (or, alternatively, under the terms of the ordinary General Public - -License). - - - -To apply these terms, attach the following notices to the library. It is safest - -to attach them to the start of each source file to most effectively convey the - -exclusion of warranty; and each file should have at least the "copyright" line - -and a pointer to where the full notice is found. - - - - one line to give the library's name and an idea of what it does. - - - - Copyright (C) year name of author - - - - This library is free software; you can redistribute it and/or - - - - modify it under the terms of the GNU Library General Public - - - - License as published by the Free Software Foundation; either - - - - version 2 of the License, or (at your option) any later version. - - - - This library is distributed in the hope that it will be useful, - - - - but WITHOUT ANY WARRANTY; without even the implied warranty of - - - - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - - - - Library General Public License for more details. - - - - You should have received a copy of the GNU Library General Public - - - - License along with this library; if not, write to the Free Software - - - - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - - - -Also add information on how to contact you by electronic and paper mail. - - - -You should also get your employer (if you work as a programmer) or your school, - -if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a - -sample; alter the names: - - - - Yoyodyne, Inc., hereby disclaims all copyright interest in - - - - the library `Frob' (a library for tweaking knobs) written - - - - by James Random Hacker. - - - - signature of Ty Coon, 1 April 1990 - - - - Ty Coon, President of Vice - - - -That's all there is to it! 
- - - ---- - - - -ISC License - -(go-spew v1.1.1) - - - -ISC License - - - -Copyright (c) 2012-2016 Dave Collins - - - -Permission to use, copy, modify, and/or distribute this software for any - -purpose with or without fee is hereby granted, provided that the above - -copyright notice and this permission notice appear in all copies. - - - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE - - - ---- - - - -ISC License - -(docker-cli 20180417-snapshot, go-spew 1.1.0) - - - -ISC License (ISCL) - -================== - - - -Copyright (c) 4-digit year, Company or Person's Name - - - -Permission to use, copy, modify, and/or distribute this software for any purpose - -with or without fee is hereby granted, provided that the above copyright notice - -and this permission notice appear in all copies. - - - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH - -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND - -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, - -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS - -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER - -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF - -THIS SOFTWARE. - - - ---- - - - -MIT License - -(go humanize v1.0.0) - - - -Copyright (c) 2005-2008 Dustin Sallings - - - -Permission is hereby granted, free of charge, to any person obtaining a copy - -of this software and associated documentation files (the "Software"), to deal - -in the Software without restriction, including without limitation the rights - -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - -copies of the Software, and to permit persons to whom the Software is - -furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in - -all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - -SOFTWARE - - - ---- - - - -MIT License - -(jwt-go v3.2.0) - - - -Copyright (c) 2012 Dave Grijalva - - - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE - - - ---- - - - -MIT License - -(beorn7-perks 20180510-snapshot-3a771d99) - - - -Copyright (C) 2013 Blake Mizerany - - - -Permission is hereby granted, free of charge, to any person obtaining - -a copy of this software and associated documentation files (the - -"Software"), to deal in the Software without restriction, including - -without limitation the rights to use, copy, modify, merge, publish, - -distribute, sublicense, and/or sell copies of the Software, and to - -permit persons to whom the Software is furnished to do so, subject to - -the following conditions: - - - -The above copyright notice and this permission notice shall be - -included in all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE - - - ---- - - - -MIT License - -(go.uuid v1.2.0) - - - -Copyright (C) 2013-2018 by Maxim Bublis - - - -Permission is hereby granted, free of charge, to any person obtaining - -a copy of this software and associated documentation files (the - -"Software"), to deal in the Software without restriction, including - -without limitation the rights to use, copy, modify, merge, publish, - -distribute, sublicense, and/or sell copies of the Software, and to - -permit persons to whom the Software is furnished to do so, subject to - -the following conditions: - - - -The above copyright notice and this permission notice shall be - -included in all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE - - - ---- - - - -MIT License - -(olekukonko-tablewriter v0.0.1) - - - -Copyright (C) 2014 by Oleku Konko - - - -Permission is hereby granted, free of charge, to any person obtaining a copy - -of this software and associated documentation files (the "Software"), to deal - -in the Software without restriction, including without limitation the rights - -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - -copies of the Software, and to permit persons to whom the Software is - -furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in - -all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - -THE SOFTWARE - - - ---- - - - -MIT License - -(Masterminds-semver v1.4.2) - - - -Copyright (C) 2014-2015, Matt Butcher and Matt Farina - - - -Permission is hereby granted, free of charge, to any person obtaining a copy - -of this software and associated documentation files (the "Software"), to deal - -in the Software without restriction, including without limitation the rights - -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - -copies of the Software, and to permit persons to whom the Software is - -furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in - -all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - -THE SOFTWARE - - - ---- - - - -MIT License - -(philhofer-fwd v1.0.0) - - - -Copyright (c) 2014-2015, Philip Hofer - - - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE - - - ---- - - - -MIT License - -(GoDoc Text v0.1.0) - - - -Copyright 2012 Keith Rarick - - - -Permission is hereby granted, free of charge, to any person obtaining a copy - -of this software and associated documentation files (the "Software"), to deal - -in the Software without restriction, including without limitation the rights - -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - -copies of the Software, and to permit persons to whom the Software is - -furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in - -all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - -THE SOFTWARE - - - ---- - - - -MIT License - -(tinylib-msgp 1.0.2) - - - -License: Expat - - - -Files: debian/* - -Copyright: 2015 Tianon Gravi - -License: Expat - -Comment: Debian packaging is licensed under the same terms as upstream - - - -License: Expat - - - -Permission is hereby granted, free of charge, to any person obtaining a copy of - - this software and associated documentation files (the "Software"), to deal in - - the Software without restriction, including without limitation the rights to - - use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies - - of the Software, and to permit persons to whom the Software is furnished to do - - so, subject to the following conditions: - - . - - The above copyright notice and this permission notice shall be included in all - - copies or substantial portions of the Software. - - . - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - - SOFTWARE - - - ---- - - - -MIT License - -(logfmt 20140226-snapshot-b84e30ac) - - - -See http://godoc.org/github.com/kr/logfmt for format, and other documentation and examples. 
- - - -Copyright (C) 2013 Keith Rarick, Blake Mizerany - - - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE - - - ---- - - - -MIT License - -(stretchr/objx v0.1) - - - -The MIT License - - - -Copyright (c) 2014 Stretchr, Inc. - -Copyright (c) 2017-2018 objx contributors - - - -Permission is hereby granted, free of charge, to any person obtaining a copy - -of this software and associated documentation files (the "Software"), to deal - -in the Software without restriction, including without limitation the rights - -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - -copies of the Software, and to permit persons to whom the Software is - -furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in all - -copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - -SOFTWARE - - - ---- - - - -MIT License - -(alpine-keys 2.1, Animal Sniffer Annotations 1.17, cenkalti/backoff v3.0.0, Checker Qual 2.5.2, docker-cli 20180417-snapshot, Docker-org 20190221-snapshot, github.com/konsorten/go-windows-terminal-sequences v1.0.2, github.com/microsoft/go-winio v0.4.14, GNU Ncurses 6.1_p20190518, Go Logrus v1.4.2, Go Testify 20190313-snapshot-34c6fa2d, Go Testify v1.3.0, go-logfmt-logfmt v0.4.0, golang-github-ghodss-yaml-dev 20190128-snapshot, golang-github-stretchr-testify 1.4.0, jsoniter-go v1.1.7, Kerberos 1.17, libverto 0.3.1, logfmt 0, mitchellh-go-homedir v1.1.0, mitchellh-reflectwalk v1.0.1, musl 1.1.22, rs-xid v1.2.1, shopspring-decimal 1.1.0, sigs.k8s.io/yaml v1.1.0, yaml for Go v2.2.2) - - - -The MIT License - -=============== - - - -Copyright (c) - - - -Permission is hereby granted, free of charge, to any person obtaining a copy of - -this software and associated documentation files (the "Software"), to deal in the - -Software without restriction, including without limitation the rights to use, - -copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the - -Software, and to permit persons to whom the Software is furnished to do so, - -subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in all - -copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS - -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN - -AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - ---- - - - -MIT License - -(mitchellh-copystructure v1.0.0) - - - -The MIT License (MIT) - - - -Copyright (c) 2014 Mitchell Hashimoto - - - -Permission is hereby granted, free of charge, to any person obtaining a copy - -of this software and associated documentation files (the "Software"), to deal - -in the Software without restriction, including without limitation the rights - -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - -copies of the Software, and to permit persons to whom the Software is - -furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in - -all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - -THE SOFTWARE - - - ---- - - - -MIT License - -(mitchellh-hashstructure v1.0.0) - - - -The MIT License (MIT) - - - -Copyright (c) 2016 Mitchell Hashimoto - - - -Permission is hereby granted, free of charge, to any person obtaining a copy - -of this software and associated documentation files (the "Software"), to deal - -in the Software without restriction, including without limitation the rights - -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - -copies of the Software, and to permit persons to whom the Software is - -furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in - -all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - -THE SOFTWARE - - - ---- - - - -MIT License - -(kr/pretty 0.1.0) - - - -The MIT License (MIT) - - - -Copyright 2012 Keith Rarick - - - -Permission is hereby granted, free of charge, to any person obtaining a copy - -of this software and associated documentation files (the "Software"), to deal - -in the Software without restriction, including without limitation the rights - -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - -copies of the Software, and to permit persons to whom the Software is - -furnished to do so, subject to the following conditions: - - - -The above copyright notice and this permission notice shall be included in - -all copies or substantial portions of the Software. - - - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - -THE SOFTWARE - - - ---- - - - -Mozilla Public License 2.0 - -(hashicorp-golang-lru v0.5.0) - - - -Mozilla Public License - -Version 2.0 - -====================== - - - - - -1. Definitions - --------------- - - - - 1.1. "Contributor" - - - - means each individual or legal entity that creates, contributes to the creation - - of, or owns Covered Software. - - - - 1.2. "Contributor Version" - - - - means the combination of the Contributions of others (if any) used by a - - Contributor and that particular Contributor's Contribution. - - - - 1.3. "Contribution" - - - - means Covered Software of a particular Contributor. - - - - 1.4. 
"Covered Software" - - - - means Source Code Form to which the initial Contributor has attached the notice - - in Exhibit A, the Executable Form of such Source Code Form, and Modifications - - of such Source Code Form, in each case including portions thereof. - - - - 1.5. "Incompatible With Secondary Licenses" - - - - means - - - - a. - - - - that the initial Contributor has attached the notice described in Exhibit B - - to the Covered Software; or - - - - b. - - - - that the Covered Software was made available under the terms of version 1.1 - - or earlier of the License, but not also under the terms of a Secondary - - License. - - - - 1.6. "Executable Form" - - - - means any form of the work other than Source Code Form. - - - - 1.7. "Larger Work" - - - - means a work that combines Covered Software with other material, in a separate - - file or files, that is not Covered Software. - - - - 1.8. "License" - - - - means this document. - - - - 1.9. "Licensable" - - - - means having the right to grant, to the maximum extent possible, whether at the - - time of the initial grant or subsequently, any and all of the rights conveyed - - by this License. - - - - 1.10. "Modifications" - - - - means any of the following: - - - - a. - - - - any file in Source Code Form that results from an addition to, deletion - - from, or modification of the contents of Covered Software; or - - - - b. - - - - any new file in Source Code Form that contains any Covered Software. - - - - 1.11. "Patent Claims" of a Contributor - - - - means any patent claim(s), including without limitation, method, process, and - - apparatus claims, in any patent Licensable by such Contributor that would be - - infringed, but for the grant of the License, by the making, using, selling, - - offering for sale, having made, import, or transfer of either its Contributions - - or its Contributor Version. - - - - 1.12. "Secondary License" - - - - means either the GNU General Public License, Version 2.0, the GNU Lesser - - General Public License, Version 2.1, the GNU Affero General Public License, - - Version 3.0, or any later versions of those licenses. - - - - 1.13. "Source Code Form" - - - - means the form of the work preferred for making modifications. - - - - 1.14. "You" (or "Your") - - - - means an individual or a legal entity exercising rights under this License. For - - legal entities, "You" includes any entity that controls, is controlled by, or - - is under common control with You. For purposes of this definition, "control" - - means (a) the power, direct or indirect, to cause the direction or management - - of such entity, whether by contract or otherwise, or (b) ownership of more than - - fifty percent (50%) of the outstanding shares or beneficial ownership of such - - entity. - - - - - -2. License Grants and Conditions - --------------------------------- - - - - - - 2.1. Grants - - - - Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive - - license: - - - - a. - - - - under intellectual property rights (other than patent or trademark) - - Licensable by such Contributor to use, reproduce, make available, modify, - - display, perform, distribute, and otherwise exploit its Contributions, - - either on an unmodified basis, with Modifications, or as part of a Larger - - Work; and - - - - b. - - - - under Patent Claims of such Contributor to make, use, sell, offer for sale, - - have made, import, and otherwise transfer either its Contributions or its - - Contributor Version. - - - - - - 2.2. 
Effective Date - - - - The licenses granted in Section 2.1 with respect to any Contribution become - - effective for each Contribution on the date the Contributor first distributes - - such Contribution. - - - - - - 2.3. Limitations on Grant Scope - - - - The licenses granted in this Section 2 are the only rights granted under this - - License. No additional rights or licenses will be implied from the distribution - - or licensing of Covered Software under this License. Notwithstanding - - Section 2.1(b) above, no patent license is granted by a Contributor: - - - - a. - - - - for any code that a Contributor has removed from Covered Software; or - - - - b. - - - - for infringements caused by: (i) Your and any other third party's - - modifications of Covered Software, or (ii) the combination of its - - Contributions with other software (except as part of its Contributor - - Version); or - - - - c. - - - - under Patent Claims infringed by Covered Software in the absence of its - - Contributions. - - - - This License does not grant any rights in the trademarks, service marks, or - - logos of any Contributor (except as may be necessary to comply with the notice - - requirements in Section 3.4). - - - - - - 2.4. Subsequent Licenses - - - - No Contributor makes additional grants as a result of Your choice to distribute - - the Covered Software under a subsequent version of this License (see - - Section 10.2) or under the terms of a Secondary License (if permitted under the - - terms of Section 3.3). - - - - - - 2.5. Representation - - - - Each Contributor represents that the Contributor believes its Contributions are - - its original creation(s) or it has sufficient rights to grant the rights to its - - Contributions conveyed by this License. - - - - - - 2.6. Fair Use - - - - This License is not intended to limit any rights You have under applicable - - copyright doctrines of fair use, fair dealing, or other equivalents. - - - - - - 2.7. Conditions - - - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - - Section 2.1. - - - - - -3. Responsibilities - -------------------- - - - - - - 3.1. Distribution of Source Form - - - - All distribution of Covered Software in Source Code Form, including any - - Modifications that You create or to which You contribute, must be under the - - terms of this License. You must inform recipients that the Source Code Form of - - the Covered Software is governed by the terms of this License, and how they can - - obtain a copy of this License. You may not attempt to alter or restrict the - - recipients' rights in the Source Code Form. - - - - - - 3.2. Distribution of Executable Form - - - - If You distribute Covered Software in Executable Form then: - - - - a. - - - - such Covered Software must also be made available in Source Code Form, as - - described in Section 3.1, and You must inform recipients of the Executable - - Form how they can obtain a copy of such Source Code Form by reasonable - - means in a timely manner, at a charge no more than the cost of distribution - - to the recipient; and - - - - b. - - - - You may distribute such Executable Form under the terms of this License, or - - sublicense it under different terms, provided that the license for the - - Executable Form does not attempt to limit or alter the recipients' rights - - in the Source Code Form under this License. - - - - - - 3.3. 
Distribution of a Larger Work - - - - You may create and distribute a Larger Work under terms of Your choice, - - provided that You also comply with the requirements of this License for the - - Covered Software. If the Larger Work is a combination of Covered Software with - - a work governed by one or more Secondary Licenses, and the Covered Software is - - not Incompatible With Secondary Licenses, this License permits You to - - additionally distribute such Covered Software under the terms of such Secondary - - License(s), so that the recipient of the Larger Work may, at their option, - - further distribute the Covered Software under the terms of either this License - - or such Secondary License(s). - - - - - - 3.4. Notices - - - - You may not remove or alter the substance of any license notices (including - - copyright notices, patent notices, disclaimers of warranty, or limitations of - - liability) contained within the Source Code Form of the Covered Software, - - except that You may alter any license notices to the extent required to remedy - - known factual inaccuracies. - - - - - - 3.5. Application of Additional Terms - - - - You may choose to offer, and to charge a fee for, warranty, support, indemnity - - or liability obligations to one or more recipients of Covered Software. - - However, You may do so only on Your own behalf, and not on behalf of any - - Contributor. You must make it absolutely clear that any such warranty, support, - - indemnity, or liability obligation is offered by You alone, and You hereby - - agree to indemnify every Contributor for any liability incurred by such - - Contributor as a result of warranty, support, indemnity or liability terms You - - offer. You may include additional disclaimers of warranty and limitations of - - liability specific to any jurisdiction. - - - - - -4. Inability to Comply Due to Statute or Regulation - ---------------------------------------------------- - - - -If it is impossible for You to comply with any of the terms of this License with - -respect to some or all of the Covered Software due to statute, judicial order, or - -regulation then You must: (a) comply with the terms of this License to the - -maximum extent possible; and (b) describe the limitations and the code they - -affect. Such description must be placed in a text file included with all - -distributions of the Covered Software under this License. Except to the extent - -prohibited by statute or regulation, such description must be sufficiently - -detailed for a recipient of ordinary skill to be able to understand it. - - - - - -5. Termination - --------------- - - - - 5.1. The rights granted under this License will terminate automatically if You - - fail to comply with any of its terms. However, if You become compliant, then - - the rights granted under this License from a particular Contributor are - - reinstated (a) provisionally, unless and until such Contributor explicitly and - - finally terminates Your grants, and (b) on an ongoing basis, if such - - Contributor fails to notify You of the non-compliance by some reasonable means - - prior to 60 days after You have come back into compliance. Moreover, Your - - grants from a particular Contributor are reinstated on an ongoing basis if such - - Contributor notifies You of the non-compliance by some reasonable means, this - - is the first time You have received notice of non-compliance with this License - - from such Contributor, and You become compliant prior to 30 days after Your - - receipt of the notice. 
- - - - 5.2. If You initiate litigation against any entity by asserting a patent - - infringement claim (excluding declaratory judgment actions, counter-claims, and - - cross-claims) alleging that a Contributor Version directly or indirectly - - infringes any patent, then the rights granted to You by any and all - - Contributors for the Covered Software under Section 2.1 of this License shall - - terminate. - - - - 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - - license agreements (excluding distributors and resellers) which have been - - validly granted by You or Your distributors under this License prior to - - termination shall survive termination. - - - - - -6. Disclaimer of Warranty - -------------------------- - - - -Covered Software is provided under this License on an "as is" basis, without - -warranty of any kind, either expressed, implied, or statutory, including, without - -limitation, warranties that the Covered Software is free of defects, - -merchantable, fit for a particular purpose or non-infringing. The entire risk as - -to the quality and performance of the Covered Software is with You. Should any - -Covered Software prove defective in any respect, You (not any Contributor) assume - -the cost of any necessary servicing, repair, or correction. This disclaimer of - -warranty constitutes an essential part of this License. No use of any Covered - -Software is authorized under this License except under this disclaimer. - - - - - -7. Limitation of Liability - --------------------------- - - - -Under no circumstances and under no legal theory, whether tort (including - -negligence), contract, or otherwise, shall any Contributor, or anyone who - -distributes Covered Software as permitted above, be liable to You for any direct, - -indirect, special, incidental, or consequential damages of any character - -including, without limitation, damages for lost profits, loss of goodwill, work - -stoppage, computer failure or malfunction, or any and all other commercial - -damages or losses, even if such party shall have been informed of the possibility - -of such damages. This limitation of liability shall not apply to liability for - -death or personal injury resulting from such party's negligence to the extent - -applicable law prohibits such limitation. Some jurisdictions do not allow the - -exclusion or limitation of incidental or consequential damages, so this exclusion - -and limitation may not apply to You. - - - - - -8. Litigation - -------------- - - - -Any litigation relating to this License may be brought only in the courts of a - -jurisdiction where the defendant maintains its principal place of business and - -such litigation shall be governed by laws of that jurisdiction, without reference - -to its conflict-of-law provisions. Nothing in this Section shall prevent a - -party's ability to bring cross-claims or counter-claims. - - - - - -9. Miscellaneous - ----------------- - - - -This License represents the complete agreement concerning the subject matter - -hereof. If any provision of this License is held to be unenforceable, such - -provision shall be reformed only to the extent necessary to make it enforceable. - -Any law or regulation which provides that the language of a contract shall be - -construed against the drafter shall not be used to construe this License against - -a Contributor. - - - - - -10. Versions of the License - ---------------------------- - - - - - - 10.1. New Versions - - - - Mozilla Foundation is the license steward. 
Except as provided in Section 10.3, - - no one other than the license steward has the right to modify or publish new - - versions of this License. Each version will be given a distinguishing version - - number. - - - - - - 10.2. Effect of New Versions - - - - You may distribute the Covered Software under the terms of the version of the - - License under which You originally received the Covered Software, or under the - - terms of any subsequent version published by the license steward. - - - - - - 10.3. Modified Versions - - - - If you create software not governed by this License, and you want to create a - - new license for such software, you may create and use a modified version of - - this License if you rename the license and remove any references to the name of - - the license steward (except to note that such modified license differs from - - this License). - - - - - - 10.4. Distributing Source Code Form that is Incompatible With Secondary - - Licenses - - - - If You choose to distribute Source Code Form that is Incompatible With - - Secondary Licenses under the terms of this version of the License, the notice - - described in Exhibit B of this License must be attached. - - - - - -Exhibit A - Source Code Form License Notice - -------------------------------------------- - - - - This Source Code Form is subject to the terms of the Mozilla Public License, - - v. 2.0. If a copy of the MPL was not distributed with this file, You can - - obtain one at http://mozilla.org/MPL/2.0/. - - - -If it is not possible or desirable to put the notice in a particular file, then - -You may include the notice in a location (such as a LICENSE file in a relevant - -directory) where a recipient would be likely to look for such a notice. - - - -You may add additional accurate notices of copyright ownership. - - - - - -Exhibit B - "Incompatible With Secondary Licenses" Notice - ---------------------------------------------------------- - - - - This Source Code Form is "Incompatible With Secondary Licenses", as defined - - by the Mozilla Public License, v. 2.0. - - - ---- - - - -OpenSSL License - -(LibreSSL Portable Security Libraries 2.9.1) - - - -OpenSSL Project License - -======================= - - - -OpenSSL License - ---------------- - - - -==================================================================== - -Copyright (c) 1998-2001 The OpenSSL Project. All rights reserved. - - - -Redistribution and use in source and binary forms, with or without modification, - -are permitted provided that the following conditions are met: - - - - 1. Redistributions of source code must retain the above copyright notice, this - - list of conditions and the following disclaimer. - - - - 2. Redistributions in binary form must reproduce the above copyright notice, - - this list of conditions and the following disclaimer in the documentation - - and/or other materials provided with the distribution. - - - - 3. All advertising materials mentioning features or use of this software must - - display the following acknowledgment: "This product includes software - - developed by the OpenSSL Project for use in the OpenSSL Toolkit. - - (http://www.openssl.org/)" - - - - 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - - endorse or promote products derived from this software without prior written - - permission. For written permission, please contact openssl-core@openssl.org. - - - - 5. 
Products derived from this software may not be called "OpenSSL" nor may - - "OpenSSL" appear in their names without prior written permission of the - - OpenSSL Project. - - - - 6. Redistributions of any form whatsoever must retain the following - - acknowledgment: - - - - "This product includes software developed by the OpenSSL Project for use in - - the OpenSSL Toolkit (http://www.openssl.org/)" - - - -THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY EXPRESSED OR - -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - -SHALL THE OpenSSL PROJECT OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - -OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - -THE POSSIBILITY OF SUCH DAMAGE. - -==================================================================== - - - -This product includes cryptographic software written by Eric Young - -(eay@cryptsoft.com). This product includes software written by Tim Hudson - -(tjh@cryptsoft.com). - - - ---- - - - -SSLeay License - -(LibreSSL Portable Security Libraries 2.9.1) - - - -SSLeay License - -============== - - - - - -Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved. - ---------------------------------------------------------------------------- - - - -This package is an SSL implementation written by Eric Young (eay@cryptsoft.com). - -The implementation was written so as to conform with Netscapes SSL. This library - -is free for commercial and non-commercial use as long as the following conditions - -are aheared to. The following conditions apply to all code found in this - -distribution, be it the RC4, RSA, lhash, DES, etc., code; not just the SSL code. - -The SSL documentation included with this distribution is covered by the same - -copyright terms except that the holder is Tim Hudson (tjh@cryptsoft.com). - - - -Copyright remains Eric Young's, and as such any Copyright notices in the code are - -not to be removed. If this package is used in a product, Eric Young should be - -given attribution as the author of the parts of the library used. This can be in - -the form of a textual message at program startup or in documentation (online or - -textual) provided with the package. Redistribution and use in source and binary - -forms, with or without modification, are permitted provided that the following - -conditions are met: - - - - 1. Redistributions of source code must retain the copyright notice, this list - - of conditions and the following disclaimer. - - - - 2. Redistributions in binary form must reproduce the above copyright notice, - - this list of conditions and the following disclaimer in the documentation - - and/or other materials provided with the distribution. - - - - 3. All advertising materials mentioning features or use of this software must - - display the following acknowledgement: "This product includes cryptographic - - software written by Eric Young (eay@cryptsoft.com)". The word 'cryptographic' - - can be left out if the rouines from the library being used are not - - cryptographic related :-). - - - - 4. 
If you include any Windows specific code (or a derivative thereof) from the - - apps directory (application code) you must include an acknowledgement: "This - - product includes software written by Tim Hudson (tjh@cryptsoft.com)" - - - - - -THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND ANY EXPRESS OR IMPLIED - -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - -SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - -SUCH DAMAGE.The licence and distribution terms for any publically available - -version or derivative of this code cannot be changed. i.e. this code cannot - -simply be copied and put under another distribution licence [including the GNU - -Public Licence. - - diff --git a/vendor/github.com/netapp/trident/config/config.go b/vendor/github.com/netapp/trident/config/config.go deleted file mode 100644 index 304449764..000000000 --- a/vendor/github.com/netapp/trident/config/config.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package config - -import ( - "fmt" - "time" - - log "github.com/sirupsen/logrus" - - "github.com/netapp/trident/utils" -) - -type Protocol string -type AccessMode string -type VolumeMode string -type VolumeType string -type DriverContext string -type Platform string - -type Telemetry struct { - TridentVersion string `json:"version"` - Platform string `json:"platform"` - PlatformVersion string `json:"platformVersion"` -} - -type PersistentStateVersion struct { - PersistentStoreVersion string `json:"store_version"` - OrchestratorAPIVersion string `json:"orchestrator_api_version"` -} - -const ( - /* Misc. orchestrator constants */ - OrchestratorName = "trident" - OrchestratorClientName = OrchestratorName + "ctl" - orchestratorVersion = "19.10.0" - OrchestratorAPIVersion = "1" - PersistentStoreBootstrapAttempts = 30 - PersistentStoreBootstrapTimeout = PersistentStoreBootstrapAttempts * time.Second - PersistentStoreTimeout = 10 * time.Second - - /* REST/HTTP constants */ - HTTPTimeout = 90 * time.Second - - CACertName = "trident-ca" - ServerCertName = "trident-csi" // Must match CSI service name - ClientCertName = "trident-node" - - CAKeyFile = "caKey" - CACertFile = "caCert" - ServerKeyFile = "serverKey" - ServerCertFile = "serverCert" - ClientKeyFile = "clientKey" - ClientCertFile = "clientCert" - - certsPath = "/certs/" - - CAKeyPath = certsPath + CAKeyFile - CACertPath = certsPath + CACertFile - ServerKeyPath = certsPath + ServerKeyFile - ServerCertPath = certsPath + ServerCertFile - ClientKeyPath = certsPath + ClientKeyFile - ClientCertPath = certsPath + ClientCertFile - - /* Protocol constants. This value denotes a volume's backing storage protocol. For example, - a Trident volume with 'file' protocol is most likely NFS, while a 'block' protocol volume is probably iSCSI. 
*/ - File Protocol = "file" - Block Protocol = "block" - ProtocolAny Protocol = "" - - /* Access mode constants */ - ReadWriteOnce AccessMode = "ReadWriteOnce" - ReadOnlyMany AccessMode = "ReadOnlyMany" - ReadWriteMany AccessMode = "ReadWriteMany" - ModeAny AccessMode = "" - - /* Volume mode constants. This value describes how a volume will be consumed by application containers. - Most Trident volumes (regardless of protocol) probably use the 'Filesystem' mode, where the volume contains - a filesystem and is mounted into a container. By contrast, volumes with 'Block' mode always use 'block' protocol - and are attached to a container as raw block devices. */ - RawBlock VolumeMode = "Block" - Filesystem VolumeMode = "Filesystem" - - /* Volume type constants */ - OntapNFS VolumeType = "ONTAP_NFS" - OntapISCSI VolumeType = "ONTAP_iSCSI" - SolidFireISCSI VolumeType = "SolidFire_iSCSI" - ESeriesISCSI VolumeType = "Eseries_iSCSI" - UnknownVolumeType VolumeType = "" - - /* Driver-related constants */ - DefaultSolidFireVAG = OrchestratorName - UnknownDriver = "UnknownDriver" - StorageAPITimeoutSeconds = 90 - SANResizeDelta = 50000000 // 50mb - - /* REST frontend constants */ - MaxRESTRequestSize = 10240 - - /* Kubernetes deployment constants */ - ContainerTrident = "trident-main" - ContainerEtcd = "etcd" - - ContextDocker DriverContext = "docker" - ContextKubernetes DriverContext = "kubernetes" - ContextCSI DriverContext = "csi" - ContextCRD DriverContext = "crd" - - PlatformDocker Platform = "docker" - PlatformKubernetes Platform = "kubernetes" - PlatformCSI Platform = "csi" // plain CSI, no other CO present - - // Minimum and maximum supported Kubernetes versions - KubernetesVersionMin = "v1.11.0" - KubernetesVersionMax = "v1.16.0" - - // Minimum Kubernetes version for CSI Trident (non-CSI is the default) - KubernetesCSIVersionMinOptional = "v1.13.0" - - // Minimum Kubernetes version for CSI Trident default (non-CSI not supported) - KubernetesCSIVersionMinForced = "v1.14.0" - - TridentNamespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" -) - -var ( - validProtocols = map[Protocol]bool{ - File: true, - Block: true, - ProtocolAny: true, - } - - // BuildHash is the git hash the binary was built from - BuildHash = "unknown" - - // BuildType is the type of build: custom, beta or stable - BuildType = "custom" - - // BuildTypeRev is the revision of the build - BuildTypeRev = "0" - - // BuildTime is the time the binary was built - BuildTime = "unknown" - - // BuildImage is the Trident image that was built - BuildImage = "netapp/trident:" + orchestratorVersion + "-custom.0" - - // BuildEtcdVersion is the etcd version that Trident should be deployed with - BuildEtcdVersion = "v3.3.13" - - // BuildEtcdImage is the etcd image that Trident should be deployed with - BuildEtcdImage = "quay.io/coreos/etcd:" + BuildEtcdVersion - - OrchestratorVersion = utils.MustParseDate(version()) - - /* API Server and persistent store variables */ - BaseURL = "/" + OrchestratorName + "/v" + OrchestratorAPIVersion - VersionURL = "/" + OrchestratorName + "/v" + OrchestratorAPIVersion + "/version" - BackendURL = "/" + OrchestratorName + "/v" + OrchestratorAPIVersion + "/backend" - BackendUUIDURL = "/" + OrchestratorName + "/v" + OrchestratorAPIVersion + "/backendUUID" - VolumeURL = "/" + OrchestratorName + "/v" + OrchestratorAPIVersion + "/volume" - TransactionURL = "/" + OrchestratorName + "/v" + OrchestratorAPIVersion + "/txn" - StorageClassURL = "/" + OrchestratorName + "/v" + OrchestratorAPIVersion + 
"/storageclass" - NodeURL = "/" + OrchestratorName + "/v" + OrchestratorAPIVersion + "/node" - SnapshotURL = "/" + OrchestratorName + "/v" + OrchestratorAPIVersion + "/snapshot" - StoreURL = "/" + OrchestratorName + "/store" - - UsingPassthroughStore bool - CurrentDriverContext DriverContext - OrchestratorTelemetry = Telemetry{TridentVersion: OrchestratorVersion.String()} -) - -func IsValidProtocol(p Protocol) bool { - _, ok := validProtocols[p] - return ok -} - -func GetValidProtocolNames() []string { - ret := make([]string, len(validProtocols)) - for key := range validProtocols { - ret = append(ret, string(key)) - } - return ret -} - -func PlatformAtLeast(platformName string, version string) bool { - if OrchestratorTelemetry.Platform == platformName { - platformVersion := utils.MustParseSemantic(OrchestratorTelemetry.PlatformVersion) - requiredVersion, err := utils.ParseSemantic(version) - if err != nil { - log.WithFields(log.Fields{ - "platform": platformName, - "version": version, - }).Errorf("Platform version check failed. %+v", err) - return false - } - if platformVersion.AtLeast(requiredVersion) { - return true - } - } - return false -} - -func version() string { - - var version string - - if BuildType != "stable" { - if BuildType == "custom" { - version = fmt.Sprintf("%v-%v+%v", orchestratorVersion, BuildType, BuildHash) - } else { - version = fmt.Sprintf("%v-%v.%v+%v", orchestratorVersion, BuildType, BuildTypeRev, BuildHash) - } - } else { - version = orchestratorVersion - } - - return version -} diff --git a/vendor/github.com/netapp/trident/storage/backend.go b/vendor/github.com/netapp/trident/storage/backend.go deleted file mode 100644 index e5532d3e3..000000000 --- a/vendor/github.com/netapp/trident/storage/backend.go +++ /dev/null @@ -1,902 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package storage - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - "time" - - "github.com/RoaringBitmap/roaring" - "github.com/cenkalti/backoff" - "github.com/mitchellh/copystructure" - log "github.com/sirupsen/logrus" - - tridentconfig "github.com/netapp/trident/config" - sa "github.com/netapp/trident/storage_attribute" - drivers "github.com/netapp/trident/storage_drivers" - "github.com/netapp/trident/utils" -) - -// Driver provides a common interface for storage related operations -type Driver interface { - Name() string - Initialize(tridentconfig.DriverContext, string, *drivers.CommonStorageDriverConfig) error - Initialized() bool - // Terminate tells the driver to clean up, as it won't be called again. - Terminate() - Create(volConfig *VolumeConfig, storagePool *Pool, volAttributes map[string]sa.Request) error - CreatePrepare(volConfig *VolumeConfig) error - // CreateFollowup adds necessary information for accessing the volume to VolumeConfig. - CreateFollowup(volConfig *VolumeConfig) error - // GetInternalVolumeName will return a name that satisfies any character - // constraints present on the backend and that will be unique to Trident. - // The latter requirement should generally be done by prepending the - // value of CommonStorageDriver.SnapshotPrefix to the name. 
- CreateClone(volConfig *VolumeConfig) error - Import(volConfig *VolumeConfig, originalName string) error - Destroy(name string) error - Rename(name string, newName string) error - Resize(volConfig *VolumeConfig, sizeBytes uint64) error - Get(name string) error - GetInternalVolumeName(name string) string - GetStorageBackendSpecs(backend *Backend) error - GetProtocol() tridentconfig.Protocol - Publish(name string, publishInfo *utils.VolumePublishInfo) error - GetSnapshot(snapConfig *SnapshotConfig) (*Snapshot, error) - GetSnapshots(volConfig *VolumeConfig) ([]*Snapshot, error) - CreateSnapshot(snapConfig *SnapshotConfig) (*Snapshot, error) - RestoreSnapshot(snapConfig *SnapshotConfig) error - DeleteSnapshot(snapConfig *SnapshotConfig) error - StoreConfig(b *PersistentStorageBackendConfig) - // GetExternalConfig returns a version of the driver configuration that - // lacks confidential information, such as usernames and passwords. - GetExternalConfig() interface{} - // GetVolumeExternal accepts the internal name of a volume and returns a VolumeExternal - // object. This method is only available if using the passthrough store (i.e. Docker). - GetVolumeExternal(name string) (*VolumeExternal, error) - // GetVolumeExternalWrappers reads all volumes owned by this driver from the storage backend and - // writes them to the supplied channel as VolumeExternalWrapper objects. This method is only - // available if using the passthrough store (i.e. Docker). - GetVolumeExternalWrappers(chan *VolumeExternalWrapper) - GetUpdateType(driver Driver) *roaring.Bitmap -} - -type Backend struct { - Driver Driver - Name string - BackendUUID string - Online bool - State BackendState - Storage map[string]*Pool - Volumes map[string]*Volume -} - -type UpdateBackendStateRequest struct { - State string `json:"state"` -} - -type NotManagedError struct { - volumeName string -} - -func (e *NotManagedError) Error() string { - return fmt.Sprintf("volume %s is not managed by Trident", e.volumeName) -} - -type BackendState string - -const ( - Unknown = BackendState("unknown") - Online = BackendState("online") - Offline = BackendState("offline") - Deleting = BackendState("deleting") - Failed = BackendState("failed") -) - -func (s BackendState) String() string { - switch s { - case Unknown, Online, Offline, Deleting, Failed: - return string(s) - default: - return "unknown" - } -} - -func (s BackendState) IsUnknown() bool { - switch s { - case Online, Offline, Deleting, Failed: - return false - case Unknown: - return true - default: - return true - } -} - -func (s BackendState) IsOnline() bool { - return s == Online -} - -func (s BackendState) IsOffline() bool { - return s == Offline -} - -func (s BackendState) IsDeleting() bool { - return s == Deleting -} - -func (s BackendState) IsFailed() bool { - return s == Failed -} - -func NewStorageBackend(driver Driver) (*Backend, error) { - backend := Backend{ - Driver: driver, - State: Online, - Online: true, - Storage: make(map[string]*Pool), - Volumes: make(map[string]*Volume), - } - - // retrieve backend specs - if err := backend.Driver.GetStorageBackendSpecs(&backend); err != nil { - return nil, err - } - - return &backend, nil -} - -func NewFailedStorageBackend(driver Driver) *Backend { - backend := Backend{ - Driver: driver, - State: Failed, - Storage: make(map[string]*Pool), - Volumes: make(map[string]*Volume), - } - - log.WithFields(log.Fields{ - "backend": backend, - "driver": driver, - }).Debug("NewFailedStorageBackend.") - - return &backend -} - -func (b *Backend) 
AddStoragePool(pool *Pool) { - b.Storage[pool.Name] = pool -} - -func (b *Backend) GetDriverName() string { - return b.Driver.Name() -} - -func (b *Backend) GetProtocol() tridentconfig.Protocol { - return b.Driver.GetProtocol() -} - -func (b *Backend) AddVolume( - volConfig *VolumeConfig, storagePool *Pool, volAttributes map[string]sa.Request, -) (*Volume, error) { - - var err error - - log.WithFields(log.Fields{ - "backend": b.Name, - "backendUUID": b.BackendUUID, - "volume": volConfig.InternalName, - "storage_pool": storagePool.Name, - "size": volConfig.Size, - "storage_class": volConfig.StorageClass, - }).Debug("Attempting volume create.") - - // Ensure backend is ready - if err := b.ensureOnline(); err != nil { - return nil, err - } - - // CreatePrepare should perform the following tasks: - // 1. Generate the internal volume name - // 2. Optionally perform any other steps that could veto volume creation - if err = b.Driver.CreatePrepare(volConfig); err != nil { - return nil, err - } - - // Add volume to the backend - volumeExists := false - if err = b.Driver.Create(volConfig, storagePool, volAttributes); err != nil { - - if drivers.IsVolumeExistsError(err) { - - // Implement idempotency by ignoring the error if the volume exists already - volumeExists = true - - log.WithFields(log.Fields{ - "backend": b.Name, - "volume": volConfig.InternalName, - }).Warning("Volume already exists.") - - } else { - // If the volume doesn't exist but the create failed, return the error - return nil, err - } - } - - // Always perform the follow-up steps - if err = b.Driver.CreateFollowup(volConfig); err != nil { - - // If follow-up fails and we just created the volume, clean up by deleting it - if !volumeExists { - errDestroy := b.Driver.Destroy(volConfig.InternalName) - if errDestroy != nil { - log.WithFields(log.Fields{ - "backend": b.Name, - "volume": volConfig.InternalName, - }).Warnf("Mapping the created volume failed "+ - "and %s wasn't able to delete it afterwards: %s. "+ - "Volume must be manually deleted.", - tridentconfig.OrchestratorName, errDestroy) - } - } - - // In all cases where follow-up fails, return the follow-up error - return nil, err - } - - vol := NewVolume(volConfig, b.BackendUUID, storagePool.Name, false) - b.Volumes[vol.Config.Name] = vol - return vol, nil -} - -func (b *Backend) CloneVolume(volConfig *VolumeConfig) (*Volume, error) { - - log.WithFields(log.Fields{ - "backend": volConfig.Name, - "backendUUID": b.BackendUUID, - "storage_class": volConfig.StorageClass, - "source_volume": volConfig.CloneSourceVolume, - "source_volume_internal": volConfig.CloneSourceVolumeInternal, - "source_snapshot": volConfig.CloneSourceSnapshot, - "clone_volume": volConfig.Name, - "clone_volume_internal": volConfig.InternalName, - }).Debug("Attempting volume clone.") - - // Ensure volume is managed - if volConfig.ImportNotManaged { - return nil, &NotManagedError{volConfig.InternalName} - } - - // Ensure backend is ready - if err := b.ensureOnline(); err != nil { - return nil, err - } - - // CreatePrepare should perform the following tasks: - // 1. Sanitize the volume name - // 2. Ensure no volume with the same name exists on that backend - if err := b.Driver.CreatePrepare(volConfig); err != nil { - return nil, fmt.Errorf("failed to prepare clone create: %v", err) - } - - err := b.Driver.CreateClone(volConfig) - if err != nil { - return nil, err - } - - // The clone may not be fully created when the clone API returns, so wait here until it exists. 
- checkCloneExists := func() error { - return b.Driver.Get(volConfig.InternalName) - } - cloneExistsNotify := func(err error, duration time.Duration) { - log.WithField("increment", duration).Debug("Clone not yet present, waiting.") - } - cloneBackoff := backoff.NewExponentialBackOff() - cloneBackoff.InitialInterval = 1 * time.Second - cloneBackoff.Multiplier = 2 - cloneBackoff.RandomizationFactor = 0.1 - cloneBackoff.MaxElapsedTime = 90 * time.Second - - // Run the clone check using an exponential backoff - if err := backoff.RetryNotify(checkCloneExists, cloneBackoff, cloneExistsNotify); err != nil { - log.WithField("clone_volume", volConfig.Name).Warnf("Could not find clone after %3.2f seconds.", - float64(cloneBackoff.MaxElapsedTime)) - } else { - log.WithField("clone_volume", volConfig.Name).Debug("Clone found.") - } - - err = b.Driver.CreateFollowup(volConfig) - if err != nil { - errDestroy := b.Driver.Destroy(volConfig.InternalName) - if errDestroy != nil { - log.WithFields(log.Fields{ - "backend": b.Name, - "volume": volConfig.InternalName, - }).Warnf("Mapping the created volume failed "+ - "and %s wasn't able to delete it afterwards: %s. "+ - "Volume needs to be manually deleted.", - tridentconfig.OrchestratorName, errDestroy) - } - return nil, err - } - vol := NewVolume(volConfig, b.BackendUUID, drivers.UnsetPool, false) - b.Volumes[vol.Config.Name] = vol - return vol, nil -} - -func (b *Backend) GetVolumeExternal(volumeName string) (*VolumeExternal, error) { - - // Ensure backend is ready - if err := b.ensureOnline(); err != nil { - return nil, err - } - - if b.Driver.Get(volumeName) != nil { - return nil, fmt.Errorf("volume %s was not found", volumeName) - } - - volExternal, err := b.Driver.GetVolumeExternal(volumeName) - if err != nil { - return nil, fmt.Errorf("error requesting volume size: %v", err) - } - volExternal.Backend = b.Name - volExternal.BackendUUID = b.BackendUUID - return volExternal, nil -} - -func (b *Backend) ImportVolume(volConfig *VolumeConfig) (*Volume, error) { - - log.WithFields(log.Fields{ - "backend": b.Name, - "volume": volConfig.ImportOriginalName, - "NotManaged": volConfig.ImportNotManaged, - }).Debug("Backend#ImportVolume") - - // Ensure backend is ready - if err := b.ensureOnline(); err != nil { - return nil, err - } - - if volConfig.ImportNotManaged { - // The volume is not managed and will not be renamed during import. - volConfig.InternalName = volConfig.ImportOriginalName - } else { - // CreatePrepare should perform the following tasks: - // 1. Sanitize the volume name - // 2. 
Ensure no volume with the same name exists on that backend - if err := b.Driver.CreatePrepare(volConfig); err != nil { - return nil, fmt.Errorf("failed to prepare import volume: %v", err) - } - } - - err := b.Driver.Import(volConfig, volConfig.ImportOriginalName) - if err != nil { - return nil, fmt.Errorf("driver import volume failed: %v", err) - } - - err = b.Driver.CreateFollowup(volConfig) - if err != nil { - return nil, fmt.Errorf("failed post import volume operations : %v", err) - } - - volume := NewVolume(volConfig, b.BackendUUID, drivers.UnsetPool, false) - b.Volumes[volume.Config.Name] = volume - return volume, nil -} - -func (b *Backend) ResizeVolume(volConfig *VolumeConfig, newSize string) error { - - // Ensure volume is managed - if volConfig.ImportNotManaged { - return &NotManagedError{volConfig.InternalName} - } - - // Ensure backend is ready - if err := b.ensureOnline(); err != nil { - return err - } - - // Determine volume size in bytes - requestedSize, err := utils.ConvertSizeToBytes(newSize) - if err != nil { - return fmt.Errorf("could not convert volume size %s: %v", newSize, err) - } - newSizeBytes, err := strconv.ParseUint(requestedSize, 10, 64) - if err != nil { - return fmt.Errorf("%v is an invalid volume size: %v", newSize, err) - } - - log.WithFields(log.Fields{ - "backend": b.Name, - "volume": volConfig.InternalName, - "volume_size": newSizeBytes, - }).Debug("Attempting volume resize.") - return b.Driver.Resize(volConfig, newSizeBytes) -} - -func (b *Backend) RenameVolume(volConfig *VolumeConfig, newName string) error { - - oldName := volConfig.InternalName - - // Ensure volume is managed - if volConfig.ImportNotManaged { - return &NotManagedError{oldName} - } - - if b.State != Online { - log.WithFields(log.Fields{ - "state": b.State, - "expectedState": string(Online), - }).Error("Invalid backend state.") - return fmt.Errorf("backend %s is not Online", b.Name) - } - - if err := b.Driver.Get(oldName); err != nil { - return fmt.Errorf("volume %s not found on backend %s; %v", oldName, b.Name, err) - } - if err := b.Driver.Rename(oldName, newName); err != nil { - return fmt.Errorf("error attempting to rename volume %s on backend %s: %v", oldName, b.Name, err) - } - return nil -} - -func (b *Backend) RemoveVolume(volConfig *VolumeConfig) error { - - log.WithFields(log.Fields{ - "backend": b.Name, - "volume": volConfig.Name, - }).Debug("Backend#RemoveVolume") - - // Ensure volume is managed - if volConfig.ImportNotManaged { - return &NotManagedError{volConfig.InternalName} - } - - // Ensure backend is ready - if err := b.ensureOnlineOrDeleting(); err != nil { - return err - } - - if err := b.Driver.Destroy(volConfig.InternalName); err != nil { - // TODO: Check the error being returned once the nDVP throws errors - // for volumes that aren't found. - return err - } - b.RemoveCachedVolume(volConfig.Name) - return nil -} - -func (b *Backend) RemoveCachedVolume(volumeName string) { - - if _, ok := b.Volumes[volumeName]; ok { - delete(b.Volumes, volumeName) - } -} - -func (b *Backend) GetSnapshot(snapConfig *SnapshotConfig) (*Snapshot, error) { - - log.WithFields(log.Fields{ - "backend": b.Name, - "volumeName": snapConfig.VolumeName, - "snapshotName": snapConfig.Name, - }).Debug("GetSnapshot.") - - // Ensure backend is ready - if err := b.ensureOnline(); err != nil { - return nil, err - } - - if snapshot, err := b.Driver.GetSnapshot(snapConfig); err != nil { - // An error here means we couldn't check for the snapshot. It does not mean the snapshot doesn't exist. 
- return nil, err - } else if snapshot == nil { - // No error and no snapshot means the snapshot doesn't exist. - return nil, fmt.Errorf("snapshot %s on volume %s not found", snapConfig.Name, snapConfig.VolumeName) - } else { - return snapshot, nil - } -} - -func (b *Backend) GetSnapshots(volConfig *VolumeConfig) ([]*Snapshot, error) { - - log.WithFields(log.Fields{ - "backend": b.Name, - "volumeName": volConfig.Name, - }).Debug("GetSnapshots.") - - // Ensure backend is ready - if err := b.ensureOnline(); err != nil { - return nil, err - } - - return b.Driver.GetSnapshots(volConfig) -} - -func (b *Backend) CreateSnapshot(snapConfig *SnapshotConfig, volConfig *VolumeConfig) (*Snapshot, error) { - - log.WithFields(log.Fields{ - "backend": b.Name, - "volumeName": snapConfig.VolumeName, - "snapshotName": snapConfig.Name, - }).Debug("Attempting snapshot create.") - - // Ensure volume is managed - if volConfig.ImportNotManaged { - return nil, &NotManagedError{volConfig.InternalName} - } - - // Ensure backend is ready - if err := b.ensureOnline(); err != nil { - return nil, err - } - - // Set the default internal snapshot name to match the snapshot name. Drivers - // may override this value in the SnapshotConfig structure if necessary. - snapConfig.InternalName = snapConfig.Name - - // Implement idempotency by checking for the snapshot first - if existingSnapshot, err := b.Driver.GetSnapshot(snapConfig); err != nil { - - // An error here means we couldn't check for the snapshot. It does not mean the snapshot doesn't exist. - return nil, err - - } else if existingSnapshot != nil { - - log.WithFields(log.Fields{ - "backend": b.Name, - "volumeName": snapConfig.VolumeName, - "snapshotName": snapConfig.Name, - }).Warning("Snapshot already exists.") - - // Snapshot already exists, so just return it - return existingSnapshot, nil - } - - // Create snapshot - return b.Driver.CreateSnapshot(snapConfig) -} - -func (b *Backend) RestoreSnapshot(snapConfig *SnapshotConfig, volConfig *VolumeConfig) error { - - log.WithFields(log.Fields{ - "backend": b.Name, - "volumeName": snapConfig.VolumeName, - "snapshotName": snapConfig.Name, - }).Debug("Attempting snapshot restore.") - - // Ensure volume is managed - if volConfig.ImportNotManaged { - return &NotManagedError{volConfig.InternalName} - } - - // Ensure backend is ready - if err := b.ensureOnline(); err != nil { - return err - } - - // Restore snapshot - return b.Driver.RestoreSnapshot(snapConfig) -} - -func (b *Backend) DeleteSnapshot(snapConfig *SnapshotConfig, volConfig *VolumeConfig) error { - - log.WithFields(log.Fields{ - "backend": b.Name, - "volumeName": snapConfig.VolumeName, - "snapshotName": snapConfig.Name, - }).Debug("Attempting snapshot delete.") - - // Ensure volume is managed - if volConfig.ImportNotManaged { - return &NotManagedError{volConfig.InternalName} - } - - // Ensure backend is ready - if err := b.ensureOnlineOrDeleting(); err != nil { - return err - } - - // Implement idempotency by checking for the snapshot first - if existingSnapshot, err := b.Driver.GetSnapshot(snapConfig); err != nil { - - // An error here means we couldn't check for the snapshot. It does not mean the snapshot doesn't exist. - return err - - } else if existingSnapshot == nil { - - log.WithFields(log.Fields{ - "backend": b.Name, - "volumeName": snapConfig.VolumeName, - "snapshotName": snapConfig.Name, - }).Warning("Snapshot not found.") - - // Snapshot does not exist, so just return without error. 
- return nil - } - - // Delete snapshot - return b.Driver.DeleteSnapshot(snapConfig) -} - -const ( - BackendRename = iota - VolumeAccessInfoChange - InvalidUpdate - UsernameChange - PasswordChange -) - -func (b *Backend) GetUpdateType(origBackend *Backend) *roaring.Bitmap { - updateCode := b.Driver.GetUpdateType(origBackend.Driver) - if b.Name != origBackend.Name { - updateCode.Add(BackendRename) - } - return updateCode -} - -// HasVolumes returns true if the Backend has one or more volumes -// provisioned on it. -func (b *Backend) HasVolumes() bool { - return len(b.Volumes) > 0 -} - -// Terminate informs the backend that it is being deleted from the core -// and will not be called again. This may be a signal to the storage -// driver to clean up and stop any ongoing operations. -func (b *Backend) Terminate() { - - logFields := log.Fields{ - "backend": b.Name, - "backendUUID": b.BackendUUID, - "driver": b.GetDriverName(), - "state": string(b.State), - } - - if !b.Driver.Initialized() { - log.WithFields(logFields).Warning("Cannot terminate an uninitialized backend.") - } else { - log.WithFields(logFields).Debug("Terminating backend.") - b.Driver.Terminate() - } -} - -func (b *Backend) ensureOnline() error { - if b.State != Online { - log.WithFields(log.Fields{ - "state": b.State, - "expectedState": string(Online), - }).Error("Invalid backend state.") - return fmt.Errorf("backend %s is not Online", b.Name) - } - return nil -} - -func (b *Backend) ensureOnlineOrDeleting() error { - if b.State != Online && b.State != Deleting { - log.WithFields(log.Fields{ - "state": b.State, - "expectedState": string(Online) + "/" + string(Deleting), - }).Error("Invalid backend state.") - return fmt.Errorf("backend %s is not Online or Deleting", b.Name) - } - return nil -} - -type BackendExternal struct { - Name string `json:"name"` - BackendUUID string `json:"backendUUID"` - Protocol tridentconfig.Protocol `json:"protocol"` - Config interface{} `json:"config"` - Storage map[string]interface{} `json:"storage"` - State BackendState `json:"state"` - Online bool `json:"online"` - Volumes []string `json:"volumes"` -} - -func (b *Backend) ConstructExternal() *BackendExternal { - backendExternal := BackendExternal{ - Name: b.Name, - BackendUUID: b.BackendUUID, - Protocol: b.GetProtocol(), - Config: b.Driver.GetExternalConfig(), - Storage: make(map[string]interface{}), - Online: b.Online, - State: b.State, - Volumes: make([]string, 0), - } - - for name, pool := range b.Storage { - backendExternal.Storage[name] = pool.ConstructExternal() - } - for volName := range b.Volumes { - backendExternal.Volumes = append(backendExternal.Volumes, volName) - } - return &backendExternal -} - -// Used to store the requisite info for a backend in etcd. 
Other than -// the configuration, all other data will be reconstructed during the bootstrap -// phase - -type PersistentStorageBackendConfig struct { - OntapConfig *drivers.OntapStorageDriverConfig `json:"ontap_config,omitempty"` - SolidfireConfig *drivers.SolidfireStorageDriverConfig `json:"solidfire_config,omitempty"` - EseriesConfig *drivers.ESeriesStorageDriverConfig `json:"eseries_config,omitempty"` - AWSConfig *drivers.AWSNFSStorageDriverConfig `json:"aws_config,omitempty"` - AzureConfig *drivers.AzureNFSStorageDriverConfig `json:"azure_config,omitempty"` - GCPConfig *drivers.GCPNFSStorageDriverConfig `json:"gcp_config,omitempty"` - FakeStorageDriverConfig *drivers.FakeStorageDriverConfig `json:"fake_config,omitempty"` -} - -type BackendPersistent struct { - Version string `json:"version"` - Config PersistentStorageBackendConfig `json:"config"` - Name string `json:"name"` - BackendUUID string `json:"backendUUID"` - Online bool `json:"online"` - State BackendState `json:"state"` -} - -func (b *Backend) ConstructPersistent() *BackendPersistent { - persistentBackend := &BackendPersistent{ - Version: tridentconfig.OrchestratorAPIVersion, - Config: PersistentStorageBackendConfig{}, - Name: b.Name, - Online: b.Online, - State: b.State, - BackendUUID: b.BackendUUID, - } - b.Driver.StoreConfig(&persistentBackend.Config) - return persistentBackend -} - -// Unfortunately, this method appears to be necessary to avoid arbitrary values -// ending up in the json.RawMessage fields of CommonStorageDriverConfig. -// Ideally, BackendPersistent would just store a serialized config, but -// doing so appears to cause problems with the json.RawMessage fields. -func (p *BackendPersistent) MarshalConfig() (string, error) { - var ( - bytes []byte - err error - ) - switch { - case p.Config.OntapConfig != nil: - bytes, err = json.Marshal(p.Config.OntapConfig) - case p.Config.SolidfireConfig != nil: - bytes, err = json.Marshal(p.Config.SolidfireConfig) - case p.Config.EseriesConfig != nil: - bytes, err = json.Marshal(p.Config.EseriesConfig) - case p.Config.AWSConfig != nil: - bytes, err = json.Marshal(p.Config.AWSConfig) - case p.Config.AzureConfig != nil: - bytes, err = json.Marshal(p.Config.AzureConfig) - case p.Config.GCPConfig != nil: - bytes, err = json.Marshal(p.Config.GCPConfig) - case p.Config.FakeStorageDriverConfig != nil: - bytes, err = json.Marshal(p.Config.FakeStorageDriverConfig) - default: - return "", fmt.Errorf("no recognized config found for backend %s", p.Name) - } - if err != nil { - return "", err - } - return string(bytes), err -} - -// ExtractBackendSecrets clones itself (a BackendPersistent struct), builds a map of any secret data it -// contains (credentials, etc.), clears those fields in the clone, and returns the clone and the map. 
-func (p *BackendPersistent) ExtractBackendSecrets(secretName string) (*BackendPersistent, map[string]string, error) { - - clone, err := copystructure.Copy(*p) - if err != nil { - return nil, nil, err - } - - backend, ok := clone.(BackendPersistent) - if !ok { - return nil, nil, err - } - - secretName = fmt.Sprintf("secret:%s", secretName) - secretMap := make(map[string]string) - - switch { - case backend.Config.OntapConfig != nil: - secretMap["Username"] = backend.Config.OntapConfig.Username - secretMap["Password"] = backend.Config.OntapConfig.Password - backend.Config.OntapConfig.Username = secretName - backend.Config.OntapConfig.Password = secretName - case p.Config.SolidfireConfig != nil: - secretMap["EndPoint"] = backend.Config.SolidfireConfig.EndPoint - backend.Config.SolidfireConfig.EndPoint = secretName - case p.Config.EseriesConfig != nil: - secretMap["Username"] = backend.Config.EseriesConfig.Username - secretMap["Password"] = backend.Config.EseriesConfig.Password - secretMap["PasswordArray"] = backend.Config.EseriesConfig.PasswordArray - backend.Config.EseriesConfig.Username = secretName - backend.Config.EseriesConfig.Password = secretName - backend.Config.EseriesConfig.PasswordArray = secretName - case p.Config.AWSConfig != nil: - secretMap["APIKey"] = backend.Config.AWSConfig.APIKey - secretMap["SecretKey"] = backend.Config.AWSConfig.SecretKey - backend.Config.AWSConfig.APIKey = secretName - backend.Config.AWSConfig.SecretKey = secretName - case p.Config.AzureConfig != nil: - secretMap["ClientID"] = backend.Config.AzureConfig.ClientID - secretMap["ClientSecret"] = backend.Config.AzureConfig.ClientSecret - backend.Config.AzureConfig.ClientID = secretName - backend.Config.AzureConfig.ClientSecret = secretName - case p.Config.GCPConfig != nil: - secretMap["Private_Key"] = backend.Config.GCPConfig.APIKey.PrivateKey - secretMap["Private_Key_ID"] = backend.Config.GCPConfig.APIKey.PrivateKeyID - backend.Config.GCPConfig.APIKey.PrivateKey = secretName - backend.Config.GCPConfig.APIKey.PrivateKeyID = secretName - case p.Config.FakeStorageDriverConfig != nil: - // Nothing to do - default: - return nil, nil, errors.New("cannot extract secrets, unknown backend type") - } - - return &backend, secretMap, nil -} - -func (p *BackendPersistent) InjectBackendSecrets(secretMap map[string]string) error { - - makeError := func(fieldName string) error { - return fmt.Errorf("%s field missing from backend secrets", fieldName) - } - - var ok bool - - switch { - case p.Config.OntapConfig != nil: - if p.Config.OntapConfig.Username, ok = secretMap["Username"]; !ok { - return makeError("Username") - } - if p.Config.OntapConfig.Password, ok = secretMap["Password"]; !ok { - return makeError("Password") - } - case p.Config.SolidfireConfig != nil: - if p.Config.SolidfireConfig.EndPoint, ok = secretMap["EndPoint"]; !ok { - return makeError("EndPoint") - } - case p.Config.EseriesConfig != nil: - if p.Config.EseriesConfig.Username, ok = secretMap["Username"]; !ok { - return makeError("Username") - } - if p.Config.EseriesConfig.Password, ok = secretMap["Password"]; !ok { - return makeError("Password") - } - if p.Config.EseriesConfig.PasswordArray, ok = secretMap["PasswordArray"]; !ok { - return makeError("PasswordArray") - } - case p.Config.AWSConfig != nil: - if p.Config.AWSConfig.APIKey, ok = secretMap["APIKey"]; !ok { - return makeError("APIKey") - } - if p.Config.AWSConfig.SecretKey, ok = secretMap["SecretKey"]; !ok { - return makeError("SecretKey") - } - case p.Config.AzureConfig != nil: - if 
p.Config.AzureConfig.ClientID, ok = secretMap["ClientID"]; !ok { - return makeError("ClientID") - } - if p.Config.AzureConfig.ClientSecret, ok = secretMap["ClientSecret"]; !ok { - return makeError("ClientSecret") - } - case p.Config.GCPConfig != nil: - if p.Config.GCPConfig.APIKey.PrivateKey, ok = secretMap["Private_Key"]; !ok { - return makeError("Private_Key") - } - if p.Config.GCPConfig.APIKey.PrivateKeyID, ok = secretMap["Private_Key_ID"]; !ok { - return makeError("Private_Key_ID") - } - case p.Config.FakeStorageDriverConfig != nil: - // Nothing to do - default: - return errors.New("cannot inject secrets, unknown backend type") - } - - return nil -} diff --git a/vendor/github.com/netapp/trident/storage/fake/storage_pool.go b/vendor/github.com/netapp/trident/storage/fake/storage_pool.go deleted file mode 100644 index d8e796128..000000000 --- a/vendor/github.com/netapp/trident/storage/fake/storage_pool.go +++ /dev/null @@ -1,39 +0,0 @@ -package fake - -import ( - "encoding/json" - - sa "github.com/netapp/trident/storage_attribute" -) - -type StoragePool struct { - Attrs map[string]sa.Offer `json:"attributes"` - Bytes uint64 `json:"sizeBytes"` -} - -// UnmarshalJSON implements json.Unmarshaler and allows FakeStoragePool -// to be unmarshaled with the Attrs map correctly defined. -func (p *StoragePool) UnmarshalJSON(data []byte) error { - var tmp struct { - Attrs json.RawMessage `json:"attributes"` - Bytes uint64 `json:"sizeBytes"` - } - - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - p.Attrs, err = sa.UnmarshalOfferMap(tmp.Attrs) - if err != nil { - return err - } - p.Bytes = tmp.Bytes - return nil -} - -func (p *StoragePool) ConstructClone() *StoragePool { - return &StoragePool{ - Attrs: p.Attrs, - Bytes: p.Bytes, - } -} diff --git a/vendor/github.com/netapp/trident/storage/fake/volume.go b/vendor/github.com/netapp/trident/storage/fake/volume.go deleted file mode 100644 index e6380d1cb..000000000 --- a/vendor/github.com/netapp/trident/storage/fake/volume.go +++ /dev/null @@ -1,8 +0,0 @@ -package fake - -type Volume struct { - Name string `json:"name"` - RequestedPool string `json:"requestedPool"` - PhysicalPool string - SizeBytes uint64 `json:"size"` -} diff --git a/vendor/github.com/netapp/trident/storage/snapshot.go b/vendor/github.com/netapp/trident/storage/snapshot.go deleted file mode 100644 index 1b31a3c78..000000000 --- a/vendor/github.com/netapp/trident/storage/snapshot.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. 
- -package storage - -import ( - "fmt" - "regexp" -) - -const SnapshotTimestampFormat = "2006-01-02T15:04:05Z" -const SnapshotNameFormat = "20060102T150405Z" - -var snapshotIDRegex = regexp.MustCompile(`^(?P[^\s/]+)/(?P[^\s/]+)$`) - -type SnapshotConfig struct { - Version string `json:"version,omitempty"` - Name string `json:"name,omitempty"` - InternalName string `json:"internalName,omitempty"` - VolumeName string `json:"volumeName,omitempty"` - VolumeInternalName string `json:"volumeInternalName,omitempty"` -} - -func (c *SnapshotConfig) ID() string { - return MakeSnapshotID(c.VolumeName, c.Name) -} - -func (c *SnapshotConfig) Validate() error { - if c.Name == "" || c.VolumeName == "" { - return fmt.Errorf("the following fields for \"Snapshot\" are mandatory: name and volumeName") - } - return nil -} - -type Snapshot struct { - Config *SnapshotConfig - Created string `json:"dateCreated"` // The UTC time that the snapshot was created, in RFC3339 format - SizeBytes int64 `json:"size"` // The size of the volume at the time the snapshot was created - State SnapshotState -} - -type SnapshotState string - -const ( - SnapshotStateOnline = SnapshotState("online") - SnapshotStateMissingBackend = SnapshotState("missing_backend") - SnapshotStateMissingVolume = SnapshotState("missing_volume") -) - -func (s SnapshotState) IsOnline() bool { - return s == SnapshotStateOnline -} - -func (s SnapshotState) IsMissingBackend() bool { - return s == SnapshotStateMissingBackend -} - -func (s SnapshotState) IsMissingVolume() bool { - return s == SnapshotStateMissingVolume -} - -type SnapshotExternal struct { - Snapshot -} - -func (s *SnapshotExternal) ID() string { - return MakeSnapshotID(s.Config.VolumeName, s.Config.Name) -} - -type SnapshotPersistent struct { - Snapshot -} - -func NewSnapshot(config *SnapshotConfig, created string, sizeBytes int64) *Snapshot { - return &Snapshot{ - Config: config, - Created: created, - SizeBytes: sizeBytes, - State: SnapshotStateOnline, - } -} - -func (s *Snapshot) ConstructExternal() *SnapshotExternal { - clone := s.ConstructClone() - return &SnapshotExternal{Snapshot: *clone} -} - -func (s *Snapshot) ConstructPersistent() *SnapshotPersistent { - clone := s.ConstructClone() - return &SnapshotPersistent{Snapshot: *clone} -} - -func (s *Snapshot) ConstructClone() *Snapshot { - return &Snapshot{ - Config: &SnapshotConfig{ - Version: s.Config.Version, - Name: s.Config.Name, - InternalName: s.Config.InternalName, - VolumeName: s.Config.VolumeName, - VolumeInternalName: s.Config.VolumeInternalName, - }, - Created: s.Created, - SizeBytes: s.SizeBytes, - State: s.State, - } -} - -func (s *Snapshot) ID() string { - return MakeSnapshotID(s.Config.VolumeName, s.Config.Name) -} - -func (s *SnapshotPersistent) ConstructExternal() *SnapshotExternal { - clone := s.ConstructClone() - return &SnapshotExternal{Snapshot: *clone} -} - -func MakeSnapshotID(volumeName, snapshotName string) string { - return fmt.Sprintf("%s/%s", volumeName, snapshotName) -} - -func ParseSnapshotID(snapshotID string) (string, string, error) { - - match := snapshotIDRegex.FindStringSubmatch(snapshotID) - - paramsMap := make(map[string]string) - for i, name := range snapshotIDRegex.SubexpNames() { - if i > 0 && i <= len(match) { - paramsMap[name] = match[i] - } - } - - volumeName, ok := paramsMap["volume"] - if !ok { - return "", "", fmt.Errorf("snapshot ID %s does not contain a volume name", volumeName) - } - snapshotName, ok := paramsMap["snapshot"] - if !ok { - return "", "", fmt.Errorf("snapshot ID %s does not 
contain a snapshot name", volumeName) - } - - return volumeName, snapshotName, nil -} diff --git a/vendor/github.com/netapp/trident/storage/storage_pool.go b/vendor/github.com/netapp/trident/storage/storage_pool.go deleted file mode 100644 index 84f7baaff..000000000 --- a/vendor/github.com/netapp/trident/storage/storage_pool.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package storage - -import ( - "sort" - - sa "github.com/netapp/trident/storage_attribute" -) - -type Pool struct { - Name string - // A Trident storage pool can potentially satisfy more than one storage class. - StorageClasses []string - Backend *Backend - Attributes map[string]sa.Offer // These attributes are used to match storage classes - InternalAttributes map[string]string // These attributes are defined & used internally by storage drivers -} - -func NewStoragePool(backend *Backend, name string) *Pool { - return &Pool{ - Name: name, - StorageClasses: make([]string, 0), - Backend: backend, - Attributes: make(map[string]sa.Offer), - InternalAttributes: make(map[string]string), - } -} - -func (pool *Pool) AddStorageClass(class string) { - // Note that this function should get called once per storage class - // affecting the volume; thus, we don't need to check for duplicates. - pool.StorageClasses = append(pool.StorageClasses, class) -} - -func (pool *Pool) RemoveStorageClass(class string) bool { - found := false - for i, name := range pool.StorageClasses { - if name == class { - pool.StorageClasses = append(pool.StorageClasses[:i], - pool.StorageClasses[i+1:]...) - found = true - break - } - } - return found -} - -type PoolExternal struct { - Name string `json:"name"` - StorageClasses []string `json:"storageClasses"` - //TODO: can't have an interface here for unmarshalling - Attributes map[string]sa.Offer `json:"storageAttributes"` -} - -func (pool *Pool) ConstructExternal() *PoolExternal { - external := &PoolExternal{ - Name: pool.Name, - StorageClasses: pool.StorageClasses, - Attributes: pool.Attributes, - } - - // We want to sort these so that the output remains consistent; - // there are cases where the order won't always be the same. - sort.Strings(external.StorageClasses) - return external -} diff --git a/vendor/github.com/netapp/trident/storage/volume.go b/vendor/github.com/netapp/trident/storage/volume.go deleted file mode 100644 index 5112ce583..000000000 --- a/vendor/github.com/netapp/trident/storage/volume.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. 
- -package storage - -import ( - "bytes" - "encoding/base64" - "encoding/gob" - "fmt" - "strings" - - "github.com/netapp/trident/config" - "github.com/netapp/trident/utils" -) - -type VolumeConfig struct { - Version string `json:"version"` - Name string `json:"name"` - InternalName string `json:"internalName"` - Size string `json:"size"` - Protocol config.Protocol `json:"protocol"` - SpaceReserve string `json:"spaceReserve"` - SecurityStyle string `json:"securityStyle"` - SnapshotPolicy string `json:"snapshotPolicy,omitempty"` - SnapshotReserve string `json:"snapshotReserve,omitempty"` - SnapshotDir string `json:"snapshotDirectory,omitempty"` - ExportPolicy string `json:"exportPolicy,omitempty"` - UnixPermissions string `json:"unixPermissions,omitempty"` - StorageClass string `json:"storageClass,omitempty"` - AccessMode config.AccessMode `json:"accessMode,omitempty"` - VolumeMode config.VolumeMode `json:"volumeMode,omitempty"` - AccessInfo utils.VolumeAccessInfo `json:"accessInformation"` - BlockSize string `json:"blockSize"` - FileSystem string `json:"fileSystem"` - Encryption string `json:"encryption"` - CloneSourceVolume string `json:"cloneSourceVolume"` - CloneSourceVolumeInternal string `json:"cloneSourceVolumeInternal"` - CloneSourceSnapshot string `json:"cloneSourceSnapshot"` - SplitOnClone string `json:"splitOnClone"` - QoS string `json:"qos,omitempty"` - QoSType string `json:"type,omitempty"` - ServiceLevel string `json:"serviceLevel,omitempty"` - Network string `json:"network,omitempty"` - ImportOriginalName string `json:"importOriginalName,omitempty"` - ImportBackendUUID string `json:"importBackendUUID,omitempty"` - ImportNotManaged bool `json:"importNotManaged,omitempty"` -} - -func (c *VolumeConfig) Validate() error { - if c.Name == "" || c.Size == "" { - return fmt.Errorf("the following fields for \"Volume\" are mandatory: name and size") - } - if !config.IsValidProtocol(c.Protocol) { - return fmt.Errorf("%v is an usupported protocol! Acceptable values: "+ - "%s", c.Protocol, - strings.Join([]string(config.GetValidProtocolNames()), ", "), - ) - } - return nil -} - -func (c *VolumeConfig) ConstructClone() *VolumeConfig { - clone := &VolumeConfig{} - buff := new(bytes.Buffer) - enc := gob.NewEncoder(buff) - dec := gob.NewDecoder(buff) - _ = enc.Encode(c) - _ = dec.Decode(clone) - return clone -} - -type Volume struct { - Config *VolumeConfig - BackendUUID string // UUID of the storage backend - Pool string // Name of the pool on which this volume was first provisioned - Orphaned bool // An Orphaned volume isn't currently tracked by the storage backend - State VolumeState -} - -type VolumeState string - -const ( - VolumeStateUnknown = VolumeState("unknown") - VolumeStateOnline = VolumeState("online") - VolumeStateDeleting = VolumeState("deleting") - VolumeStateUpgrading = VolumeState("upgrading") - VolumeStateMissingBackend = VolumeState("missing_backend") - // TODO should Orphaned be moved to a VolumeState? 
-) - -func (s VolumeState) String() string { - switch s { - case VolumeStateUnknown, VolumeStateOnline, VolumeStateDeleting: - return string(s) - default: - return "unknown" - } -} - -func (s VolumeState) IsUnknown() bool { - switch s { - case VolumeStateOnline, VolumeStateDeleting: - return false - case VolumeStateUnknown: - return true - default: - return true - } -} - -func (s VolumeState) IsOnline() bool { - return s == VolumeStateOnline -} - -func (s VolumeState) IsDeleting() bool { - return s == VolumeStateDeleting -} - -func (s VolumeState) IsMissingBackend() bool { - return s == VolumeStateMissingBackend -} - -func NewVolume(conf *VolumeConfig, backendUUID string, pool string, orphaned bool) *Volume { - return &Volume{ - Config: conf, - BackendUUID: backendUUID, - Pool: pool, - Orphaned: orphaned, - State: VolumeStateOnline, - } -} - -type VolumeExternal struct { - Config *VolumeConfig - Backend string `json:"backend"` // replaced w/ backendUUID, remains to read old records - BackendUUID string `json:"backendUUID"` // UUID of the storage backend - Pool string `json:"pool"` - Orphaned bool `json:"orphaned"` - State VolumeState `json:"state"` -} - -func (v *VolumeExternal) GetCHAPSecretName() string { - secretName := fmt.Sprintf("trident-chap-%v-%v", v.BackendUUID, v.Config.AccessInfo.IscsiUsername) - secretName = strings.Replace(secretName, "_", "-", -1) - secretName = strings.Replace(secretName, ".", "-", -1) - secretName = strings.ToLower(secretName) - return secretName -} - -func (v *Volume) ConstructExternal() *VolumeExternal { - return &VolumeExternal{ - Config: v.Config, - BackendUUID: v.BackendUUID, - Pool: v.Pool, - Orphaned: v.Orphaned, - State: v.State, - } -} - -// VolumeExternalWrapper is used to return volumes and errors via channels between goroutines -type VolumeExternalWrapper struct { - Volume *VolumeExternal - Error error -} - -type ImportVolumeRequest struct { - Backend string `json:"backend"` - InternalName string `json:"internalName"` - NoManage bool `json:"noManage"` - PVCData string `json:"pvcData"` // Opaque, base64-encoded -} - -func (r *ImportVolumeRequest) Validate() error { - if r.Backend == "" || r.InternalName == "" { - return fmt.Errorf("the following fields are mandatory: backend and internalName") - } - if _, err := base64.StdEncoding.DecodeString(r.PVCData); err != nil { - return fmt.Errorf("the pvcData field does not contain valid base64-encoded data: %v", err) - } - return nil -} - -type UpgradeVolumeRequest struct { - Type string `json:"type"` - Volume string `json:"volume"` -} - -func (r *UpgradeVolumeRequest) Validate() error { - if r.Volume == "" { - return fmt.Errorf("the following field is mandatory: volume") - } - if r.Type != "csi" { - return fmt.Errorf("the only supported type for volume upgrade is 'csi'") - } - return nil -} - -type ByVolumeExternalName []*VolumeExternal - -func (a ByVolumeExternalName) Len() int { return len(a) } -func (a ByVolumeExternalName) Less(i, j int) bool { return a[i].Config.Name < a[j].Config.Name } -func (a ByVolumeExternalName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/netapp/trident/storage/volume_transaction.go b/vendor/github.com/netapp/trident/storage/volume_transaction.go deleted file mode 100644 index 3737391ae..000000000 --- a/vendor/github.com/netapp/trident/storage/volume_transaction.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. 
- -package storage - -import v1 "k8s.io/api/core/v1" - -type VolumeOperation string - -const ( - AddVolume VolumeOperation = "addVolume" - DeleteVolume VolumeOperation = "deleteVolume" - ImportVolume VolumeOperation = "importVolume" - ResizeVolume VolumeOperation = "resizeVolume" - UpgradeVolume VolumeOperation = "upgradeVolume" - AddSnapshot VolumeOperation = "addSnapshot" - DeleteSnapshot VolumeOperation = "deleteSnapshot" -) - -type VolumeTransaction struct { - Config *VolumeConfig - SnapshotConfig *SnapshotConfig - PVUpgradeConfig *PVUpgradeConfig - Op VolumeOperation -} - -type PVUpgradeConfig struct { - PVCConfig *v1.PersistentVolumeClaim `json:"pvcConfig,omitempty"` - PVConfig *v1.PersistentVolume `json:"pvConfig,omitempty"` - OwnedPodsForPVC []string `json:"ownedPodsForPVC,omitempty"` -} - -// Name returns a unique identifier for the VolumeTransaction. Volume transactions should only -// be identified by their name, while snapshot transactions should be identified by their name as -// well as their volume name. It's possible that some situations will leave a delete transaction -// dangling; an add transaction should overwrite this. -func (t *VolumeTransaction) Name() string { - switch t.Op { - case AddSnapshot, DeleteSnapshot: - return t.SnapshotConfig.ID() - default: - return t.Config.Name - } -} diff --git a/vendor/github.com/netapp/trident/storage_attribute/bool.go b/vendor/github.com/netapp/trident/storage_attribute/bool.go deleted file mode 100644 index cf39f6d17..000000000 --- a/vendor/github.com/netapp/trident/storage_attribute/bool.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package storageattribute - -import ( - "fmt" -) - -func NewBoolOffer(offer bool) Offer { - return &boolOffer{ - Offer: offer, - } -} - -func NewBoolOfferFromOffers(offers ...Offer) Offer { - - anyTrueOffer := false - - for _, offer := range offers { - if bOffer, ok := offer.(*boolOffer); ok { - if bOffer.Offer { - anyTrueOffer = true - } - } - } - - // A boolOffer must hold either a true or false value. If any of the - // supplied offers are true, the combined result must be true. Otherwise, - // the supplied offers were all false, in which case the combined result - // must be false. - - if anyTrueOffer { - return &boolOffer{Offer: true} - } else { - return &boolOffer{Offer: false} - } -} - -// Matches is a boolean offer of true matches any request; a boolean offer of false -// only matches a false request. This assumes that the requested parameter -// will be passed into the driver. 
-func (o *boolOffer) Matches(r Request) bool { - br, ok := r.(*boolRequest) - if !ok { - return false - } - if o.Offer { - return true - } - return br.Request == o.Offer -} - -func (o *boolOffer) String() string { - return fmt.Sprintf("{Offer: %t}", o.Offer) -} - -func (o *boolOffer) ToString() string { - return fmt.Sprintf("%t", o.Offer) -} - -func NewBoolRequest(request bool) Request { - return &boolRequest{ - Request: request, - } -} - -func (r *boolRequest) Value() interface{} { - return r.Request -} - -func (r *boolRequest) GetType() Type { - return boolType -} - -func (r *boolRequest) String() string { - return fmt.Sprintf("%t", r.Request) -} diff --git a/vendor/github.com/netapp/trident/storage_attribute/common_attributes.go b/vendor/github.com/netapp/trident/storage_attribute/common_attributes.go deleted file mode 100644 index 2a99a14a9..000000000 --- a/vendor/github.com/netapp/trident/storage_attribute/common_attributes.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package storageattribute - -const ( - // Constants for integer storage category attributes - IOPS = "IOPS" - - // Constants for boolean storage category attributes - Snapshots = "snapshots" - Clones = "clones" - Encryption = "encryption" - - // Constants for string list attributes - ProvisioningType = "provisioningType" - BackendType = "backendType" - Media = "media" - Region = "region" - Zone = "zone" - - // Constants for label attributes - Labels = "labels" - Selector = "selector" - - // Testing constants - RecoveryTest = "recoveryTest" - UniqueOptions = "uniqueOptions" - TestingAttribute = "testingAttribute" - NonexistentBool = "nonexistentBool" - - // Values for media - HDD = "hdd" - SSD = "ssd" - Hybrid = "hybrid" - - // Values for provisioning type - Thick = "thick" - Thin = "thin" - - RequiredStorage = "requiredStorage" // deprecated, use additionalStoragePools - StoragePools = "storagePools" - AdditionalStoragePools = "additionalStoragePools" - ExcludeStoragePools = "excludeStoragePools" -) - -var attrTypes = map[string]Type{ - IOPS: intType, - Snapshots: boolType, - Clones: boolType, - Encryption: boolType, - ProvisioningType: stringType, - BackendType: stringType, - Media: stringType, - Region: stringType, - Zone: stringType, - Labels: labelType, - Selector: labelType, - RecoveryTest: boolType, - UniqueOptions: stringType, - TestingAttribute: boolType, - NonexistentBool: boolType, -} diff --git a/vendor/github.com/netapp/trident/storage_attribute/int.go b/vendor/github.com/netapp/trident/storage_attribute/int.go deleted file mode 100644 index 7e69fc891..000000000 --- a/vendor/github.com/netapp/trident/storage_attribute/int.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. 
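
The boolean offer semantics documented above (an offer of true matches any request, while an offer of false matches only a false request) can be seen in a short stand-alone sketch. The types here are simplified stand-ins rather than the real storageattribute package; the attribute names come from the constants in the deleted common_attributes.go.

```go
package main

import "fmt"

// boolOffer and boolRequest are simplified stand-ins; only the matching
// semantics of the removed bool.go are reproduced.
type boolOffer struct{ Offer bool }
type boolRequest struct{ Request bool }

// matches mirrors boolOffer.Matches: a true offer satisfies any request,
// a false offer satisfies only a false request.
func (o boolOffer) matches(r boolRequest) bool {
	if o.Offer {
		return true
	}
	return r.Request == o.Offer
}

func main() {
	offers := map[string]boolOffer{
		"snapshots":  {Offer: true},  // pool advertises snapshot support
		"encryption": {Offer: false}, // pool does not advertise encryption
	}
	requests := map[string]boolRequest{
		"snapshots":  {Request: true},
		"encryption": {Request: true},
	}
	for name, req := range requests {
		fmt.Printf("%s: offer matches request = %v\n", name, offers[name].matches(req))
	}
}
```

With these inputs the snapshots request matches and the encryption request does not, which is exactly the asymmetry the doc comment above describes.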
-
-package storageattribute
-
-import (
-	"fmt"
-)
-
-func NewIntOffer(min, max int) Offer {
-	return &intOffer{
-		Min: min,
-		Max: max,
-	}
-}
-
-func (o *intOffer) Matches(r Request) bool {
-	ir, ok := r.(*intRequest)
-	if !ok {
-		return false
-	}
-	return ir.Request >= o.Min && ir.Request <= o.Max
-}
-
-func (o *intOffer) String() string {
-	return fmt.Sprintf("{Min: %d, Max: %d}", o.Min, o.Max)
-}
-
-func (o *intOffer) ToString() string {
-	return o.String()
-}
-
-func NewIntRequest(request int) Request {
-	return &intRequest{
-		Request: request,
-	}
-}
-
-func (r *intRequest) Value() interface{} {
-	return r.Request
-}
-
-func (r *intRequest) GetType() Type {
-	return intType
-}
-
-func (r *intRequest) String() string {
-	return fmt.Sprintf("%d", r.Request)
-}
diff --git a/vendor/github.com/netapp/trident/storage_attribute/label.go b/vendor/github.com/netapp/trident/storage_attribute/label.go
deleted file mode 100644
index c8e6f5995..000000000
--- a/vendor/github.com/netapp/trident/storage_attribute/label.go
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2018 NetApp, Inc. All Rights Reserved.
-
-package storageattribute
-
-import (
-	"fmt"
-	"regexp"
-	"strings"
-
-	log "github.com/sirupsen/logrus"
-)
-
-var (
-	labelEqualRegex     = regexp.MustCompile(`^(?P<labelName>[\w]+)\s*={1,2}\s*(?P<labelValue>[\w]+)$`)
-	labelNotEqualRegex  = regexp.MustCompile(`^(?P<labelName>[\w]+)\s*!=\s*(?P<labelValue>[\w]+)$`)
-	labelInSetRegex     = regexp.MustCompile(`^(?P<labelName>[\w]+)\s+in\s+[(](?P<labelSet>[\s\w,]+)[)]$`)
-	labelNotInSetRegex  = regexp.MustCompile(`^(?P<labelName>[\w]+)\s+notin\s+[(](?P<labelSet>[\s\w,]+)[)]$`)
-	labelExistsRegex    = regexp.MustCompile(`^(?P<labelName>[\w]+)$`)
-	labelNotExistsRegex = regexp.MustCompile(`^!(?P<labelName>[\w]+)$`)
-)
-
-func NewLabelOffer(labelMaps ...map[string]string) Offer {
-
-	// Combine multiple maps into a single map
-	offers := make(map[string]string)
-
-	for _, labelMap := range labelMaps {
-		for k, v := range labelMap {
-			offers[k] = v
-		}
-	}
-
-	log.WithField("offers", offers).Debug("NewLabelOffer")
-
-	return &labelOffer{
-		Offers: offers,
-	}
-}
-
-func (o *labelOffer) Matches(r Request) bool {
-
-	log.WithFields(log.Fields{
-		"request": r,
-		"offers":  o.Offers,
-	}).Debug("Matches")
-
-	// Check that this is a label request
-	request, ok := r.(*labelRequest)
-	if !ok {
-		return false
-	}
-
-	// Check that each selector finds a match among the offered labels
-	for _, selector := range request.selectors {
-		if !selector.Matches(*o) {
-			return false
-		}
-	}
-
-	return true
-}
-
-func (o *labelOffer) String() string {
-	return fmt.Sprintf("{Offers: %v}", o.Offers)
-}
-
-func (o *labelOffer) ToString() string {
-	return fmt.Sprintf("%v", o.Offers)
-}
-
-func NewLabelRequest(request string) (Request, error) {
-
-	log.WithField("request", request).Debug("NewLabelRequest")
-
-	if len(request) == 0 {
-		return nil, fmt.Errorf("label selector may not be empty")
-	}
-
-	// Split selector line into individual selectors and parse each according to its type
-	var selectors []labelSelector
-	for _, r := range strings.Split(request, ";") {
-
-		r = strings.TrimSpace(r)
-
-		if labelEqualRegex.MatchString(r) {
-			selectors = append(selectors, newLabelEqualRequest(r))
-		} else if labelNotEqualRegex.MatchString(r) {
-			selectors = append(selectors, newLabelNotEqualRequest(r))
-		} else if labelInSetRegex.MatchString(r) {
-			selectors = append(selectors, newLabelInSetRequest(r))
-		} else if labelNotInSetRegex.MatchString(r) {
-			selectors = append(selectors, newLabelNotInSetRequest(r))
-		} else if labelExistsRegex.MatchString(r) {
-			selectors = append(selectors,
newLabelExistsRequest(r)) - } else if labelNotExistsRegex.MatchString(r) { - selectors = append(selectors, newLabelNotExistsRequest(r)) - } else { - return nil, fmt.Errorf("invalid label selector: %s", r) - } - } - - return &labelRequest{ - Request: request, - selectors: selectors, - }, nil -} - -func NewLabelRequestMustCompile(request string) Request { - - r, err := NewLabelRequest(request) - if err != nil { - panic(err) - } - return r -} - -func (r *labelRequest) Value() interface{} { - return r.Request -} - -func (r *labelRequest) GetType() Type { - return labelType -} - -func (r *labelRequest) String() string { - return r.Request -} - -// Common interface for the various types of label requests (==, !=, in, notin, exists) -type labelSelector interface { - Matches(offer labelOffer) bool -} - -///////////////////////////////////////////////////////////////////////////// -// labelSelector for equality (equals) -///////////////////////////////////////////////////////////////////////////// - -type labelEqualRequest struct { - labelName string - labelValue string -} - -func newLabelEqualRequest(request string) labelSelector { - - match := labelEqualRegex.FindStringSubmatch(request) - paramsMap := make(map[string]string) - for i, name := range labelEqualRegex.SubexpNames() { - if i > 0 && i <= len(match) { - paramsMap[name] = match[i] - } - } - - return &labelEqualRequest{ - labelName: paramsMap["labelName"], - labelValue: paramsMap["labelValue"], - } -} - -func (r *labelEqualRequest) Matches(offer labelOffer) bool { - for labelName, labelValue := range offer.Offers { - if r.labelName == labelName && r.labelValue == labelValue { - return true - } - } - return false -} - -///////////////////////////////////////////////////////////////////////////// -// labelSelector for equality (not equals) -///////////////////////////////////////////////////////////////////////////// - -type labelNotEqualRequest struct { - labelName string - labelValue string -} - -func newLabelNotEqualRequest(request string) labelSelector { - - match := labelNotEqualRegex.FindStringSubmatch(request) - paramsMap := make(map[string]string) - for i, name := range labelNotEqualRegex.SubexpNames() { - if i > 0 && i <= len(match) { - paramsMap[name] = match[i] - } - } - - return &labelNotEqualRequest{ - labelName: paramsMap["labelName"], - labelValue: paramsMap["labelValue"], - } -} - -func (r *labelNotEqualRequest) Matches(offer labelOffer) bool { - for labelName, labelValue := range offer.Offers { - if r.labelName == labelName && r.labelValue != labelValue { - return true - } - } - return false -} - -///////////////////////////////////////////////////////////////////////////// -// labelSelector for sets (in) -///////////////////////////////////////////////////////////////////////////// - -type labelInSetRequest struct { - labelName string - labelSet []string -} - -func newLabelInSetRequest(request string) labelSelector { - - match := labelInSetRegex.FindStringSubmatch(request) - paramsMap := make(map[string]string) - for i, name := range labelInSetRegex.SubexpNames() { - if i > 0 && i <= len(match) { - paramsMap[name] = match[i] - } - } - - labelSet := make([]string, 0) - for _, value := range strings.Split(paramsMap["labelSet"], ",") { - value := strings.TrimSpace(value) - if value != "" { - labelSet = append(labelSet, value) - } - } - - return &labelInSetRequest{ - labelName: paramsMap["labelName"], - labelSet: labelSet, - } -} - -func (r *labelInSetRequest) Matches(offer labelOffer) bool { - for labelName, labelValue := 
range offer.Offers { - if r.labelName == labelName { - // Found match in key - for _, setValue := range r.labelSet { - if setValue == labelValue { - // Found match in values set - return true - } - } - } - } - return false -} - -///////////////////////////////////////////////////////////////////////////// -// labelSelector for sets (notin) -///////////////////////////////////////////////////////////////////////////// - -type labelNotInSetRequest struct { - labelName string - labelSet []string -} - -func newLabelNotInSetRequest(request string) labelSelector { - - match := labelNotInSetRegex.FindStringSubmatch(request) - paramsMap := make(map[string]string) - for i, name := range labelNotInSetRegex.SubexpNames() { - if i > 0 && i <= len(match) { - paramsMap[name] = match[i] - } - } - - labelSet := make([]string, 0) - for _, value := range strings.Split(paramsMap["labelSet"], ",") { - value := strings.TrimSpace(value) - if value != "" { - labelSet = append(labelSet, value) - } - } - - return &labelNotInSetRequest{ - labelName: paramsMap["labelName"], - labelSet: labelSet, - } -} - -func (r *labelNotInSetRequest) Matches(offer labelOffer) bool { - for labelName, labelValue := range offer.Offers { - if r.labelName == labelName { - // Found match in key - for _, setValue := range r.labelSet { - if setValue == labelValue { - // Found match in set --> no match - return false - } - } - // Found key but no match in set --> match - return true - } - } - // Found no match in key --> match - return true -} - -///////////////////////////////////////////////////////////////////////////// -// labelSelector for sets (exists) -///////////////////////////////////////////////////////////////////////////// - -type labelExistsRequest struct { - labelName string -} - -func newLabelExistsRequest(request string) labelSelector { - - match := labelExistsRegex.FindStringSubmatch(request) - paramsMap := make(map[string]string) - for i, name := range labelExistsRegex.SubexpNames() { - if i > 0 && i <= len(match) { - paramsMap[name] = match[i] - } - } - - return &labelExistsRequest{ - labelName: paramsMap["labelName"], - } -} - -func (r *labelExistsRequest) Matches(offer labelOffer) bool { - for labelName := range offer.Offers { - if r.labelName == labelName { - // Found match in key --> match - return true - } - } - // Found no match in key --> no match - return false -} - -///////////////////////////////////////////////////////////////////////////// -// labelSelector for sets (not exists) -///////////////////////////////////////////////////////////////////////////// - -type labelNotExistsRequest struct { - labelName string -} - -func newLabelNotExistsRequest(request string) labelSelector { - - match := labelNotExistsRegex.FindStringSubmatch(request) - paramsMap := make(map[string]string) - for i, name := range labelNotExistsRegex.SubexpNames() { - if i > 0 && i <= len(match) { - paramsMap[name] = match[i] - } - } - - return &labelNotExistsRequest{ - labelName: paramsMap["labelName"], - } -} - -func (r *labelNotExistsRequest) Matches(offer labelOffer) bool { - for labelName := range offer.Offers { - if r.labelName == labelName { - // Found match in key --> no match - return false - } - } - // Found no match in key --> match - return true -} diff --git a/vendor/github.com/netapp/trident/storage_attribute/offer.go b/vendor/github.com/netapp/trident/storage_attribute/offer.go deleted file mode 100644 index e227d9bde..000000000 --- a/vendor/github.com/netapp/trident/storage_attribute/offer.go +++ /dev/null @@ -1,49 +0,0 @@ 
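
The label selectors removed above cover the ==, !=, in, notin, exists and not-exists forms, and an offer matches only when every selector in the request matches. A minimal sketch of just the "in set" and "not exists" rules follows; it uses simplified stand-in types rather than the real labelRequest/labelOffer machinery and omits the regex-based parsing of selector strings.

```go
package main

import "fmt"

// labelInSet is a simplified stand-in for labelInSetRequest: the offered value
// for the named label must be one of the values in the set.
type labelInSet struct {
	name string
	set  []string
}

func (s labelInSet) matches(offers map[string]string) bool {
	v, ok := offers[s.name]
	if !ok {
		return false
	}
	for _, want := range s.set {
		if want == v {
			return true
		}
	}
	return false
}

// labelNotExists is a simplified stand-in for labelNotExistsRequest: it matches
// only when the named label is absent from the offer.
type labelNotExists struct{ name string }

func (s labelNotExists) matches(offers map[string]string) bool {
	_, ok := offers[s.name]
	return !ok
}

func main() {
	// Labels offered by a hypothetical storage pool.
	offered := map[string]string{"region": "us-east", "performance": "gold"}

	inSet := labelInSet{name: "region", set: []string{"us-east", "us-west"}}
	notExists := labelNotExists{name: "protection"}

	// A request matches only if every selector matches, mirroring labelOffer.Matches.
	fmt.Println("all selectors match:", inSet.matches(offered) && notExists.matches(offered))
}
```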
-// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package storageattribute - -import ( - "encoding/json" - "fmt" -) - -func UnmarshalOfferMap(mapJSON json.RawMessage) (map[string]Offer, error) { - var tmp map[string]json.RawMessage - ret := make(map[string]Offer) - - err := json.Unmarshal(mapJSON, &tmp) - if err != nil { - return nil, fmt.Errorf("unable to unmarshal map: %v", err) - } - for name, rawAttr := range tmp { - var ( - final Offer - ) - - baseType, ok := attrTypes[name] - if !ok { - return nil, fmt.Errorf("unknown storage attribute: %s", name) - } - switch { - case baseType == boolType: - final = new(boolOffer) - case baseType == intType: - final = new(intOffer) - case baseType == stringType: - final = new(stringOffer) - case baseType == labelType: - final = new(labelOffer) - default: - return nil, fmt.Errorf("offer %s has unrecognized type %s", name, - baseType) - } - err = json.Unmarshal(rawAttr, final) - if err != nil { - return nil, fmt.Errorf("unable to fully unmarshal request %s: %v", - name, err) - } - ret[name] = final - } - - return ret, nil -} diff --git a/vendor/github.com/netapp/trident/storage_attribute/request.go b/vendor/github.com/netapp/trident/storage_attribute/request.go deleted file mode 100644 index 7f7df7dd2..000000000 --- a/vendor/github.com/netapp/trident/storage_attribute/request.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package storageattribute - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -func UnmarshalRequestMap(mapJSON json.RawMessage) ( - map[string]Request, error, -) { - var tmp map[string]string - ret := make(map[string]Request) - - if mapJSON == nil { - return nil, nil - } - err := json.Unmarshal(mapJSON, &tmp) - if err != nil { - return nil, fmt.Errorf("unable to unmarshal map: %v", err) - } - for name, stringVal := range tmp { - ret[name], err = CreateAttributeRequestFromAttributeValue(name, stringVal) - if err != nil { - return nil, err - } - } - return ret, nil -} - -func MarshalRequestMap(requestMap map[string]Request) ([]byte, error) { - if requestMap == nil { - return nil, nil - } - genericMap := make(map[string]string, len(requestMap)) - for k, v := range requestMap { - genericMap[k] = v.String() - } - return json.Marshal(genericMap) -} - -func CreateAttributeRequestFromAttributeValue(name, val string) (Request, error) { - var req Request - var err error - - valType, ok := attrTypes[name] - if !ok { - return nil, fmt.Errorf("unrecognized storage attribute: %s", name) - } - switch valType { - case boolType: - v, err := strconv.ParseBool(val) - if err != nil { - return nil, fmt.Errorf("storage attribute value (%s) doesn't match the specified type (%s)", val, valType) - } - req = NewBoolRequest(v) - case intType: - v, err := strconv.ParseInt(val, 10, 0) - if err != nil { - return nil, fmt.Errorf("storage attribute value (%s) doesn't match the specified type (%s)", val, valType) - } - req = NewIntRequest(int(v)) - case stringType: - req = NewStringRequest(val) - case labelType: - req, err = NewLabelRequest(val) - if err != nil { - return nil, fmt.Errorf("storage attribute value (%s) doesn't match the specified type (%s)", val, valType) - } - default: - return nil, fmt.Errorf("unrecognized type for a storage attribute request: %s", valType) - } - return req, nil -} - -func CreateBackendStoragePoolsMapFromEncodedString( - arg string, -) (map[string][]string, error) { - backendPoolsMap := make(map[string][]string) - backendPoolsList := strings.Split(arg, ";") - for _, 
backendPools := range backendPoolsList { - vals := strings.SplitN(backendPools, ":", 2) - if len(vals) != 2 || vals[0] == "" || vals[1] == "" { - return nil, fmt.Errorf("the encoded backend-storage pool string does not have the right format") - } - backend := vals[0] - Pools := strings.Split(vals[1], ",") - backendPoolsMap[backend] = Pools - } - return backendPoolsMap, nil -} diff --git a/vendor/github.com/netapp/trident/storage_attribute/string.go b/vendor/github.com/netapp/trident/storage_attribute/string.go deleted file mode 100644 index 91197ba80..000000000 --- a/vendor/github.com/netapp/trident/storage_attribute/string.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package storageattribute - -import ( - "fmt" - "strings" -) - -func NewStringOffer(offers ...string) Offer { - return &stringOffer{ - Offers: offers, - } -} - -func NewStringOfferFromOffers(offers ...Offer) Offer { - - // Use a map as a set to deduplicate the string offers - offerMap := make(map[string]struct{}) - - // For each offer that is a stringOffer, place its contents in the map - for _, offer := range offers { - if sOffer, ok := offer.(*stringOffer); ok { - for _, s := range sOffer.Offers { - offerMap[s] = struct{}{} - } - } - } - - offerKeys := make([]string, 0) - for key := range offerMap { - offerKeys = append(offerKeys, key) - } - - return &stringOffer{ - Offers: offerKeys, - } -} - -func (o *stringOffer) Matches(r Request) bool { - sr, ok := r.(*stringRequest) - if !ok { - return false - } - for _, s := range o.Offers { - if s == sr.Request { - return true - } - } - return false -} - -func (o *stringOffer) String() string { - return fmt.Sprintf("{Offers: %s}", strings.Join(o.Offers, ",")) -} - -func (o *stringOffer) ToString() string { - return fmt.Sprintf("%s", strings.Join(o.Offers, ",")) -} - -func NewStringRequest(request string) Request { - return &stringRequest{ - Request: request, - } -} - -func (r *stringRequest) Value() interface{} { - return r.Request -} - -func (r *stringRequest) GetType() Type { - return stringType -} - -func (r *stringRequest) String() string { - return r.Request -} diff --git a/vendor/github.com/netapp/trident/storage_attribute/types.go b/vendor/github.com/netapp/trident/storage_attribute/types.go deleted file mode 100644 index 88c60509e..000000000 --- a/vendor/github.com/netapp/trident/storage_attribute/types.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package storageattribute - -type Offer interface { - Matches(requested Request) bool - ToString() string -} - -// At the moment, there aren't any terribly useful methods to put here, but -// there might be. This is more here for symmetry at the moment. 
-type Request interface { - GetType() Type - Value() interface{} - String() string -} - -type Type string - -const ( - intType Type = "int" - boolType Type = "bool" - stringType Type = "string" - labelType Type = "label" -) - -type intOffer struct { - Min int `json:"min"` - Max int `json:"max"` -} - -type intRequest struct { - Request int `json:"request"` -} - -type boolOffer struct { - Offer bool `json:"offer"` -} - -type boolRequest struct { - Request bool `json:"request"` -} - -type stringOffer struct { - Offers []string `json:"offer"` -} - -type stringRequest struct { - Request string `json:"request"` -} - -type labelOffer struct { - Offers map[string]string `json:"offer"` -} - -type labelRequest struct { - Request string `json:"request"` - selectors []labelSelector -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/common.go b/vendor/github.com/netapp/trident/storage_drivers/common.go deleted file mode 100644 index f19f8d74a..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/common.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package storagedrivers - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - - log "github.com/sirupsen/logrus" - - trident "github.com/netapp/trident/config" - "github.com/netapp/trident/utils" -) - -// ValidateCommonSettings attempts to "partially" decode the JSON into just the settings in CommonStorageDriverConfig -func ValidateCommonSettings(configJSON string) (*CommonStorageDriverConfig, error) { - - config := &CommonStorageDriverConfig{} - - // Decode configJSON into config object - err := json.Unmarshal([]byte(configJSON), &config) - if err != nil { - return nil, fmt.Errorf("could not parse JSON configuration: %v", err) - } - - // Load storage drivers and validate the one specified actually exists - if config.StorageDriverName == "" { - return nil, errors.New("missing storage driver name in configuration file") - } - - // Validate config file version information - if config.Version != ConfigVersion { - return nil, fmt.Errorf("unexpected config file version; found %d, expected %d", config.Version, ConfigVersion) - } - - // Warn about ignored fields in common config if any are set - if config.DisableDelete { - log.WithFields(log.Fields{ - "driverName": config.StorageDriverName, - }).Warn("disableDelete set in backend config. This will be ignored.") - } - if config.Debug { - log.Warnf("The debug setting in the configuration file is now ignored; " + - "use the command line --debug switch instead.") - } - - // The storage prefix may have three states: nil (no prefix specified, drivers will use - // a default prefix), "" (specified as an empty string, drivers will use no prefix), and - // "" (a prefix specified in the backend config file). For historical reasons, - // the value is serialized as a raw JSON string (a byte array), and it may take multiple - // forms. An empty byte array, or an array with the ASCII values {} or null, is interpreted - // as nil (no prefix specified). A byte array containing two double-quote characters ("") - // is an empty string. A byte array containing characters enclosed in double quotes is - // a specified prefix. Anything else is rejected as invalid. The storage prefix is exposed - // to the rest of the code in StoragePrefix; only serialization code such as this should - // be concerned with StoragePrefixRaw. 
- - if len(config.StoragePrefixRaw) > 0 { - rawPrefix := string(config.StoragePrefixRaw) - if rawPrefix == "{}" || rawPrefix == "null" { - config.StoragePrefix = nil - log.Debugf("Storage prefix is %s, will use default prefix.", rawPrefix) - } else if rawPrefix == "\"\"" { - empty := "" - config.StoragePrefix = &empty - log.Debug("Storage prefix is empty, will use no prefix.") - } else if strings.HasPrefix(rawPrefix, "\"") && strings.HasSuffix(rawPrefix, "\"") { - prefix := string(config.StoragePrefixRaw[1 : len(config.StoragePrefixRaw)-1]) - config.StoragePrefix = &prefix - log.WithField("storagePrefix", prefix).Debug("Parsed storage prefix.") - } else { - return nil, fmt.Errorf("invalid value for storage prefix: %v", config.StoragePrefixRaw) - } - } else { - config.StoragePrefix = nil - log.Debug("Storage prefix is absent, will use default prefix.") - } - - // Validate volume size limit (if set) - if config.LimitVolumeSize != "" { - if _, err = utils.ConvertSizeToBytes(config.LimitVolumeSize); err != nil { - return nil, fmt.Errorf("invalid value for limitVolumeSize: %v", config.LimitVolumeSize) - } - } - - log.Debugf("Parsed commonConfig: %+v", *config) - - return config, nil -} - -func GetDefaultStoragePrefix(context trident.DriverContext) string { - switch context { - default: - return "" - case trident.ContextKubernetes, trident.ContextCSI: - return DefaultTridentStoragePrefix - case trident.ContextDocker: - return DefaultDockerStoragePrefix - } -} - -func GetDefaultIgroupName(context trident.DriverContext) string { - switch context { - default: - fallthrough - case trident.ContextKubernetes, trident.ContextCSI: - return DefaultTridentIgroupName - case trident.ContextDocker: - return DefaultDockerIgroupName - } -} - -func SanitizeCommonStorageDriverConfig(c *CommonStorageDriverConfig) { - if c != nil && c.StoragePrefixRaw == nil { - c.StoragePrefixRaw = json.RawMessage("{}") - } -} - -func GetCommonInternalVolumeName(c *CommonStorageDriverConfig, name string) string { - - prefixToUse := trident.OrchestratorName - - // If a prefix was specified in the configuration, use that. - if c.StoragePrefix != nil { - prefixToUse = *c.StoragePrefix - } - - // Special case an empty prefix so that we don't get a delimiter in front. - if prefixToUse == "" { - return name - } - - return fmt.Sprintf("%s-%s", prefixToUse, name) -} - -// CheckVolumeSizeLimits if a limit has been set, ensures the requestedSize is under it. 
-func CheckVolumeSizeLimits(requestedSizeInt uint64, config *CommonStorageDriverConfig) (bool, uint64, error) { - - requestedSize := float64(requestedSizeInt) - // If the user specified a limit for volume size, parse and enforce it - limitVolumeSize := config.LimitVolumeSize - log.WithFields(log.Fields{ - "limitVolumeSize": limitVolumeSize, - }).Debugf("Limits") - if limitVolumeSize == "" { - log.Debugf("No limits specified, not limiting volume size") - return false, 0, nil - } - - volumeSizeLimit := uint64(0) - volumeSizeLimitStr, parseErr := utils.ConvertSizeToBytes(limitVolumeSize) - if parseErr != nil { - return false, 0, fmt.Errorf("error parsing limitVolumeSize: %v", parseErr) - } - volumeSizeLimit, _ = strconv.ParseUint(volumeSizeLimitStr, 10, 64) - - log.WithFields(log.Fields{ - "limitVolumeSize": limitVolumeSize, - "volumeSizeLimit": volumeSizeLimit, - "requestedSizeBytes": requestedSize, - }).Debugf("Comparing limits") - - if requestedSize > float64(volumeSizeLimit) { - return true, volumeSizeLimit, fmt.Errorf("requested size: %1.f > the size limit: %d", requestedSize, volumeSizeLimit) - } - - return true, volumeSizeLimit, nil -} - -// Clone will create a copy of the source object and store it into the destination object (which must be a pointer) -func Clone(source, destination interface{}) { - if reflect.TypeOf(destination).Kind() != reflect.Ptr { - log.Error("storage_drivers.Clone, destination parameter must be a pointer") - } - - buff := new(bytes.Buffer) - enc := gob.NewEncoder(buff) - dec := gob.NewDecoder(buff) - enc.Encode(source) - dec.Decode(destination) -} - -// CheckSupportedFilesystem checks for a supported file system type -func CheckSupportedFilesystem(fs string, volumeInternalName string) (string, error) { - fstype := strings.ToLower(fs) - switch fstype { - case FsXfs, FsExt3, FsExt4, FsRaw: - log.WithFields(log.Fields{"fileSystemType": fstype, "name": volumeInternalName}).Debug("Filesystem format.") - return fstype, nil - default: - return fstype, fmt.Errorf("unsupported fileSystemType option: %s", fstype) - } -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/config.go b/vendor/github.com/netapp/trident/storage_drivers/config.go deleted file mode 100644 index 68f09d05e..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/config.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package storagedrivers - -// ConfigVersion is the expected version specified in the config file -const ConfigVersion = 1 - -// Default storage prefix -const DefaultDockerStoragePrefix = "netappdvp_" -const DefaultTridentStoragePrefix = "trident_" - -// Default SAN igroup / host group names -const DefaultDockerIgroupName = "netappdvp" -const DefaultTridentIgroupName = "trident" - -// Storage driver names specified in the config file, etc. 
-const ( - EseriesIscsiStorageDriverName = "eseries-iscsi" - OntapNASStorageDriverName = "ontap-nas" - OntapNASFlexGroupStorageDriverName = "ontap-nas-flexgroup" - OntapNASQtreeStorageDriverName = "ontap-nas-economy" - OntapSANStorageDriverName = "ontap-san" - OntapSANEconomyStorageDriverName = "ontap-san-economy" - SolidfireSANStorageDriverName = "solidfire-san" - AWSNFSStorageDriverName = "aws-cvs" - AzureNFSStorageDriverName = "azure-netapp-files" - GCPNFSStorageDriverName = "gcp-cvs" - FakeStorageDriverName = "fake" -) - -// Filesystem types -const ( - FsXfs = "xfs" - FsExt3 = "ext3" - FsExt4 = "ext4" - FsRaw = "raw" -) - -// Default Filesystem value -const DefaultFileSystemType = FsExt4 - -const UnsetPool = "" -const DefaultVolumeSize = "1G" diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-aggr-space-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-aggr-space-get-iter.go deleted file mode 100644 index de9acb2bd..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-aggr-space-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrSpaceGetIterRequest is a structure to represent a aggr-space-get-iter Request ZAPI object -type AggrSpaceGetIterRequest struct { - XMLName xml.Name `xml:"aggr-space-get-iter"` - DesiredAttributesPtr *AggrSpaceGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *AggrSpaceGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// AggrSpaceGetIterResponse is a structure to represent a aggr-space-get-iter Response ZAPI object -type AggrSpaceGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result AggrSpaceGetIterResponseResult `xml:"results"` -} - -// NewAggrSpaceGetIterResponse is a factory method for creating new instances of AggrSpaceGetIterResponse objects -func NewAggrSpaceGetIterResponse() *AggrSpaceGetIterResponse { - return &AggrSpaceGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSpaceGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *AggrSpaceGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// AggrSpaceGetIterResponseResult is a structure to represent a aggr-space-get-iter Response Result ZAPI object -type AggrSpaceGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *AggrSpaceGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewAggrSpaceGetIterRequest is a factory method for creating new instances of AggrSpaceGetIterRequest objects -func NewAggrSpaceGetIterRequest() *AggrSpaceGetIterRequest { - return &AggrSpaceGetIterRequest{} -} - -// NewAggrSpaceGetIterResponseResult is a factory method for creating new instances of AggrSpaceGetIterResponseResult objects -func NewAggrSpaceGetIterResponseResult() 
*AggrSpaceGetIterResponseResult { - return &AggrSpaceGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrSpaceGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *AggrSpaceGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSpaceGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSpaceGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *AggrSpaceGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*AggrSpaceGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *AggrSpaceGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*AggrSpaceGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "AggrSpaceGetIterRequest", NewAggrSpaceGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*AggrSpaceGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *AggrSpaceGetIterRequest) executeWithIteration(zr *ZapiRunner) (*AggrSpaceGetIterResponse, error) { - combined := NewAggrSpaceGetIterResponse() - combined.Result.SetAttributesList(AggrSpaceGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(AggrSpaceGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// AggrSpaceGetIterRequestDesiredAttributes is a wrapper -type AggrSpaceGetIterRequestDesiredAttributes struct { - XMLName xml.Name 
`xml:"desired-attributes"` - SpaceInformationPtr *SpaceInformationType `xml:"space-information"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSpaceGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SpaceInformation is a 'getter' method -func (o *AggrSpaceGetIterRequestDesiredAttributes) SpaceInformation() SpaceInformationType { - r := *o.SpaceInformationPtr - return r -} - -// SetSpaceInformation is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterRequestDesiredAttributes) SetSpaceInformation(newValue SpaceInformationType) *AggrSpaceGetIterRequestDesiredAttributes { - o.SpaceInformationPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *AggrSpaceGetIterRequest) DesiredAttributes() AggrSpaceGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterRequest) SetDesiredAttributes(newValue AggrSpaceGetIterRequestDesiredAttributes) *AggrSpaceGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *AggrSpaceGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterRequest) SetMaxRecords(newValue int) *AggrSpaceGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// AggrSpaceGetIterRequestQuery is a wrapper -type AggrSpaceGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - SpaceInformationPtr *SpaceInformationType `xml:"space-information"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSpaceGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SpaceInformation is a 'getter' method -func (o *AggrSpaceGetIterRequestQuery) SpaceInformation() SpaceInformationType { - r := *o.SpaceInformationPtr - return r -} - -// SetSpaceInformation is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterRequestQuery) SetSpaceInformation(newValue SpaceInformationType) *AggrSpaceGetIterRequestQuery { - o.SpaceInformationPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *AggrSpaceGetIterRequest) Query() AggrSpaceGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterRequest) SetQuery(newValue AggrSpaceGetIterRequestQuery) *AggrSpaceGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *AggrSpaceGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterRequest) SetTag(newValue string) *AggrSpaceGetIterRequest { - o.TagPtr = &newValue - return o -} - -// AggrSpaceGetIterResponseResultAttributesList is a wrapper -type AggrSpaceGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - SpaceInformationPtr []SpaceInformationType `xml:"space-information"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSpaceGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SpaceInformation is a 'getter' 
method -func (o *AggrSpaceGetIterResponseResultAttributesList) SpaceInformation() []SpaceInformationType { - r := o.SpaceInformationPtr - return r -} - -// SetSpaceInformation is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterResponseResultAttributesList) SetSpaceInformation(newValue []SpaceInformationType) *AggrSpaceGetIterResponseResultAttributesList { - newSlice := make([]SpaceInformationType, len(newValue)) - copy(newSlice, newValue) - o.SpaceInformationPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *AggrSpaceGetIterResponseResultAttributesList) values() []SpaceInformationType { - r := o.SpaceInformationPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterResponseResultAttributesList) setValues(newValue []SpaceInformationType) *AggrSpaceGetIterResponseResultAttributesList { - newSlice := make([]SpaceInformationType, len(newValue)) - copy(newSlice, newValue) - o.SpaceInformationPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *AggrSpaceGetIterResponseResult) AttributesList() AggrSpaceGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterResponseResult) SetAttributesList(newValue AggrSpaceGetIterResponseResultAttributesList) *AggrSpaceGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *AggrSpaceGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterResponseResult) SetNextTag(newValue string) *AggrSpaceGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *AggrSpaceGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *AggrSpaceGetIterResponseResult) SetNumRecords(newValue int) *AggrSpaceGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-clone-create.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-clone-create.go deleted file mode 100644 index 336cbe259..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-clone-create.go +++ /dev/null @@ -1,400 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// CloneCreateRequest is a structure to represent a clone-create Request ZAPI object -type CloneCreateRequest struct { - XMLName xml.Name `xml:"clone-create"` - AutodeletePtr *bool `xml:"autodelete"` - BlockRangesPtr *CloneCreateRequestBlockRanges `xml:"block-ranges"` - BypassLicenseCheckPtr *bool `xml:"bypass-license-check"` - BypassThrottlePtr *bool `xml:"bypass-throttle"` - DestinationExistsPtr *bool `xml:"destination-exists"` - DestinationPathPtr *string `xml:"destination-path"` - DestinationVolumePtr *string `xml:"destination-volume"` - FixedBlockCountPtr *int `xml:"fixed-block-count"` - IgnoreLocksPtr *bool `xml:"ignore-locks"` - IgnoreStreamsPtr *bool `xml:"ignore-streams"` - IsBackupPtr *bool `xml:"is-backup"` - IsFixedBlockCountPtr *bool `xml:"is-fixed-block-count"` - IsVvolBackupPtr *bool `xml:"is-vvol-backup"` - LunSerialNumberPtr *string `xml:"lun-serial-number"` - 
NosplitEntryPtr *bool `xml:"nosplit-entry"` - QosPolicyGroupNamePtr *string `xml:"qos-policy-group-name"` - SnapshotNamePtr *string `xml:"snapshot-name"` - SourcePathPtr *string `xml:"source-path"` - SpaceReservePtr *bool `xml:"space-reserve"` - TokenUuidPtr *string `xml:"token-uuid"` - VolumePtr *string `xml:"volume"` -} - -// CloneCreateResponse is a structure to represent a clone-create Response ZAPI object -type CloneCreateResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result CloneCreateResponseResult `xml:"results"` -} - -// NewCloneCreateResponse is a factory method for creating new instances of CloneCreateResponse objects -func NewCloneCreateResponse() *CloneCreateResponse { - return &CloneCreateResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o CloneCreateResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *CloneCreateResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// CloneCreateResponseResult is a structure to represent a clone-create Response Result ZAPI object -type CloneCreateResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewCloneCreateRequest is a factory method for creating new instances of CloneCreateRequest objects -func NewCloneCreateRequest() *CloneCreateRequest { - return &CloneCreateRequest{} -} - -// NewCloneCreateResponseResult is a factory method for creating new instances of CloneCreateResponseResult objects -func NewCloneCreateResponseResult() *CloneCreateResponseResult { - return &CloneCreateResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *CloneCreateRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *CloneCreateResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o CloneCreateRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o CloneCreateResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *CloneCreateRequest) ExecuteUsing(zr *ZapiRunner) (*CloneCreateResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *CloneCreateRequest) executeWithoutIteration(zr *ZapiRunner) (*CloneCreateResponse, error) { - result, err := zr.ExecuteUsing(o, "CloneCreateRequest", NewCloneCreateResponse()) - if result == nil { - return nil, err - } - return 
result.(*CloneCreateResponse), err -} - -// Autodelete is a 'getter' method -func (o *CloneCreateRequest) Autodelete() bool { - r := *o.AutodeletePtr - return r -} - -// SetAutodelete is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetAutodelete(newValue bool) *CloneCreateRequest { - o.AutodeletePtr = &newValue - return o -} - -// CloneCreateRequestBlockRanges is a wrapper -type CloneCreateRequestBlockRanges struct { - XMLName xml.Name `xml:"block-ranges"` - BlockRangePtr []BlockRangeType `xml:"block-range"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o CloneCreateRequestBlockRanges) String() string { - return ToString(reflect.ValueOf(o)) -} - -// BlockRange is a 'getter' method -func (o *CloneCreateRequestBlockRanges) BlockRange() []BlockRangeType { - r := o.BlockRangePtr - return r -} - -// SetBlockRange is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequestBlockRanges) SetBlockRange(newValue []BlockRangeType) *CloneCreateRequestBlockRanges { - newSlice := make([]BlockRangeType, len(newValue)) - copy(newSlice, newValue) - o.BlockRangePtr = newSlice - return o -} - -// BlockRanges is a 'getter' method -func (o *CloneCreateRequest) BlockRanges() CloneCreateRequestBlockRanges { - r := *o.BlockRangesPtr - return r -} - -// SetBlockRanges is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetBlockRanges(newValue CloneCreateRequestBlockRanges) *CloneCreateRequest { - o.BlockRangesPtr = &newValue - return o -} - -// BypassLicenseCheck is a 'getter' method -func (o *CloneCreateRequest) BypassLicenseCheck() bool { - r := *o.BypassLicenseCheckPtr - return r -} - -// SetBypassLicenseCheck is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetBypassLicenseCheck(newValue bool) *CloneCreateRequest { - o.BypassLicenseCheckPtr = &newValue - return o -} - -// BypassThrottle is a 'getter' method -func (o *CloneCreateRequest) BypassThrottle() bool { - r := *o.BypassThrottlePtr - return r -} - -// SetBypassThrottle is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetBypassThrottle(newValue bool) *CloneCreateRequest { - o.BypassThrottlePtr = &newValue - return o -} - -// DestinationExists is a 'getter' method -func (o *CloneCreateRequest) DestinationExists() bool { - r := *o.DestinationExistsPtr - return r -} - -// SetDestinationExists is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetDestinationExists(newValue bool) *CloneCreateRequest { - o.DestinationExistsPtr = &newValue - return o -} - -// DestinationPath is a 'getter' method -func (o *CloneCreateRequest) DestinationPath() string { - r := *o.DestinationPathPtr - return r -} - -// SetDestinationPath is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetDestinationPath(newValue string) *CloneCreateRequest { - o.DestinationPathPtr = &newValue - return o -} - -// DestinationVolume is a 'getter' method -func (o *CloneCreateRequest) DestinationVolume() string { - r := *o.DestinationVolumePtr - return r -} - -// SetDestinationVolume is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetDestinationVolume(newValue string) *CloneCreateRequest { - o.DestinationVolumePtr = &newValue - return o -} - -// FixedBlockCount is a 'getter' method -func (o *CloneCreateRequest) FixedBlockCount() int { - r := *o.FixedBlockCountPtr - return r 
-} - -// SetFixedBlockCount is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetFixedBlockCount(newValue int) *CloneCreateRequest { - o.FixedBlockCountPtr = &newValue - return o -} - -// IgnoreLocks is a 'getter' method -func (o *CloneCreateRequest) IgnoreLocks() bool { - r := *o.IgnoreLocksPtr - return r -} - -// SetIgnoreLocks is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetIgnoreLocks(newValue bool) *CloneCreateRequest { - o.IgnoreLocksPtr = &newValue - return o -} - -// IgnoreStreams is a 'getter' method -func (o *CloneCreateRequest) IgnoreStreams() bool { - r := *o.IgnoreStreamsPtr - return r -} - -// SetIgnoreStreams is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetIgnoreStreams(newValue bool) *CloneCreateRequest { - o.IgnoreStreamsPtr = &newValue - return o -} - -// IsBackup is a 'getter' method -func (o *CloneCreateRequest) IsBackup() bool { - r := *o.IsBackupPtr - return r -} - -// SetIsBackup is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetIsBackup(newValue bool) *CloneCreateRequest { - o.IsBackupPtr = &newValue - return o -} - -// IsFixedBlockCount is a 'getter' method -func (o *CloneCreateRequest) IsFixedBlockCount() bool { - r := *o.IsFixedBlockCountPtr - return r -} - -// SetIsFixedBlockCount is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetIsFixedBlockCount(newValue bool) *CloneCreateRequest { - o.IsFixedBlockCountPtr = &newValue - return o -} - -// IsVvolBackup is a 'getter' method -func (o *CloneCreateRequest) IsVvolBackup() bool { - r := *o.IsVvolBackupPtr - return r -} - -// SetIsVvolBackup is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetIsVvolBackup(newValue bool) *CloneCreateRequest { - o.IsVvolBackupPtr = &newValue - return o -} - -// LunSerialNumber is a 'getter' method -func (o *CloneCreateRequest) LunSerialNumber() string { - r := *o.LunSerialNumberPtr - return r -} - -// SetLunSerialNumber is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetLunSerialNumber(newValue string) *CloneCreateRequest { - o.LunSerialNumberPtr = &newValue - return o -} - -// NosplitEntry is a 'getter' method -func (o *CloneCreateRequest) NosplitEntry() bool { - r := *o.NosplitEntryPtr - return r -} - -// SetNosplitEntry is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetNosplitEntry(newValue bool) *CloneCreateRequest { - o.NosplitEntryPtr = &newValue - return o -} - -// QosPolicyGroupName is a 'getter' method -func (o *CloneCreateRequest) QosPolicyGroupName() string { - r := *o.QosPolicyGroupNamePtr - return r -} - -// SetQosPolicyGroupName is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetQosPolicyGroupName(newValue string) *CloneCreateRequest { - o.QosPolicyGroupNamePtr = &newValue - return o -} - -// SnapshotName is a 'getter' method -func (o *CloneCreateRequest) SnapshotName() string { - r := *o.SnapshotNamePtr - return r -} - -// SetSnapshotName is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetSnapshotName(newValue string) *CloneCreateRequest { - o.SnapshotNamePtr = &newValue - return o -} - -// SourcePath is a 'getter' method -func (o *CloneCreateRequest) SourcePath() string { - r := *o.SourcePathPtr - return r -} - -// SetSourcePath is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) 
SetSourcePath(newValue string) *CloneCreateRequest { - o.SourcePathPtr = &newValue - return o -} - -// SpaceReserve is a 'getter' method -func (o *CloneCreateRequest) SpaceReserve() bool { - r := *o.SpaceReservePtr - return r -} - -// SetSpaceReserve is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetSpaceReserve(newValue bool) *CloneCreateRequest { - o.SpaceReservePtr = &newValue - return o -} - -// TokenUuid is a 'getter' method -func (o *CloneCreateRequest) TokenUuid() string { - r := *o.TokenUuidPtr - return r -} - -// SetTokenUuid is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetTokenUuid(newValue string) *CloneCreateRequest { - o.TokenUuidPtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *CloneCreateRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *CloneCreateRequest) SetVolume(newValue string) *CloneCreateRequest { - o.VolumePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-ems-autosupport-log.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-ems-autosupport-log.go deleted file mode 100644 index 0ba9155e4..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-ems-autosupport-log.go +++ /dev/null @@ -1,206 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// EmsAutosupportLogRequest is a structure to represent a ems-autosupport-log Request ZAPI object -type EmsAutosupportLogRequest struct { - XMLName xml.Name `xml:"ems-autosupport-log"` - AppVersionPtr *string `xml:"app-version"` - AutoSupportPtr *bool `xml:"auto-support"` - CategoryPtr *string `xml:"category"` - ComputerNamePtr *string `xml:"computer-name"` - EventDescriptionPtr *string `xml:"event-description"` - EventIdPtr *int `xml:"event-id"` - EventSourcePtr *string `xml:"event-source"` - LogLevelPtr *int `xml:"log-level"` -} - -// EmsAutosupportLogResponse is a structure to represent a ems-autosupport-log Response ZAPI object -type EmsAutosupportLogResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result EmsAutosupportLogResponseResult `xml:"results"` -} - -// NewEmsAutosupportLogResponse is a factory method for creating new instances of EmsAutosupportLogResponse objects -func NewEmsAutosupportLogResponse() *EmsAutosupportLogResponse { - return &EmsAutosupportLogResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o EmsAutosupportLogResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *EmsAutosupportLogResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// EmsAutosupportLogResponseResult is a structure to represent a ems-autosupport-log Response Result ZAPI object -type EmsAutosupportLogResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewEmsAutosupportLogRequest is a factory method for creating new instances of EmsAutosupportLogRequest objects -func 
NewEmsAutosupportLogRequest() *EmsAutosupportLogRequest { - return &EmsAutosupportLogRequest{} -} - -// NewEmsAutosupportLogResponseResult is a factory method for creating new instances of EmsAutosupportLogResponseResult objects -func NewEmsAutosupportLogResponseResult() *EmsAutosupportLogResponseResult { - return &EmsAutosupportLogResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *EmsAutosupportLogRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *EmsAutosupportLogResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o EmsAutosupportLogRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o EmsAutosupportLogResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *EmsAutosupportLogRequest) ExecuteUsing(zr *ZapiRunner) (*EmsAutosupportLogResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *EmsAutosupportLogRequest) executeWithoutIteration(zr *ZapiRunner) (*EmsAutosupportLogResponse, error) { - result, err := zr.ExecuteUsing(o, "EmsAutosupportLogRequest", NewEmsAutosupportLogResponse()) - if result == nil { - return nil, err - } - return result.(*EmsAutosupportLogResponse), err -} - -// AppVersion is a 'getter' method -func (o *EmsAutosupportLogRequest) AppVersion() string { - r := *o.AppVersionPtr - return r -} - -// SetAppVersion is a fluent style 'setter' method that can be chained -func (o *EmsAutosupportLogRequest) SetAppVersion(newValue string) *EmsAutosupportLogRequest { - o.AppVersionPtr = &newValue - return o -} - -// AutoSupport is a 'getter' method -func (o *EmsAutosupportLogRequest) AutoSupport() bool { - r := *o.AutoSupportPtr - return r -} - -// SetAutoSupport is a fluent style 'setter' method that can be chained -func (o *EmsAutosupportLogRequest) SetAutoSupport(newValue bool) *EmsAutosupportLogRequest { - o.AutoSupportPtr = &newValue - return o -} - -// Category is a 'getter' method -func (o *EmsAutosupportLogRequest) Category() string { - r := *o.CategoryPtr - return r -} - -// SetCategory is a fluent style 'setter' method that can be chained -func (o *EmsAutosupportLogRequest) SetCategory(newValue string) *EmsAutosupportLogRequest { - o.CategoryPtr = &newValue - return o -} - -// ComputerName is a 'getter' method -func (o *EmsAutosupportLogRequest) ComputerName() string { - r := *o.ComputerNamePtr - return r -} - -// SetComputerName is a fluent style 'setter' method that can be chained -func (o *EmsAutosupportLogRequest) SetComputerName(newValue string) *EmsAutosupportLogRequest { - o.ComputerNamePtr = &newValue - return o -} - -// EventDescription is a 'getter' method -func (o *EmsAutosupportLogRequest) EventDescription() string { - r := *o.EventDescriptionPtr - return r -} - -// 
SetEventDescription is a fluent style 'setter' method that can be chained -func (o *EmsAutosupportLogRequest) SetEventDescription(newValue string) *EmsAutosupportLogRequest { - o.EventDescriptionPtr = &newValue - return o -} - -// EventId is a 'getter' method -func (o *EmsAutosupportLogRequest) EventId() int { - r := *o.EventIdPtr - return r -} - -// SetEventId is a fluent style 'setter' method that can be chained -func (o *EmsAutosupportLogRequest) SetEventId(newValue int) *EmsAutosupportLogRequest { - o.EventIdPtr = &newValue - return o -} - -// EventSource is a 'getter' method -func (o *EmsAutosupportLogRequest) EventSource() string { - r := *o.EventSourcePtr - return r -} - -// SetEventSource is a fluent style 'setter' method that can be chained -func (o *EmsAutosupportLogRequest) SetEventSource(newValue string) *EmsAutosupportLogRequest { - o.EventSourcePtr = &newValue - return o -} - -// LogLevel is a 'getter' method -func (o *EmsAutosupportLogRequest) LogLevel() int { - r := *o.LogLevelPtr - return r -} - -// SetLogLevel is a fluent style 'setter' method that can be chained -func (o *EmsAutosupportLogRequest) SetLogLevel(newValue int) *EmsAutosupportLogRequest { - o.LogLevelPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-export-policy-create.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-export-policy-create.go deleted file mode 100644 index d1b5f6b69..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-export-policy-create.go +++ /dev/null @@ -1,176 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// ExportPolicyCreateRequest is a structure to represent a export-policy-create Request ZAPI object -type ExportPolicyCreateRequest struct { - XMLName xml.Name `xml:"export-policy-create"` - PolicyNamePtr *ExportPolicyNameType `xml:"policy-name"` - ReturnRecordPtr *bool `xml:"return-record"` -} - -// ExportPolicyCreateResponse is a structure to represent a export-policy-create Response ZAPI object -type ExportPolicyCreateResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result ExportPolicyCreateResponseResult `xml:"results"` -} - -// NewExportPolicyCreateResponse is a factory method for creating new instances of ExportPolicyCreateResponse objects -func NewExportPolicyCreateResponse() *ExportPolicyCreateResponse { - return &ExportPolicyCreateResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportPolicyCreateResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *ExportPolicyCreateResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ExportPolicyCreateResponseResult is a structure to represent a export-policy-create Response Result ZAPI object -type ExportPolicyCreateResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ResultPtr *ExportPolicyCreateResponseResultResult `xml:"result"` -} - -// NewExportPolicyCreateRequest is a factory method for creating new instances of ExportPolicyCreateRequest 
objects -func NewExportPolicyCreateRequest() *ExportPolicyCreateRequest { - return &ExportPolicyCreateRequest{} -} - -// NewExportPolicyCreateResponseResult is a factory method for creating new instances of ExportPolicyCreateResponseResult objects -func NewExportPolicyCreateResponseResult() *ExportPolicyCreateResponseResult { - return &ExportPolicyCreateResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *ExportPolicyCreateRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *ExportPolicyCreateResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportPolicyCreateRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportPolicyCreateResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *ExportPolicyCreateRequest) ExecuteUsing(zr *ZapiRunner) (*ExportPolicyCreateResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *ExportPolicyCreateRequest) executeWithoutIteration(zr *ZapiRunner) (*ExportPolicyCreateResponse, error) { - result, err := zr.ExecuteUsing(o, "ExportPolicyCreateRequest", NewExportPolicyCreateResponse()) - if result == nil { - return nil, err - } - return result.(*ExportPolicyCreateResponse), err -} - -// PolicyName is a 'getter' method -func (o *ExportPolicyCreateRequest) PolicyName() ExportPolicyNameType { - r := *o.PolicyNamePtr - return r -} - -// SetPolicyName is a fluent style 'setter' method that can be chained -func (o *ExportPolicyCreateRequest) SetPolicyName(newValue ExportPolicyNameType) *ExportPolicyCreateRequest { - o.PolicyNamePtr = &newValue - return o -} - -// ReturnRecord is a 'getter' method -func (o *ExportPolicyCreateRequest) ReturnRecord() bool { - r := *o.ReturnRecordPtr - return r -} - -// SetReturnRecord is a fluent style 'setter' method that can be chained -func (o *ExportPolicyCreateRequest) SetReturnRecord(newValue bool) *ExportPolicyCreateRequest { - o.ReturnRecordPtr = &newValue - return o -} - -// ExportPolicyCreateResponseResultResult is a wrapper -type ExportPolicyCreateResponseResultResult struct { - XMLName xml.Name `xml:"result"` - ExportPolicyInfoPtr *ExportPolicyInfoType `xml:"export-policy-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportPolicyCreateResponseResultResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExportPolicyInfo is a 'getter' method -func (o *ExportPolicyCreateResponseResultResult) ExportPolicyInfo() ExportPolicyInfoType { - r := *o.ExportPolicyInfoPtr - return r -} - -// SetExportPolicyInfo is a fluent style 'setter' method that can be chained -func (o *ExportPolicyCreateResponseResultResult) SetExportPolicyInfo(newValue 
ExportPolicyInfoType) *ExportPolicyCreateResponseResultResult { - o.ExportPolicyInfoPtr = &newValue - return o -} - -// values is a 'getter' method -func (o *ExportPolicyCreateResponseResultResult) values() ExportPolicyInfoType { - r := *o.ExportPolicyInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *ExportPolicyCreateResponseResultResult) setValues(newValue ExportPolicyInfoType) *ExportPolicyCreateResponseResultResult { - o.ExportPolicyInfoPtr = &newValue - return o -} - -// Result is a 'getter' method -func (o *ExportPolicyCreateResponseResult) Result() ExportPolicyCreateResponseResultResult { - r := *o.ResultPtr - return r -} - -// SetResult is a fluent style 'setter' method that can be chained -func (o *ExportPolicyCreateResponseResult) SetResult(newValue ExportPolicyCreateResponseResultResult) *ExportPolicyCreateResponseResult { - o.ResultPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-export-rule-create.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-export-rule-create.go deleted file mode 100644 index 43cb9a7f2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-export-rule-create.go +++ /dev/null @@ -1,358 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// ExportRuleCreateRequest is a structure to represent a export-rule-create Request ZAPI object -type ExportRuleCreateRequest struct { - XMLName xml.Name `xml:"export-rule-create"` - AnonymousUserIdPtr *string `xml:"anonymous-user-id"` - ClientMatchPtr *string `xml:"client-match"` - ExportChownModePtr *ExportchownmodeType `xml:"export-chown-mode"` - ExportNtfsUnixSecurityOpsPtr *ExportntfsunixsecopsType `xml:"export-ntfs-unix-security-ops"` - IsAllowDevIsEnabledPtr *bool `xml:"is-allow-dev-is-enabled"` - IsAllowSetUidEnabledPtr *bool `xml:"is-allow-set-uid-enabled"` - PolicyNamePtr *ExportPolicyNameType `xml:"policy-name"` - ProtocolPtr *ExportRuleCreateRequestProtocol `xml:"protocol"` - RoRulePtr *ExportRuleCreateRequestRoRule `xml:"ro-rule"` - RuleIndexPtr *int `xml:"rule-index"` - RwRulePtr *ExportRuleCreateRequestRwRule `xml:"rw-rule"` - SuperUserSecurityPtr *ExportRuleCreateRequestSuperUserSecurity `xml:"super-user-security"` -} - -// ExportRuleCreateResponse is a structure to represent a export-rule-create Response ZAPI object -type ExportRuleCreateResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result ExportRuleCreateResponseResult `xml:"results"` -} - -// NewExportRuleCreateResponse is a factory method for creating new instances of ExportRuleCreateResponse objects -func NewExportRuleCreateResponse() *ExportRuleCreateResponse { - return &ExportRuleCreateResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleCreateResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *ExportRuleCreateResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ExportRuleCreateResponseResult is a structure to represent a export-rule-create Response Result ZAPI object -type ExportRuleCreateResponseResult struct { - XMLName xml.Name 
`xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewExportRuleCreateRequest is a factory method for creating new instances of ExportRuleCreateRequest objects -func NewExportRuleCreateRequest() *ExportRuleCreateRequest { - return &ExportRuleCreateRequest{} -} - -// NewExportRuleCreateResponseResult is a factory method for creating new instances of ExportRuleCreateResponseResult objects -func NewExportRuleCreateResponseResult() *ExportRuleCreateResponseResult { - return &ExportRuleCreateResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *ExportRuleCreateRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *ExportRuleCreateResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleCreateRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleCreateResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *ExportRuleCreateRequest) ExecuteUsing(zr *ZapiRunner) (*ExportRuleCreateResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *ExportRuleCreateRequest) executeWithoutIteration(zr *ZapiRunner) (*ExportRuleCreateResponse, error) { - result, err := zr.ExecuteUsing(o, "ExportRuleCreateRequest", NewExportRuleCreateResponse()) - if result == nil { - return nil, err - } - return result.(*ExportRuleCreateResponse), err -} - -// AnonymousUserId is a 'getter' method -func (o *ExportRuleCreateRequest) AnonymousUserId() string { - r := *o.AnonymousUserIdPtr - return r -} - -// SetAnonymousUserId is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetAnonymousUserId(newValue string) *ExportRuleCreateRequest { - o.AnonymousUserIdPtr = &newValue - return o -} - -// ClientMatch is a 'getter' method -func (o *ExportRuleCreateRequest) ClientMatch() string { - r := *o.ClientMatchPtr - return r -} - -// SetClientMatch is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetClientMatch(newValue string) *ExportRuleCreateRequest { - o.ClientMatchPtr = &newValue - return o -} - -// ExportChownMode is a 'getter' method -func (o *ExportRuleCreateRequest) ExportChownMode() ExportchownmodeType { - r := *o.ExportChownModePtr - return r -} - -// SetExportChownMode is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetExportChownMode(newValue ExportchownmodeType) *ExportRuleCreateRequest { - o.ExportChownModePtr = &newValue - return o -} - -// ExportNtfsUnixSecurityOps is a 'getter' method -func (o *ExportRuleCreateRequest) ExportNtfsUnixSecurityOps() ExportntfsunixsecopsType { - r := 
*o.ExportNtfsUnixSecurityOpsPtr - return r -} - -// SetExportNtfsUnixSecurityOps is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetExportNtfsUnixSecurityOps(newValue ExportntfsunixsecopsType) *ExportRuleCreateRequest { - o.ExportNtfsUnixSecurityOpsPtr = &newValue - return o -} - -// IsAllowDevIsEnabled is a 'getter' method -func (o *ExportRuleCreateRequest) IsAllowDevIsEnabled() bool { - r := *o.IsAllowDevIsEnabledPtr - return r -} - -// SetIsAllowDevIsEnabled is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetIsAllowDevIsEnabled(newValue bool) *ExportRuleCreateRequest { - o.IsAllowDevIsEnabledPtr = &newValue - return o -} - -// IsAllowSetUidEnabled is a 'getter' method -func (o *ExportRuleCreateRequest) IsAllowSetUidEnabled() bool { - r := *o.IsAllowSetUidEnabledPtr - return r -} - -// SetIsAllowSetUidEnabled is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetIsAllowSetUidEnabled(newValue bool) *ExportRuleCreateRequest { - o.IsAllowSetUidEnabledPtr = &newValue - return o -} - -// PolicyName is a 'getter' method -func (o *ExportRuleCreateRequest) PolicyName() ExportPolicyNameType { - r := *o.PolicyNamePtr - return r -} - -// SetPolicyName is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetPolicyName(newValue ExportPolicyNameType) *ExportRuleCreateRequest { - o.PolicyNamePtr = &newValue - return o -} - -// ExportRuleCreateRequestProtocol is a wrapper -type ExportRuleCreateRequestProtocol struct { - XMLName xml.Name `xml:"protocol"` - AccessProtocolPtr []AccessProtocolType `xml:"access-protocol"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleCreateRequestProtocol) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AccessProtocol is a 'getter' method -func (o *ExportRuleCreateRequestProtocol) AccessProtocol() []AccessProtocolType { - r := o.AccessProtocolPtr - return r -} - -// SetAccessProtocol is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequestProtocol) SetAccessProtocol(newValue []AccessProtocolType) *ExportRuleCreateRequestProtocol { - newSlice := make([]AccessProtocolType, len(newValue)) - copy(newSlice, newValue) - o.AccessProtocolPtr = newSlice - return o -} - -// Protocol is a 'getter' method -func (o *ExportRuleCreateRequest) Protocol() ExportRuleCreateRequestProtocol { - r := *o.ProtocolPtr - return r -} - -// SetProtocol is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetProtocol(newValue ExportRuleCreateRequestProtocol) *ExportRuleCreateRequest { - o.ProtocolPtr = &newValue - return o -} - -// ExportRuleCreateRequestRoRule is a wrapper -type ExportRuleCreateRequestRoRule struct { - XMLName xml.Name `xml:"ro-rule"` - SecurityFlavorPtr []SecurityFlavorType `xml:"security-flavor"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleCreateRequestRoRule) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SecurityFlavor is a 'getter' method -func (o *ExportRuleCreateRequestRoRule) SecurityFlavor() []SecurityFlavorType { - r := o.SecurityFlavorPtr - return r -} - -// SetSecurityFlavor is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequestRoRule) SetSecurityFlavor(newValue []SecurityFlavorType) *ExportRuleCreateRequestRoRule { - 
newSlice := make([]SecurityFlavorType, len(newValue)) - copy(newSlice, newValue) - o.SecurityFlavorPtr = newSlice - return o -} - -// RoRule is a 'getter' method -func (o *ExportRuleCreateRequest) RoRule() ExportRuleCreateRequestRoRule { - r := *o.RoRulePtr - return r -} - -// SetRoRule is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetRoRule(newValue ExportRuleCreateRequestRoRule) *ExportRuleCreateRequest { - o.RoRulePtr = &newValue - return o -} - -// RuleIndex is a 'getter' method -func (o *ExportRuleCreateRequest) RuleIndex() int { - r := *o.RuleIndexPtr - return r -} - -// SetRuleIndex is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetRuleIndex(newValue int) *ExportRuleCreateRequest { - o.RuleIndexPtr = &newValue - return o -} - -// ExportRuleCreateRequestRwRule is a wrapper -type ExportRuleCreateRequestRwRule struct { - XMLName xml.Name `xml:"rw-rule"` - SecurityFlavorPtr []SecurityFlavorType `xml:"security-flavor"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleCreateRequestRwRule) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SecurityFlavor is a 'getter' method -func (o *ExportRuleCreateRequestRwRule) SecurityFlavor() []SecurityFlavorType { - r := o.SecurityFlavorPtr - return r -} - -// SetSecurityFlavor is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequestRwRule) SetSecurityFlavor(newValue []SecurityFlavorType) *ExportRuleCreateRequestRwRule { - newSlice := make([]SecurityFlavorType, len(newValue)) - copy(newSlice, newValue) - o.SecurityFlavorPtr = newSlice - return o -} - -// RwRule is a 'getter' method -func (o *ExportRuleCreateRequest) RwRule() ExportRuleCreateRequestRwRule { - r := *o.RwRulePtr - return r -} - -// SetRwRule is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetRwRule(newValue ExportRuleCreateRequestRwRule) *ExportRuleCreateRequest { - o.RwRulePtr = &newValue - return o -} - -// ExportRuleCreateRequestSuperUserSecurity is a wrapper -type ExportRuleCreateRequestSuperUserSecurity struct { - XMLName xml.Name `xml:"super-user-security"` - SecurityFlavorPtr []SecurityFlavorType `xml:"security-flavor"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleCreateRequestSuperUserSecurity) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SecurityFlavor is a 'getter' method -func (o *ExportRuleCreateRequestSuperUserSecurity) SecurityFlavor() []SecurityFlavorType { - r := o.SecurityFlavorPtr - return r -} - -// SetSecurityFlavor is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequestSuperUserSecurity) SetSecurityFlavor(newValue []SecurityFlavorType) *ExportRuleCreateRequestSuperUserSecurity { - newSlice := make([]SecurityFlavorType, len(newValue)) - copy(newSlice, newValue) - o.SecurityFlavorPtr = newSlice - return o -} - -// SuperUserSecurity is a 'getter' method -func (o *ExportRuleCreateRequest) SuperUserSecurity() ExportRuleCreateRequestSuperUserSecurity { - r := *o.SuperUserSecurityPtr - return r -} - -// SetSuperUserSecurity is a fluent style 'setter' method that can be chained -func (o *ExportRuleCreateRequest) SetSuperUserSecurity(newValue ExportRuleCreateRequestSuperUserSecurity) *ExportRuleCreateRequest { - o.SuperUserSecurityPtr = &newValue - return o -} diff --git 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-export-rule-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-export-rule-get-iter.go deleted file mode 100644 index 5cc7d79f8..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-export-rule-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// ExportRuleGetIterRequest is a structure to represent a export-rule-get-iter Request ZAPI object -type ExportRuleGetIterRequest struct { - XMLName xml.Name `xml:"export-rule-get-iter"` - DesiredAttributesPtr *ExportRuleGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *ExportRuleGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// ExportRuleGetIterResponse is a structure to represent a export-rule-get-iter Response ZAPI object -type ExportRuleGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result ExportRuleGetIterResponseResult `xml:"results"` -} - -// NewExportRuleGetIterResponse is a factory method for creating new instances of ExportRuleGetIterResponse objects -func NewExportRuleGetIterResponse() *ExportRuleGetIterResponse { - return &ExportRuleGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *ExportRuleGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ExportRuleGetIterResponseResult is a structure to represent a export-rule-get-iter Response Result ZAPI object -type ExportRuleGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *ExportRuleGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewExportRuleGetIterRequest is a factory method for creating new instances of ExportRuleGetIterRequest objects -func NewExportRuleGetIterRequest() *ExportRuleGetIterRequest { - return &ExportRuleGetIterRequest{} -} - -// NewExportRuleGetIterResponseResult is a factory method for creating new instances of ExportRuleGetIterResponseResult objects -func NewExportRuleGetIterResponseResult() *ExportRuleGetIterResponseResult { - return &ExportRuleGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *ExportRuleGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *ExportRuleGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleGetIterRequest) 
String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *ExportRuleGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*ExportRuleGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *ExportRuleGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*ExportRuleGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "ExportRuleGetIterRequest", NewExportRuleGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*ExportRuleGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *ExportRuleGetIterRequest) executeWithIteration(zr *ZapiRunner) (*ExportRuleGetIterResponse, error) { - combined := NewExportRuleGetIterResponse() - combined.Result.SetAttributesList(ExportRuleGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(ExportRuleGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// ExportRuleGetIterRequestDesiredAttributes is a wrapper -type ExportRuleGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - ExportRuleInfoPtr *ExportRuleInfoType `xml:"export-rule-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExportRuleInfo is a 'getter' method -func (o *ExportRuleGetIterRequestDesiredAttributes) ExportRuleInfo() ExportRuleInfoType { - r := *o.ExportRuleInfoPtr - return r -} - -// SetExportRuleInfo is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterRequestDesiredAttributes) SetExportRuleInfo(newValue ExportRuleInfoType) *ExportRuleGetIterRequestDesiredAttributes { - o.ExportRuleInfoPtr = 
&newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *ExportRuleGetIterRequest) DesiredAttributes() ExportRuleGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterRequest) SetDesiredAttributes(newValue ExportRuleGetIterRequestDesiredAttributes) *ExportRuleGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *ExportRuleGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterRequest) SetMaxRecords(newValue int) *ExportRuleGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// ExportRuleGetIterRequestQuery is a wrapper -type ExportRuleGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - ExportRuleInfoPtr *ExportRuleInfoType `xml:"export-rule-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExportRuleInfo is a 'getter' method -func (o *ExportRuleGetIterRequestQuery) ExportRuleInfo() ExportRuleInfoType { - r := *o.ExportRuleInfoPtr - return r -} - -// SetExportRuleInfo is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterRequestQuery) SetExportRuleInfo(newValue ExportRuleInfoType) *ExportRuleGetIterRequestQuery { - o.ExportRuleInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *ExportRuleGetIterRequest) Query() ExportRuleGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterRequest) SetQuery(newValue ExportRuleGetIterRequestQuery) *ExportRuleGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *ExportRuleGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterRequest) SetTag(newValue string) *ExportRuleGetIterRequest { - o.TagPtr = &newValue - return o -} - -// ExportRuleGetIterResponseResultAttributesList is a wrapper -type ExportRuleGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - ExportRuleInfoPtr []ExportRuleInfoType `xml:"export-rule-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExportRuleInfo is a 'getter' method -func (o *ExportRuleGetIterResponseResultAttributesList) ExportRuleInfo() []ExportRuleInfoType { - r := o.ExportRuleInfoPtr - return r -} - -// SetExportRuleInfo is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterResponseResultAttributesList) SetExportRuleInfo(newValue []ExportRuleInfoType) *ExportRuleGetIterResponseResultAttributesList { - newSlice := make([]ExportRuleInfoType, len(newValue)) - copy(newSlice, newValue) - o.ExportRuleInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *ExportRuleGetIterResponseResultAttributesList) values() []ExportRuleInfoType { - r := o.ExportRuleInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o 
*ExportRuleGetIterResponseResultAttributesList) setValues(newValue []ExportRuleInfoType) *ExportRuleGetIterResponseResultAttributesList { - newSlice := make([]ExportRuleInfoType, len(newValue)) - copy(newSlice, newValue) - o.ExportRuleInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *ExportRuleGetIterResponseResult) AttributesList() ExportRuleGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterResponseResult) SetAttributesList(newValue ExportRuleGetIterResponseResultAttributesList) *ExportRuleGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *ExportRuleGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterResponseResult) SetNextTag(newValue string) *ExportRuleGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *ExportRuleGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *ExportRuleGetIterResponseResult) SetNumRecords(newValue int) *ExportRuleGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-add.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-add.go deleted file mode 100644 index 941460462..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-add.go +++ /dev/null @@ -1,141 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IgroupAddRequest is a structure to represent a igroup-add Request ZAPI object -type IgroupAddRequest struct { - XMLName xml.Name `xml:"igroup-add"` - ForcePtr *bool `xml:"force"` - InitiatorPtr *string `xml:"initiator"` - InitiatorGroupNamePtr *string `xml:"initiator-group-name"` -} - -// IgroupAddResponse is a structure to represent a igroup-add Response ZAPI object -type IgroupAddResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result IgroupAddResponseResult `xml:"results"` -} - -// NewIgroupAddResponse is a factory method for creating new instances of IgroupAddResponse objects -func NewIgroupAddResponse() *IgroupAddResponse { - return &IgroupAddResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupAddResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *IgroupAddResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// IgroupAddResponseResult is a structure to represent a igroup-add Response Result ZAPI object -type IgroupAddResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewIgroupAddRequest is a factory method for creating new instances of IgroupAddRequest objects -func NewIgroupAddRequest() 
*IgroupAddRequest { - return &IgroupAddRequest{} -} - -// NewIgroupAddResponseResult is a factory method for creating new instances of IgroupAddResponseResult objects -func NewIgroupAddResponseResult() *IgroupAddResponseResult { - return &IgroupAddResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *IgroupAddRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *IgroupAddResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupAddRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupAddResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupAddRequest) ExecuteUsing(zr *ZapiRunner) (*IgroupAddResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupAddRequest) executeWithoutIteration(zr *ZapiRunner) (*IgroupAddResponse, error) { - result, err := zr.ExecuteUsing(o, "IgroupAddRequest", NewIgroupAddResponse()) - if result == nil { - return nil, err - } - return result.(*IgroupAddResponse), err -} - -// Force is a 'getter' method -func (o *IgroupAddRequest) Force() bool { - r := *o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o *IgroupAddRequest) SetForce(newValue bool) *IgroupAddRequest { - o.ForcePtr = &newValue - return o -} - -// Initiator is a 'getter' method -func (o *IgroupAddRequest) Initiator() string { - r := *o.InitiatorPtr - return r -} - -// SetInitiator is a fluent style 'setter' method that can be chained -func (o *IgroupAddRequest) SetInitiator(newValue string) *IgroupAddRequest { - o.InitiatorPtr = &newValue - return o -} - -// InitiatorGroupName is a 'getter' method -func (o *IgroupAddRequest) InitiatorGroupName() string { - r := *o.InitiatorGroupNamePtr - return r -} - -// SetInitiatorGroupName is a fluent style 'setter' method that can be chained -func (o *IgroupAddRequest) SetInitiatorGroupName(newValue string) *IgroupAddRequest { - o.InitiatorGroupNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-create.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-create.go deleted file mode 100644 index 817888c20..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-create.go +++ /dev/null @@ -1,167 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IgroupCreateRequest is a structure to represent a igroup-create Request ZAPI object -type IgroupCreateRequest struct { - XMLName xml.Name `xml:"igroup-create"` - BindPortsetPtr *string `xml:"bind-portset"` - InitiatorGroupNamePtr *string `xml:"initiator-group-name"` - 
InitiatorGroupTypePtr *string `xml:"initiator-group-type"` - OsTypePtr *InitiatorGroupOsTypeType `xml:"os-type"` - OstypePtr *InitiatorGroupOsTypeType `xml:"ostype"` -} - -// IgroupCreateResponse is a structure to represent a igroup-create Response ZAPI object -type IgroupCreateResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result IgroupCreateResponseResult `xml:"results"` -} - -// NewIgroupCreateResponse is a factory method for creating new instances of IgroupCreateResponse objects -func NewIgroupCreateResponse() *IgroupCreateResponse { - return &IgroupCreateResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupCreateResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *IgroupCreateResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// IgroupCreateResponseResult is a structure to represent a igroup-create Response Result ZAPI object -type IgroupCreateResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewIgroupCreateRequest is a factory method for creating new instances of IgroupCreateRequest objects -func NewIgroupCreateRequest() *IgroupCreateRequest { - return &IgroupCreateRequest{} -} - -// NewIgroupCreateResponseResult is a factory method for creating new instances of IgroupCreateResponseResult objects -func NewIgroupCreateResponseResult() *IgroupCreateResponseResult { - return &IgroupCreateResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *IgroupCreateRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *IgroupCreateResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupCreateRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupCreateResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupCreateRequest) ExecuteUsing(zr *ZapiRunner) (*IgroupCreateResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupCreateRequest) executeWithoutIteration(zr *ZapiRunner) (*IgroupCreateResponse, error) { - result, err := zr.ExecuteUsing(o, "IgroupCreateRequest", NewIgroupCreateResponse()) - if result == nil { - return nil, err - } - return result.(*IgroupCreateResponse), err -} - -// BindPortset is a 'getter' method -func (o *IgroupCreateRequest) BindPortset() 
string { - r := *o.BindPortsetPtr - return r -} - -// SetBindPortset is a fluent style 'setter' method that can be chained -func (o *IgroupCreateRequest) SetBindPortset(newValue string) *IgroupCreateRequest { - o.BindPortsetPtr = &newValue - return o -} - -// InitiatorGroupName is a 'getter' method -func (o *IgroupCreateRequest) InitiatorGroupName() string { - r := *o.InitiatorGroupNamePtr - return r -} - -// SetInitiatorGroupName is a fluent style 'setter' method that can be chained -func (o *IgroupCreateRequest) SetInitiatorGroupName(newValue string) *IgroupCreateRequest { - o.InitiatorGroupNamePtr = &newValue - return o -} - -// InitiatorGroupType is a 'getter' method -func (o *IgroupCreateRequest) InitiatorGroupType() string { - r := *o.InitiatorGroupTypePtr - return r -} - -// SetInitiatorGroupType is a fluent style 'setter' method that can be chained -func (o *IgroupCreateRequest) SetInitiatorGroupType(newValue string) *IgroupCreateRequest { - o.InitiatorGroupTypePtr = &newValue - return o -} - -// OsType is a 'getter' method -func (o *IgroupCreateRequest) OsType() InitiatorGroupOsTypeType { - r := *o.OsTypePtr - return r -} - -// SetOsType is a fluent style 'setter' method that can be chained -func (o *IgroupCreateRequest) SetOsType(newValue InitiatorGroupOsTypeType) *IgroupCreateRequest { - o.OsTypePtr = &newValue - return o -} - -// Ostype is a 'getter' method -func (o *IgroupCreateRequest) Ostype() InitiatorGroupOsTypeType { - r := *o.OstypePtr - return r -} - -// SetOstype is a fluent style 'setter' method that can be chained -func (o *IgroupCreateRequest) SetOstype(newValue InitiatorGroupOsTypeType) *IgroupCreateRequest { - o.OstypePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-destroy.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-destroy.go deleted file mode 100644 index ef6b135b0..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-destroy.go +++ /dev/null @@ -1,128 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IgroupDestroyRequest is a structure to represent a igroup-destroy Request ZAPI object -type IgroupDestroyRequest struct { - XMLName xml.Name `xml:"igroup-destroy"` - ForcePtr *bool `xml:"force"` - InitiatorGroupNamePtr *string `xml:"initiator-group-name"` -} - -// IgroupDestroyResponse is a structure to represent a igroup-destroy Response ZAPI object -type IgroupDestroyResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result IgroupDestroyResponseResult `xml:"results"` -} - -// NewIgroupDestroyResponse is a factory method for creating new instances of IgroupDestroyResponse objects -func NewIgroupDestroyResponse() *IgroupDestroyResponse { - return &IgroupDestroyResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupDestroyResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *IgroupDestroyResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// IgroupDestroyResponseResult is a structure to represent a igroup-destroy Response Result ZAPI object -type IgroupDestroyResponseResult struct { - 
XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewIgroupDestroyRequest is a factory method for creating new instances of IgroupDestroyRequest objects -func NewIgroupDestroyRequest() *IgroupDestroyRequest { - return &IgroupDestroyRequest{} -} - -// NewIgroupDestroyResponseResult is a factory method for creating new instances of IgroupDestroyResponseResult objects -func NewIgroupDestroyResponseResult() *IgroupDestroyResponseResult { - return &IgroupDestroyResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *IgroupDestroyRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *IgroupDestroyResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupDestroyRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupDestroyResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupDestroyRequest) ExecuteUsing(zr *ZapiRunner) (*IgroupDestroyResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupDestroyRequest) executeWithoutIteration(zr *ZapiRunner) (*IgroupDestroyResponse, error) { - result, err := zr.ExecuteUsing(o, "IgroupDestroyRequest", NewIgroupDestroyResponse()) - if result == nil { - return nil, err - } - return result.(*IgroupDestroyResponse), err -} - -// Force is a 'getter' method -func (o *IgroupDestroyRequest) Force() bool { - r := *o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o *IgroupDestroyRequest) SetForce(newValue bool) *IgroupDestroyRequest { - o.ForcePtr = &newValue - return o -} - -// InitiatorGroupName is a 'getter' method -func (o *IgroupDestroyRequest) InitiatorGroupName() string { - r := *o.InitiatorGroupNamePtr - return r -} - -// SetInitiatorGroupName is a fluent style 'setter' method that can be chained -func (o *IgroupDestroyRequest) SetInitiatorGroupName(newValue string) *IgroupDestroyRequest { - o.InitiatorGroupNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-get-iter.go deleted file mode 100644 index 467dbe875..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IgroupGetIterRequest is a structure to represent a igroup-get-iter Request ZAPI object -type IgroupGetIterRequest struct { - XMLName xml.Name `xml:"igroup-get-iter"` - DesiredAttributesPtr 
*IgroupGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *IgroupGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// IgroupGetIterResponse is a structure to represent a igroup-get-iter Response ZAPI object -type IgroupGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result IgroupGetIterResponseResult `xml:"results"` -} - -// NewIgroupGetIterResponse is a factory method for creating new instances of IgroupGetIterResponse objects -func NewIgroupGetIterResponse() *IgroupGetIterResponse { - return &IgroupGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *IgroupGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// IgroupGetIterResponseResult is a structure to represent a igroup-get-iter Response Result ZAPI object -type IgroupGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *IgroupGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewIgroupGetIterRequest is a factory method for creating new instances of IgroupGetIterRequest objects -func NewIgroupGetIterRequest() *IgroupGetIterRequest { - return &IgroupGetIterRequest{} -} - -// NewIgroupGetIterResponseResult is a factory method for creating new instances of IgroupGetIterResponseResult objects -func NewIgroupGetIterResponseResult() *IgroupGetIterResponseResult { - return &IgroupGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *IgroupGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *IgroupGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*IgroupGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*IgroupGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, 
"IgroupGetIterRequest", NewIgroupGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*IgroupGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *IgroupGetIterRequest) executeWithIteration(zr *ZapiRunner) (*IgroupGetIterResponse, error) { - combined := NewIgroupGetIterResponse() - combined.Result.SetAttributesList(IgroupGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(IgroupGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// IgroupGetIterRequestDesiredAttributes is a wrapper -type IgroupGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - InitiatorGroupInfoPtr *InitiatorGroupInfoType `xml:"initiator-group-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// InitiatorGroupInfo is a 'getter' method -func (o *IgroupGetIterRequestDesiredAttributes) InitiatorGroupInfo() InitiatorGroupInfoType { - r := *o.InitiatorGroupInfoPtr - return r -} - -// SetInitiatorGroupInfo is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterRequestDesiredAttributes) SetInitiatorGroupInfo(newValue InitiatorGroupInfoType) *IgroupGetIterRequestDesiredAttributes { - o.InitiatorGroupInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *IgroupGetIterRequest) DesiredAttributes() IgroupGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterRequest) SetDesiredAttributes(newValue IgroupGetIterRequestDesiredAttributes) *IgroupGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *IgroupGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterRequest) SetMaxRecords(newValue int) *IgroupGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// IgroupGetIterRequestQuery is a 
wrapper -type IgroupGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - InitiatorGroupInfoPtr *InitiatorGroupInfoType `xml:"initiator-group-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// InitiatorGroupInfo is a 'getter' method -func (o *IgroupGetIterRequestQuery) InitiatorGroupInfo() InitiatorGroupInfoType { - r := *o.InitiatorGroupInfoPtr - return r -} - -// SetInitiatorGroupInfo is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterRequestQuery) SetInitiatorGroupInfo(newValue InitiatorGroupInfoType) *IgroupGetIterRequestQuery { - o.InitiatorGroupInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *IgroupGetIterRequest) Query() IgroupGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterRequest) SetQuery(newValue IgroupGetIterRequestQuery) *IgroupGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *IgroupGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterRequest) SetTag(newValue string) *IgroupGetIterRequest { - o.TagPtr = &newValue - return o -} - -// IgroupGetIterResponseResultAttributesList is a wrapper -type IgroupGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - InitiatorGroupInfoPtr []InitiatorGroupInfoType `xml:"initiator-group-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// InitiatorGroupInfo is a 'getter' method -func (o *IgroupGetIterResponseResultAttributesList) InitiatorGroupInfo() []InitiatorGroupInfoType { - r := o.InitiatorGroupInfoPtr - return r -} - -// SetInitiatorGroupInfo is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterResponseResultAttributesList) SetInitiatorGroupInfo(newValue []InitiatorGroupInfoType) *IgroupGetIterResponseResultAttributesList { - newSlice := make([]InitiatorGroupInfoType, len(newValue)) - copy(newSlice, newValue) - o.InitiatorGroupInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *IgroupGetIterResponseResultAttributesList) values() []InitiatorGroupInfoType { - r := o.InitiatorGroupInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterResponseResultAttributesList) setValues(newValue []InitiatorGroupInfoType) *IgroupGetIterResponseResultAttributesList { - newSlice := make([]InitiatorGroupInfoType, len(newValue)) - copy(newSlice, newValue) - o.InitiatorGroupInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *IgroupGetIterResponseResult) AttributesList() IgroupGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterResponseResult) SetAttributesList(newValue IgroupGetIterResponseResultAttributesList) *IgroupGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *IgroupGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} 
- -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterResponseResult) SetNextTag(newValue string) *IgroupGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *IgroupGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *IgroupGetIterResponseResult) SetNumRecords(newValue int) *IgroupGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-remove.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-remove.go deleted file mode 100644 index a3c996329..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-igroup-remove.go +++ /dev/null @@ -1,141 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IgroupRemoveRequest is a structure to represent a igroup-remove Request ZAPI object -type IgroupRemoveRequest struct { - XMLName xml.Name `xml:"igroup-remove"` - ForcePtr *bool `xml:"force"` - InitiatorPtr *string `xml:"initiator"` - InitiatorGroupNamePtr *string `xml:"initiator-group-name"` -} - -// IgroupRemoveResponse is a structure to represent a igroup-remove Response ZAPI object -type IgroupRemoveResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result IgroupRemoveResponseResult `xml:"results"` -} - -// NewIgroupRemoveResponse is a factory method for creating new instances of IgroupRemoveResponse objects -func NewIgroupRemoveResponse() *IgroupRemoveResponse { - return &IgroupRemoveResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupRemoveResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *IgroupRemoveResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// IgroupRemoveResponseResult is a structure to represent a igroup-remove Response Result ZAPI object -type IgroupRemoveResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewIgroupRemoveRequest is a factory method for creating new instances of IgroupRemoveRequest objects -func NewIgroupRemoveRequest() *IgroupRemoveRequest { - return &IgroupRemoveRequest{} -} - -// NewIgroupRemoveResponseResult is a factory method for creating new instances of IgroupRemoveResponseResult objects -func NewIgroupRemoveResponseResult() *IgroupRemoveResponseResult { - return &IgroupRemoveResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *IgroupRemoveRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *IgroupRemoveResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// 
String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupRemoveRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IgroupRemoveResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupRemoveRequest) ExecuteUsing(zr *ZapiRunner) (*IgroupRemoveResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IgroupRemoveRequest) executeWithoutIteration(zr *ZapiRunner) (*IgroupRemoveResponse, error) { - result, err := zr.ExecuteUsing(o, "IgroupRemoveRequest", NewIgroupRemoveResponse()) - if result == nil { - return nil, err - } - return result.(*IgroupRemoveResponse), err -} - -// Force is a 'getter' method -func (o *IgroupRemoveRequest) Force() bool { - r := *o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o *IgroupRemoveRequest) SetForce(newValue bool) *IgroupRemoveRequest { - o.ForcePtr = &newValue - return o -} - -// Initiator is a 'getter' method -func (o *IgroupRemoveRequest) Initiator() string { - r := *o.InitiatorPtr - return r -} - -// SetInitiator is a fluent style 'setter' method that can be chained -func (o *IgroupRemoveRequest) SetInitiator(newValue string) *IgroupRemoveRequest { - o.InitiatorPtr = &newValue - return o -} - -// InitiatorGroupName is a 'getter' method -func (o *IgroupRemoveRequest) InitiatorGroupName() string { - r := *o.InitiatorGroupNamePtr - return r -} - -// SetInitiatorGroupName is a fluent style 'setter' method that can be chained -func (o *IgroupRemoveRequest) SetInitiatorGroupName(newValue string) *IgroupRemoveRequest { - o.InitiatorGroupNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-iscsi-interface-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-iscsi-interface-get-iter.go deleted file mode 100644 index 3b9f2a33f..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-iscsi-interface-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IscsiInterfaceGetIterRequest is a structure to represent a iscsi-interface-get-iter Request ZAPI object -type IscsiInterfaceGetIterRequest struct { - XMLName xml.Name `xml:"iscsi-interface-get-iter"` - DesiredAttributesPtr *IscsiInterfaceGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *IscsiInterfaceGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// IscsiInterfaceGetIterResponse is a structure to represent a iscsi-interface-get-iter Response ZAPI object -type IscsiInterfaceGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result IscsiInterfaceGetIterResponseResult `xml:"results"` -} - -// NewIscsiInterfaceGetIterResponse is a factory method for creating new instances of IscsiInterfaceGetIterResponse objects -func NewIscsiInterfaceGetIterResponse() *IscsiInterfaceGetIterResponse { - return 
&IscsiInterfaceGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiInterfaceGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *IscsiInterfaceGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// IscsiInterfaceGetIterResponseResult is a structure to represent a iscsi-interface-get-iter Response Result ZAPI object -type IscsiInterfaceGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *IscsiInterfaceGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewIscsiInterfaceGetIterRequest is a factory method for creating new instances of IscsiInterfaceGetIterRequest objects -func NewIscsiInterfaceGetIterRequest() *IscsiInterfaceGetIterRequest { - return &IscsiInterfaceGetIterRequest{} -} - -// NewIscsiInterfaceGetIterResponseResult is a factory method for creating new instances of IscsiInterfaceGetIterResponseResult objects -func NewIscsiInterfaceGetIterResponseResult() *IscsiInterfaceGetIterResponseResult { - return &IscsiInterfaceGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *IscsiInterfaceGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *IscsiInterfaceGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiInterfaceGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiInterfaceGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IscsiInterfaceGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*IscsiInterfaceGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IscsiInterfaceGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*IscsiInterfaceGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "IscsiInterfaceGetIterRequest", NewIscsiInterfaceGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*IscsiInterfaceGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *IscsiInterfaceGetIterRequest) executeWithIteration(zr *ZapiRunner) (*IscsiInterfaceGetIterResponse, error) { - combined := NewIscsiInterfaceGetIterResponse() - 
combined.Result.SetAttributesList(IscsiInterfaceGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(IscsiInterfaceGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// IscsiInterfaceGetIterRequestDesiredAttributes is a wrapper -type IscsiInterfaceGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - IscsiInterfaceListEntryInfoPtr *IscsiInterfaceListEntryInfoType `xml:"iscsi-interface-list-entry-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiInterfaceGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IscsiInterfaceListEntryInfo is a 'getter' method -func (o *IscsiInterfaceGetIterRequestDesiredAttributes) IscsiInterfaceListEntryInfo() IscsiInterfaceListEntryInfoType { - r := *o.IscsiInterfaceListEntryInfoPtr - return r -} - -// SetIscsiInterfaceListEntryInfo is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterRequestDesiredAttributes) SetIscsiInterfaceListEntryInfo(newValue IscsiInterfaceListEntryInfoType) *IscsiInterfaceGetIterRequestDesiredAttributes { - o.IscsiInterfaceListEntryInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *IscsiInterfaceGetIterRequest) DesiredAttributes() IscsiInterfaceGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterRequest) SetDesiredAttributes(newValue IscsiInterfaceGetIterRequestDesiredAttributes) *IscsiInterfaceGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *IscsiInterfaceGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterRequest) SetMaxRecords(newValue int) *IscsiInterfaceGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// IscsiInterfaceGetIterRequestQuery is a wrapper -type IscsiInterfaceGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - IscsiInterfaceListEntryInfoPtr *IscsiInterfaceListEntryInfoType 
`xml:"iscsi-interface-list-entry-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiInterfaceGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IscsiInterfaceListEntryInfo is a 'getter' method -func (o *IscsiInterfaceGetIterRequestQuery) IscsiInterfaceListEntryInfo() IscsiInterfaceListEntryInfoType { - r := *o.IscsiInterfaceListEntryInfoPtr - return r -} - -// SetIscsiInterfaceListEntryInfo is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterRequestQuery) SetIscsiInterfaceListEntryInfo(newValue IscsiInterfaceListEntryInfoType) *IscsiInterfaceGetIterRequestQuery { - o.IscsiInterfaceListEntryInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *IscsiInterfaceGetIterRequest) Query() IscsiInterfaceGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterRequest) SetQuery(newValue IscsiInterfaceGetIterRequestQuery) *IscsiInterfaceGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *IscsiInterfaceGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterRequest) SetTag(newValue string) *IscsiInterfaceGetIterRequest { - o.TagPtr = &newValue - return o -} - -// IscsiInterfaceGetIterResponseResultAttributesList is a wrapper -type IscsiInterfaceGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - IscsiInterfaceListEntryInfoPtr []IscsiInterfaceListEntryInfoType `xml:"iscsi-interface-list-entry-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiInterfaceGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IscsiInterfaceListEntryInfo is a 'getter' method -func (o *IscsiInterfaceGetIterResponseResultAttributesList) IscsiInterfaceListEntryInfo() []IscsiInterfaceListEntryInfoType { - r := o.IscsiInterfaceListEntryInfoPtr - return r -} - -// SetIscsiInterfaceListEntryInfo is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterResponseResultAttributesList) SetIscsiInterfaceListEntryInfo(newValue []IscsiInterfaceListEntryInfoType) *IscsiInterfaceGetIterResponseResultAttributesList { - newSlice := make([]IscsiInterfaceListEntryInfoType, len(newValue)) - copy(newSlice, newValue) - o.IscsiInterfaceListEntryInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *IscsiInterfaceGetIterResponseResultAttributesList) values() []IscsiInterfaceListEntryInfoType { - r := o.IscsiInterfaceListEntryInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterResponseResultAttributesList) setValues(newValue []IscsiInterfaceListEntryInfoType) *IscsiInterfaceGetIterResponseResultAttributesList { - newSlice := make([]IscsiInterfaceListEntryInfoType, len(newValue)) - copy(newSlice, newValue) - o.IscsiInterfaceListEntryInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *IscsiInterfaceGetIterResponseResult) AttributesList() IscsiInterfaceGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o 
*IscsiInterfaceGetIterResponseResult) SetAttributesList(newValue IscsiInterfaceGetIterResponseResultAttributesList) *IscsiInterfaceGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *IscsiInterfaceGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterResponseResult) SetNextTag(newValue string) *IscsiInterfaceGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *IscsiInterfaceGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceGetIterResponseResult) SetNumRecords(newValue int) *IscsiInterfaceGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-iscsi-node-get-name.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-iscsi-node-get-name.go deleted file mode 100644 index acfc10c40..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-iscsi-node-get-name.go +++ /dev/null @@ -1,115 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IscsiNodeGetNameRequest is a structure to represent a iscsi-node-get-name Request ZAPI object -type IscsiNodeGetNameRequest struct { - XMLName xml.Name `xml:"iscsi-node-get-name"` -} - -// IscsiNodeGetNameResponse is a structure to represent a iscsi-node-get-name Response ZAPI object -type IscsiNodeGetNameResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result IscsiNodeGetNameResponseResult `xml:"results"` -} - -// NewIscsiNodeGetNameResponse is a factory method for creating new instances of IscsiNodeGetNameResponse objects -func NewIscsiNodeGetNameResponse() *IscsiNodeGetNameResponse { - return &IscsiNodeGetNameResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiNodeGetNameResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *IscsiNodeGetNameResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// IscsiNodeGetNameResponseResult is a structure to represent a iscsi-node-get-name Response Result ZAPI object -type IscsiNodeGetNameResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - NodeNamePtr *string `xml:"node-name"` -} - -// NewIscsiNodeGetNameRequest is a factory method for creating new instances of IscsiNodeGetNameRequest objects -func NewIscsiNodeGetNameRequest() *IscsiNodeGetNameRequest { - return &IscsiNodeGetNameRequest{} -} - -// NewIscsiNodeGetNameResponseResult is a factory method for creating new instances of IscsiNodeGetNameResponseResult objects -func NewIscsiNodeGetNameResponseResult() *IscsiNodeGetNameResponseResult { - return &IscsiNodeGetNameResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *IscsiNodeGetNameRequest) 
ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *IscsiNodeGetNameResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiNodeGetNameRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiNodeGetNameResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IscsiNodeGetNameRequest) ExecuteUsing(zr *ZapiRunner) (*IscsiNodeGetNameResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IscsiNodeGetNameRequest) executeWithoutIteration(zr *ZapiRunner) (*IscsiNodeGetNameResponse, error) { - result, err := zr.ExecuteUsing(o, "IscsiNodeGetNameRequest", NewIscsiNodeGetNameResponse()) - if result == nil { - return nil, err - } - return result.(*IscsiNodeGetNameResponse), err -} - -// NodeName is a 'getter' method -func (o *IscsiNodeGetNameResponseResult) NodeName() string { - r := *o.NodeNamePtr - return r -} - -// SetNodeName is a fluent style 'setter' method that can be chained -func (o *IscsiNodeGetNameResponseResult) SetNodeName(newValue string) *IscsiNodeGetNameResponseResult { - o.NodeNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-iscsi-service-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-iscsi-service-get-iter.go deleted file mode 100644 index 4b44d2c9c..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-iscsi-service-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IscsiServiceGetIterRequest is a structure to represent a iscsi-service-get-iter Request ZAPI object -type IscsiServiceGetIterRequest struct { - XMLName xml.Name `xml:"iscsi-service-get-iter"` - DesiredAttributesPtr *IscsiServiceGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *IscsiServiceGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// IscsiServiceGetIterResponse is a structure to represent a iscsi-service-get-iter Response ZAPI object -type IscsiServiceGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result IscsiServiceGetIterResponseResult `xml:"results"` -} - -// NewIscsiServiceGetIterResponse is a factory method for creating new instances of IscsiServiceGetIterResponse objects -func NewIscsiServiceGetIterResponse() *IscsiServiceGetIterResponse { - return &IscsiServiceGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiServiceGetIterResponse) String() string { - return 
ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *IscsiServiceGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// IscsiServiceGetIterResponseResult is a structure to represent a iscsi-service-get-iter Response Result ZAPI object -type IscsiServiceGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *IscsiServiceGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewIscsiServiceGetIterRequest is a factory method for creating new instances of IscsiServiceGetIterRequest objects -func NewIscsiServiceGetIterRequest() *IscsiServiceGetIterRequest { - return &IscsiServiceGetIterRequest{} -} - -// NewIscsiServiceGetIterResponseResult is a factory method for creating new instances of IscsiServiceGetIterResponseResult objects -func NewIscsiServiceGetIterResponseResult() *IscsiServiceGetIterResponseResult { - return &IscsiServiceGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *IscsiServiceGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *IscsiServiceGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiServiceGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiServiceGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IscsiServiceGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*IscsiServiceGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *IscsiServiceGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*IscsiServiceGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "IscsiServiceGetIterRequest", NewIscsiServiceGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*IscsiServiceGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *IscsiServiceGetIterRequest) executeWithIteration(zr *ZapiRunner) (*IscsiServiceGetIterResponse, error) { - combined := NewIscsiServiceGetIterResponse() - combined.Result.SetAttributesList(IscsiServiceGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == 
nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(IscsiServiceGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// IscsiServiceGetIterRequestDesiredAttributes is a wrapper -type IscsiServiceGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - IscsiServiceInfoPtr *IscsiServiceInfoType `xml:"iscsi-service-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiServiceGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IscsiServiceInfo is a 'getter' method -func (o *IscsiServiceGetIterRequestDesiredAttributes) IscsiServiceInfo() IscsiServiceInfoType { - r := *o.IscsiServiceInfoPtr - return r -} - -// SetIscsiServiceInfo is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterRequestDesiredAttributes) SetIscsiServiceInfo(newValue IscsiServiceInfoType) *IscsiServiceGetIterRequestDesiredAttributes { - o.IscsiServiceInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *IscsiServiceGetIterRequest) DesiredAttributes() IscsiServiceGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterRequest) SetDesiredAttributes(newValue IscsiServiceGetIterRequestDesiredAttributes) *IscsiServiceGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *IscsiServiceGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterRequest) SetMaxRecords(newValue int) *IscsiServiceGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// IscsiServiceGetIterRequestQuery is a wrapper -type IscsiServiceGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - IscsiServiceInfoPtr *IscsiServiceInfoType `xml:"iscsi-service-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiServiceGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IscsiServiceInfo is a 'getter' method -func (o *IscsiServiceGetIterRequestQuery) IscsiServiceInfo() IscsiServiceInfoType { - r := *o.IscsiServiceInfoPtr - return r -} - -// SetIscsiServiceInfo is a fluent style 'setter' method that can be chained -func 
(o *IscsiServiceGetIterRequestQuery) SetIscsiServiceInfo(newValue IscsiServiceInfoType) *IscsiServiceGetIterRequestQuery { - o.IscsiServiceInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *IscsiServiceGetIterRequest) Query() IscsiServiceGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterRequest) SetQuery(newValue IscsiServiceGetIterRequestQuery) *IscsiServiceGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *IscsiServiceGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterRequest) SetTag(newValue string) *IscsiServiceGetIterRequest { - o.TagPtr = &newValue - return o -} - -// IscsiServiceGetIterResponseResultAttributesList is a wrapper -type IscsiServiceGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - IscsiServiceInfoPtr []IscsiServiceInfoType `xml:"iscsi-service-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiServiceGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IscsiServiceInfo is a 'getter' method -func (o *IscsiServiceGetIterResponseResultAttributesList) IscsiServiceInfo() []IscsiServiceInfoType { - r := o.IscsiServiceInfoPtr - return r -} - -// SetIscsiServiceInfo is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterResponseResultAttributesList) SetIscsiServiceInfo(newValue []IscsiServiceInfoType) *IscsiServiceGetIterResponseResultAttributesList { - newSlice := make([]IscsiServiceInfoType, len(newValue)) - copy(newSlice, newValue) - o.IscsiServiceInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *IscsiServiceGetIterResponseResultAttributesList) values() []IscsiServiceInfoType { - r := o.IscsiServiceInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterResponseResultAttributesList) setValues(newValue []IscsiServiceInfoType) *IscsiServiceGetIterResponseResultAttributesList { - newSlice := make([]IscsiServiceInfoType, len(newValue)) - copy(newSlice, newValue) - o.IscsiServiceInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *IscsiServiceGetIterResponseResult) AttributesList() IscsiServiceGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterResponseResult) SetAttributesList(newValue IscsiServiceGetIterResponseResultAttributesList) *IscsiServiceGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *IscsiServiceGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterResponseResult) SetNextTag(newValue string) *IscsiServiceGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *IscsiServiceGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *IscsiServiceGetIterResponseResult) SetNumRecords(newValue int) 
*IscsiServiceGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-job-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-job-get-iter.go deleted file mode 100644 index bac081baa..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-job-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// JobGetIterRequest is a structure to represent a job-get-iter Request ZAPI object -type JobGetIterRequest struct { - XMLName xml.Name `xml:"job-get-iter"` - DesiredAttributesPtr *JobGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *JobGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// JobGetIterResponse is a structure to represent a job-get-iter Response ZAPI object -type JobGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result JobGetIterResponseResult `xml:"results"` -} - -// NewJobGetIterResponse is a factory method for creating new instances of JobGetIterResponse objects -func NewJobGetIterResponse() *JobGetIterResponse { - return &JobGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o JobGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *JobGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// JobGetIterResponseResult is a structure to represent a job-get-iter Response Result ZAPI object -type JobGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *JobGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewJobGetIterRequest is a factory method for creating new instances of JobGetIterRequest objects -func NewJobGetIterRequest() *JobGetIterRequest { - return &JobGetIterRequest{} -} - -// NewJobGetIterResponseResult is a factory method for creating new instances of JobGetIterResponseResult objects -func NewJobGetIterResponseResult() *JobGetIterResponseResult { - return &JobGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *JobGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *JobGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o JobGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o 
JobGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *JobGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*JobGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *JobGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*JobGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "JobGetIterRequest", NewJobGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*JobGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *JobGetIterRequest) executeWithIteration(zr *ZapiRunner) (*JobGetIterResponse, error) { - combined := NewJobGetIterResponse() - combined.Result.SetAttributesList(JobGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(JobGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// JobGetIterRequestDesiredAttributes is a wrapper -type JobGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - JobInfoPtr *JobInfoType `xml:"job-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o JobGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// JobInfo is a 'getter' method -func (o *JobGetIterRequestDesiredAttributes) JobInfo() JobInfoType { - r := *o.JobInfoPtr - return r -} - -// SetJobInfo is a fluent style 'setter' method that can be chained -func (o *JobGetIterRequestDesiredAttributes) SetJobInfo(newValue JobInfoType) *JobGetIterRequestDesiredAttributes { - o.JobInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *JobGetIterRequest) DesiredAttributes() JobGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *JobGetIterRequest) SetDesiredAttributes(newValue JobGetIterRequestDesiredAttributes) 
*JobGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *JobGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *JobGetIterRequest) SetMaxRecords(newValue int) *JobGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// JobGetIterRequestQuery is a wrapper -type JobGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - JobInfoPtr *JobInfoType `xml:"job-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o JobGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// JobInfo is a 'getter' method -func (o *JobGetIterRequestQuery) JobInfo() JobInfoType { - r := *o.JobInfoPtr - return r -} - -// SetJobInfo is a fluent style 'setter' method that can be chained -func (o *JobGetIterRequestQuery) SetJobInfo(newValue JobInfoType) *JobGetIterRequestQuery { - o.JobInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *JobGetIterRequest) Query() JobGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *JobGetIterRequest) SetQuery(newValue JobGetIterRequestQuery) *JobGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *JobGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *JobGetIterRequest) SetTag(newValue string) *JobGetIterRequest { - o.TagPtr = &newValue - return o -} - -// JobGetIterResponseResultAttributesList is a wrapper -type JobGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - JobInfoPtr []JobInfoType `xml:"job-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o JobGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// JobInfo is a 'getter' method -func (o *JobGetIterResponseResultAttributesList) JobInfo() []JobInfoType { - r := o.JobInfoPtr - return r -} - -// SetJobInfo is a fluent style 'setter' method that can be chained -func (o *JobGetIterResponseResultAttributesList) SetJobInfo(newValue []JobInfoType) *JobGetIterResponseResultAttributesList { - newSlice := make([]JobInfoType, len(newValue)) - copy(newSlice, newValue) - o.JobInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *JobGetIterResponseResultAttributesList) values() []JobInfoType { - r := o.JobInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *JobGetIterResponseResultAttributesList) setValues(newValue []JobInfoType) *JobGetIterResponseResultAttributesList { - newSlice := make([]JobInfoType, len(newValue)) - copy(newSlice, newValue) - o.JobInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *JobGetIterResponseResult) AttributesList() JobGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *JobGetIterResponseResult) SetAttributesList(newValue JobGetIterResponseResultAttributesList) *JobGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *JobGetIterResponseResult) NextTag() string { - r 
:= *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *JobGetIterResponseResult) SetNextTag(newValue string) *JobGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *JobGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *JobGetIterResponseResult) SetNumRecords(newValue int) *JobGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-create-by-size.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-create-by-size.go deleted file mode 100644 index 97f28c37b..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-create-by-size.go +++ /dev/null @@ -1,297 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunCreateBySizeRequest is a structure to represent a lun-create-by-size Request ZAPI object -type LunCreateBySizeRequest struct { - XMLName xml.Name `xml:"lun-create-by-size"` - ApplicationPtr *string `xml:"application"` - CachingPolicyPtr *string `xml:"caching-policy"` - ClassPtr *string `xml:"class"` - CommentPtr *string `xml:"comment"` - ForeignDiskPtr *string `xml:"foreign-disk"` - OstypePtr *LunOsTypeType `xml:"ostype"` - PathPtr *string `xml:"path"` - PrefixSizePtr *int `xml:"prefix-size"` - QosPolicyGroupPtr *string `xml:"qos-policy-group"` - SizePtr *int `xml:"size"` - SpaceAllocationEnabledPtr *bool `xml:"space-allocation-enabled"` - SpaceReservationEnabledPtr *bool `xml:"space-reservation-enabled"` - TypePtr *LunOsTypeType `xml:"type"` - UseExactSizePtr *bool `xml:"use-exact-size"` -} - -// LunCreateBySizeResponse is a structure to represent a lun-create-by-size Response ZAPI object -type LunCreateBySizeResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunCreateBySizeResponseResult `xml:"results"` -} - -// NewLunCreateBySizeResponse is a factory method for creating new instances of LunCreateBySizeResponse objects -func NewLunCreateBySizeResponse() *LunCreateBySizeResponse { - return &LunCreateBySizeResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunCreateBySizeResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunCreateBySizeResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunCreateBySizeResponseResult is a structure to represent a lun-create-by-size Response Result ZAPI object -type LunCreateBySizeResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ActualSizePtr *int `xml:"actual-size"` -} - -// NewLunCreateBySizeRequest is a factory method for creating new instances of LunCreateBySizeRequest objects -func NewLunCreateBySizeRequest() *LunCreateBySizeRequest { - return &LunCreateBySizeRequest{} -} - -// NewLunCreateBySizeResponseResult is a factory method for creating new instances of 
LunCreateBySizeResponseResult objects -func NewLunCreateBySizeResponseResult() *LunCreateBySizeResponseResult { - return &LunCreateBySizeResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunCreateBySizeRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunCreateBySizeResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunCreateBySizeRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunCreateBySizeResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunCreateBySizeRequest) ExecuteUsing(zr *ZapiRunner) (*LunCreateBySizeResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunCreateBySizeRequest) executeWithoutIteration(zr *ZapiRunner) (*LunCreateBySizeResponse, error) { - result, err := zr.ExecuteUsing(o, "LunCreateBySizeRequest", NewLunCreateBySizeResponse()) - if result == nil { - return nil, err - } - return result.(*LunCreateBySizeResponse), err -} - -// Application is a 'getter' method -func (o *LunCreateBySizeRequest) Application() string { - r := *o.ApplicationPtr - return r -} - -// SetApplication is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetApplication(newValue string) *LunCreateBySizeRequest { - o.ApplicationPtr = &newValue - return o -} - -// CachingPolicy is a 'getter' method -func (o *LunCreateBySizeRequest) CachingPolicy() string { - r := *o.CachingPolicyPtr - return r -} - -// SetCachingPolicy is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetCachingPolicy(newValue string) *LunCreateBySizeRequest { - o.CachingPolicyPtr = &newValue - return o -} - -// Class is a 'getter' method -func (o *LunCreateBySizeRequest) Class() string { - r := *o.ClassPtr - return r -} - -// SetClass is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetClass(newValue string) *LunCreateBySizeRequest { - o.ClassPtr = &newValue - return o -} - -// Comment is a 'getter' method -func (o *LunCreateBySizeRequest) Comment() string { - r := *o.CommentPtr - return r -} - -// SetComment is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetComment(newValue string) *LunCreateBySizeRequest { - o.CommentPtr = &newValue - return o -} - -// ForeignDisk is a 'getter' method -func (o *LunCreateBySizeRequest) ForeignDisk() string { - r := *o.ForeignDiskPtr - return r -} - -// SetForeignDisk is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetForeignDisk(newValue string) *LunCreateBySizeRequest { - o.ForeignDiskPtr = &newValue - return o -} - -// Ostype is a 'getter' method -func (o *LunCreateBySizeRequest) Ostype() 
LunOsTypeType { - r := *o.OstypePtr - return r -} - -// SetOstype is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetOstype(newValue LunOsTypeType) *LunCreateBySizeRequest { - o.OstypePtr = &newValue - return o -} - -// Path is a 'getter' method -func (o *LunCreateBySizeRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetPath(newValue string) *LunCreateBySizeRequest { - o.PathPtr = &newValue - return o -} - -// PrefixSize is a 'getter' method -func (o *LunCreateBySizeRequest) PrefixSize() int { - r := *o.PrefixSizePtr - return r -} - -// SetPrefixSize is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetPrefixSize(newValue int) *LunCreateBySizeRequest { - o.PrefixSizePtr = &newValue - return o -} - -// QosPolicyGroup is a 'getter' method -func (o *LunCreateBySizeRequest) QosPolicyGroup() string { - r := *o.QosPolicyGroupPtr - return r -} - -// SetQosPolicyGroup is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetQosPolicyGroup(newValue string) *LunCreateBySizeRequest { - o.QosPolicyGroupPtr = &newValue - return o -} - -// Size is a 'getter' method -func (o *LunCreateBySizeRequest) Size() int { - r := *o.SizePtr - return r -} - -// SetSize is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetSize(newValue int) *LunCreateBySizeRequest { - o.SizePtr = &newValue - return o -} - -// SpaceAllocationEnabled is a 'getter' method -func (o *LunCreateBySizeRequest) SpaceAllocationEnabled() bool { - r := *o.SpaceAllocationEnabledPtr - return r -} - -// SetSpaceAllocationEnabled is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetSpaceAllocationEnabled(newValue bool) *LunCreateBySizeRequest { - o.SpaceAllocationEnabledPtr = &newValue - return o -} - -// SpaceReservationEnabled is a 'getter' method -func (o *LunCreateBySizeRequest) SpaceReservationEnabled() bool { - r := *o.SpaceReservationEnabledPtr - return r -} - -// SetSpaceReservationEnabled is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetSpaceReservationEnabled(newValue bool) *LunCreateBySizeRequest { - o.SpaceReservationEnabledPtr = &newValue - return o -} - -// Type is a 'getter' method -func (o *LunCreateBySizeRequest) Type() LunOsTypeType { - r := *o.TypePtr - return r -} - -// SetType is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetType(newValue LunOsTypeType) *LunCreateBySizeRequest { - o.TypePtr = &newValue - return o -} - -// UseExactSize is a 'getter' method -func (o *LunCreateBySizeRequest) UseExactSize() bool { - r := *o.UseExactSizePtr - return r -} - -// SetUseExactSize is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeRequest) SetUseExactSize(newValue bool) *LunCreateBySizeRequest { - o.UseExactSizePtr = &newValue - return o -} - -// ActualSize is a 'getter' method -func (o *LunCreateBySizeResponseResult) ActualSize() int { - r := *o.ActualSizePtr - return r -} - -// SetActualSize is a fluent style 'setter' method that can be chained -func (o *LunCreateBySizeResponseResult) SetActualSize(newValue int) *LunCreateBySizeResponseResult { - o.ActualSizePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-destroy.go 
b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-destroy.go deleted file mode 100644 index 83af0460c..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-destroy.go +++ /dev/null @@ -1,154 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunDestroyRequest is a structure to represent a lun-destroy Request ZAPI object -type LunDestroyRequest struct { - XMLName xml.Name `xml:"lun-destroy"` - DestroyApplicationLunPtr *bool `xml:"destroy-application-lun"` - DestroyFencedLunPtr *bool `xml:"destroy-fenced-lun"` - ForcePtr *bool `xml:"force"` - PathPtr *string `xml:"path"` -} - -// LunDestroyResponse is a structure to represent a lun-destroy Response ZAPI object -type LunDestroyResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunDestroyResponseResult `xml:"results"` -} - -// NewLunDestroyResponse is a factory method for creating new instances of LunDestroyResponse objects -func NewLunDestroyResponse() *LunDestroyResponse { - return &LunDestroyResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunDestroyResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunDestroyResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunDestroyResponseResult is a structure to represent a lun-destroy Response Result ZAPI object -type LunDestroyResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewLunDestroyRequest is a factory method for creating new instances of LunDestroyRequest objects -func NewLunDestroyRequest() *LunDestroyRequest { - return &LunDestroyRequest{} -} - -// NewLunDestroyResponseResult is a factory method for creating new instances of LunDestroyResponseResult objects -func NewLunDestroyResponseResult() *LunDestroyResponseResult { - return &LunDestroyResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunDestroyRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunDestroyResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunDestroyRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunDestroyResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunDestroyRequest) ExecuteUsing(zr *ZapiRunner) (*LunDestroyResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration 
converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunDestroyRequest) executeWithoutIteration(zr *ZapiRunner) (*LunDestroyResponse, error) { - result, err := zr.ExecuteUsing(o, "LunDestroyRequest", NewLunDestroyResponse()) - if result == nil { - return nil, err - } - return result.(*LunDestroyResponse), err -} - -// DestroyApplicationLun is a 'getter' method -func (o *LunDestroyRequest) DestroyApplicationLun() bool { - r := *o.DestroyApplicationLunPtr - return r -} - -// SetDestroyApplicationLun is a fluent style 'setter' method that can be chained -func (o *LunDestroyRequest) SetDestroyApplicationLun(newValue bool) *LunDestroyRequest { - o.DestroyApplicationLunPtr = &newValue - return o -} - -// DestroyFencedLun is a 'getter' method -func (o *LunDestroyRequest) DestroyFencedLun() bool { - r := *o.DestroyFencedLunPtr - return r -} - -// SetDestroyFencedLun is a fluent style 'setter' method that can be chained -func (o *LunDestroyRequest) SetDestroyFencedLun(newValue bool) *LunDestroyRequest { - o.DestroyFencedLunPtr = &newValue - return o -} - -// Force is a 'getter' method -func (o *LunDestroyRequest) Force() bool { - r := *o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o *LunDestroyRequest) SetForce(newValue bool) *LunDestroyRequest { - o.ForcePtr = &newValue - return o -} - -// Path is a 'getter' method -func (o *LunDestroyRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunDestroyRequest) SetPath(newValue string) *LunDestroyRequest { - o.PathPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-attribute.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-attribute.go deleted file mode 100644 index 76a602039..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-attribute.go +++ /dev/null @@ -1,141 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunGetAttributeRequest is a structure to represent a lun-get-attribute Request ZAPI object -type LunGetAttributeRequest struct { - XMLName xml.Name `xml:"lun-get-attribute"` - NamePtr *string `xml:"name"` - PathPtr *string `xml:"path"` -} - -// LunGetAttributeResponse is a structure to represent a lun-get-attribute Response ZAPI object -type LunGetAttributeResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunGetAttributeResponseResult `xml:"results"` -} - -// NewLunGetAttributeResponse is a factory method for creating new instances of LunGetAttributeResponse objects -func NewLunGetAttributeResponse() *LunGetAttributeResponse { - return &LunGetAttributeResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetAttributeResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunGetAttributeResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunGetAttributeResponseResult is a structure to represent a lun-get-attribute Response Result ZAPI object -type 
LunGetAttributeResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ValuePtr *string `xml:"value"` -} - -// NewLunGetAttributeRequest is a factory method for creating new instances of LunGetAttributeRequest objects -func NewLunGetAttributeRequest() *LunGetAttributeRequest { - return &LunGetAttributeRequest{} -} - -// NewLunGetAttributeResponseResult is a factory method for creating new instances of LunGetAttributeResponseResult objects -func NewLunGetAttributeResponseResult() *LunGetAttributeResponseResult { - return &LunGetAttributeResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunGetAttributeRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunGetAttributeResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetAttributeRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetAttributeResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunGetAttributeRequest) ExecuteUsing(zr *ZapiRunner) (*LunGetAttributeResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunGetAttributeRequest) executeWithoutIteration(zr *ZapiRunner) (*LunGetAttributeResponse, error) { - result, err := zr.ExecuteUsing(o, "LunGetAttributeRequest", NewLunGetAttributeResponse()) - if result == nil { - return nil, err - } - return result.(*LunGetAttributeResponse), err -} - -// Name is a 'getter' method -func (o *LunGetAttributeRequest) Name() string { - r := *o.NamePtr - return r -} - -// SetName is a fluent style 'setter' method that can be chained -func (o *LunGetAttributeRequest) SetName(newValue string) *LunGetAttributeRequest { - o.NamePtr = &newValue - return o -} - -// Path is a 'getter' method -func (o *LunGetAttributeRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunGetAttributeRequest) SetPath(newValue string) *LunGetAttributeRequest { - o.PathPtr = &newValue - return o -} - -// Value is a 'getter' method -func (o *LunGetAttributeResponseResult) Value() string { - r := *o.ValuePtr - return r -} - -// SetValue is a fluent style 'setter' method that can be chained -func (o *LunGetAttributeResponseResult) SetValue(newValue string) *LunGetAttributeResponseResult { - o.ValuePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-geometry.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-geometry.go deleted file mode 100644 index 37f3e6bcd..000000000 --- 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-geometry.go +++ /dev/null @@ -1,193 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunGetGeometryRequest is a structure to represent a lun-get-geometry Request ZAPI object -type LunGetGeometryRequest struct { - XMLName xml.Name `xml:"lun-get-geometry"` - PathPtr *string `xml:"path"` -} - -// LunGetGeometryResponse is a structure to represent a lun-get-geometry Response ZAPI object -type LunGetGeometryResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunGetGeometryResponseResult `xml:"results"` -} - -// NewLunGetGeometryResponse is a factory method for creating new instances of LunGetGeometryResponse objects -func NewLunGetGeometryResponse() *LunGetGeometryResponse { - return &LunGetGeometryResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetGeometryResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunGetGeometryResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunGetGeometryResponseResult is a structure to represent a lun-get-geometry Response Result ZAPI object -type LunGetGeometryResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - BytesPerSectorPtr *int `xml:"bytes-per-sector"` - CylindersPtr *int `xml:"cylinders"` - MaxResizeSizePtr *int `xml:"max-resize-size"` - SectorsPerTrackPtr *int `xml:"sectors-per-track"` - SizePtr *int `xml:"size"` - TracksPerCylinderPtr *int `xml:"tracks-per-cylinder"` -} - -// NewLunGetGeometryRequest is a factory method for creating new instances of LunGetGeometryRequest objects -func NewLunGetGeometryRequest() *LunGetGeometryRequest { - return &LunGetGeometryRequest{} -} - -// NewLunGetGeometryResponseResult is a factory method for creating new instances of LunGetGeometryResponseResult objects -func NewLunGetGeometryResponseResult() *LunGetGeometryResponseResult { - return &LunGetGeometryResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunGetGeometryRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunGetGeometryResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetGeometryRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetGeometryResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunGetGeometryRequest) 
ExecuteUsing(zr *ZapiRunner) (*LunGetGeometryResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunGetGeometryRequest) executeWithoutIteration(zr *ZapiRunner) (*LunGetGeometryResponse, error) { - result, err := zr.ExecuteUsing(o, "LunGetGeometryRequest", NewLunGetGeometryResponse()) - if result == nil { - return nil, err - } - return result.(*LunGetGeometryResponse), err -} - -// Path is a 'getter' method -func (o *LunGetGeometryRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunGetGeometryRequest) SetPath(newValue string) *LunGetGeometryRequest { - o.PathPtr = &newValue - return o -} - -// BytesPerSector is a 'getter' method -func (o *LunGetGeometryResponseResult) BytesPerSector() int { - r := *o.BytesPerSectorPtr - return r -} - -// SetBytesPerSector is a fluent style 'setter' method that can be chained -func (o *LunGetGeometryResponseResult) SetBytesPerSector(newValue int) *LunGetGeometryResponseResult { - o.BytesPerSectorPtr = &newValue - return o -} - -// Cylinders is a 'getter' method -func (o *LunGetGeometryResponseResult) Cylinders() int { - r := *o.CylindersPtr - return r -} - -// SetCylinders is a fluent style 'setter' method that can be chained -func (o *LunGetGeometryResponseResult) SetCylinders(newValue int) *LunGetGeometryResponseResult { - o.CylindersPtr = &newValue - return o -} - -// MaxResizeSize is a 'getter' method -func (o *LunGetGeometryResponseResult) MaxResizeSize() int { - r := *o.MaxResizeSizePtr - return r -} - -// SetMaxResizeSize is a fluent style 'setter' method that can be chained -func (o *LunGetGeometryResponseResult) SetMaxResizeSize(newValue int) *LunGetGeometryResponseResult { - o.MaxResizeSizePtr = &newValue - return o -} - -// SectorsPerTrack is a 'getter' method -func (o *LunGetGeometryResponseResult) SectorsPerTrack() int { - r := *o.SectorsPerTrackPtr - return r -} - -// SetSectorsPerTrack is a fluent style 'setter' method that can be chained -func (o *LunGetGeometryResponseResult) SetSectorsPerTrack(newValue int) *LunGetGeometryResponseResult { - o.SectorsPerTrackPtr = &newValue - return o -} - -// Size is a 'getter' method -func (o *LunGetGeometryResponseResult) Size() int { - r := *o.SizePtr - return r -} - -// SetSize is a fluent style 'setter' method that can be chained -func (o *LunGetGeometryResponseResult) SetSize(newValue int) *LunGetGeometryResponseResult { - o.SizePtr = &newValue - return o -} - -// TracksPerCylinder is a 'getter' method -func (o *LunGetGeometryResponseResult) TracksPerCylinder() int { - r := *o.TracksPerCylinderPtr - return r -} - -// SetTracksPerCylinder is a fluent style 'setter' method that can be chained -func (o *LunGetGeometryResponseResult) SetTracksPerCylinder(newValue int) *LunGetGeometryResponseResult { - o.TracksPerCylinderPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-iter.go deleted file mode 100644 index d6c0457c5..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-iter.go +++ /dev/null @@ -1,386 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunGetIterRequest is a structure to represent a lun-get-iter Request ZAPI 
object -type LunGetIterRequest struct { - XMLName xml.Name `xml:"lun-get-iter"` - DesiredAttributesPtr *LunGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *LunGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// LunGetIterResponse is a structure to represent a lun-get-iter Response ZAPI object -type LunGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunGetIterResponseResult `xml:"results"` -} - -// NewLunGetIterResponse is a factory method for creating new instances of LunGetIterResponse objects -func NewLunGetIterResponse() *LunGetIterResponse { - return &LunGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunGetIterResponseResult is a structure to represent a lun-get-iter Response Result ZAPI object -type LunGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *LunGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` - VolumeErrorsPtr *LunGetIterResponseResultVolumeErrors `xml:"volume-errors"` -} - -// NewLunGetIterRequest is a factory method for creating new instances of LunGetIterRequest objects -func NewLunGetIterRequest() *LunGetIterRequest { - return &LunGetIterRequest{} -} - -// NewLunGetIterResponseResult is a factory method for creating new instances of LunGetIterResponseResult objects -func NewLunGetIterResponseResult() *LunGetIterResponseResult { - return &LunGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*LunGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunGetIterRequest) executeWithoutIteration(zr 
*ZapiRunner) (*LunGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "LunGetIterRequest", NewLunGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*LunGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *LunGetIterRequest) executeWithIteration(zr *ZapiRunner) (*LunGetIterResponse, error) { - combined := NewLunGetIterResponse() - combined.Result.SetAttributesList(LunGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(LunGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// LunGetIterRequestDesiredAttributes is a wrapper -type LunGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - LunInfoPtr *LunInfoType `xml:"lun-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// LunInfo is a 'getter' method -func (o *LunGetIterRequestDesiredAttributes) LunInfo() LunInfoType { - r := *o.LunInfoPtr - return r -} - -// SetLunInfo is a fluent style 'setter' method that can be chained -func (o *LunGetIterRequestDesiredAttributes) SetLunInfo(newValue LunInfoType) *LunGetIterRequestDesiredAttributes { - o.LunInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *LunGetIterRequest) DesiredAttributes() LunGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *LunGetIterRequest) SetDesiredAttributes(newValue LunGetIterRequestDesiredAttributes) *LunGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *LunGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *LunGetIterRequest) SetMaxRecords(newValue int) *LunGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// LunGetIterRequestQuery is a wrapper -type LunGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - LunInfoPtr *LunInfoType 
`xml:"lun-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// LunInfo is a 'getter' method -func (o *LunGetIterRequestQuery) LunInfo() LunInfoType { - r := *o.LunInfoPtr - return r -} - -// SetLunInfo is a fluent style 'setter' method that can be chained -func (o *LunGetIterRequestQuery) SetLunInfo(newValue LunInfoType) *LunGetIterRequestQuery { - o.LunInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *LunGetIterRequest) Query() LunGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *LunGetIterRequest) SetQuery(newValue LunGetIterRequestQuery) *LunGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *LunGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *LunGetIterRequest) SetTag(newValue string) *LunGetIterRequest { - o.TagPtr = &newValue - return o -} - -// LunGetIterResponseResultAttributesList is a wrapper -type LunGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - LunInfoPtr []LunInfoType `xml:"lun-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// LunInfo is a 'getter' method -func (o *LunGetIterResponseResultAttributesList) LunInfo() []LunInfoType { - r := o.LunInfoPtr - return r -} - -// SetLunInfo is a fluent style 'setter' method that can be chained -func (o *LunGetIterResponseResultAttributesList) SetLunInfo(newValue []LunInfoType) *LunGetIterResponseResultAttributesList { - newSlice := make([]LunInfoType, len(newValue)) - copy(newSlice, newValue) - o.LunInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *LunGetIterResponseResultAttributesList) values() []LunInfoType { - r := o.LunInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *LunGetIterResponseResultAttributesList) setValues(newValue []LunInfoType) *LunGetIterResponseResultAttributesList { - newSlice := make([]LunInfoType, len(newValue)) - copy(newSlice, newValue) - o.LunInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *LunGetIterResponseResult) AttributesList() LunGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *LunGetIterResponseResult) SetAttributesList(newValue LunGetIterResponseResultAttributesList) *LunGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *LunGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *LunGetIterResponseResult) SetNextTag(newValue string) *LunGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *LunGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *LunGetIterResponseResult) SetNumRecords(newValue int) *LunGetIterResponseResult 
{ - o.NumRecordsPtr = &newValue - return o -} - -// LunGetIterResponseResultVolumeErrors is a wrapper -type LunGetIterResponseResultVolumeErrors struct { - XMLName xml.Name `xml:"volume-errors"` - VolumeErrorPtr []VolumeErrorType `xml:"volume-error"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetIterResponseResultVolumeErrors) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeError is a 'getter' method -func (o *LunGetIterResponseResultVolumeErrors) VolumeError() []VolumeErrorType { - r := o.VolumeErrorPtr - return r -} - -// SetVolumeError is a fluent style 'setter' method that can be chained -func (o *LunGetIterResponseResultVolumeErrors) SetVolumeError(newValue []VolumeErrorType) *LunGetIterResponseResultVolumeErrors { - newSlice := make([]VolumeErrorType, len(newValue)) - copy(newSlice, newValue) - o.VolumeErrorPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *LunGetIterResponseResultVolumeErrors) values() []VolumeErrorType { - r := o.VolumeErrorPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *LunGetIterResponseResultVolumeErrors) setValues(newValue []VolumeErrorType) *LunGetIterResponseResultVolumeErrors { - newSlice := make([]VolumeErrorType, len(newValue)) - copy(newSlice, newValue) - o.VolumeErrorPtr = newSlice - return o -} - -// VolumeErrors is a 'getter' method -func (o *LunGetIterResponseResult) VolumeErrors() LunGetIterResponseResultVolumeErrors { - r := *o.VolumeErrorsPtr - return r -} - -// SetVolumeErrors is a fluent style 'setter' method that can be chained -func (o *LunGetIterResponseResult) SetVolumeErrors(newValue LunGetIterResponseResultVolumeErrors) *LunGetIterResponseResult { - o.VolumeErrorsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-serial-number.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-serial-number.go deleted file mode 100644 index 8bb63a3ff..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-get-serial-number.go +++ /dev/null @@ -1,128 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunGetSerialNumberRequest is a structure to represent a lun-get-serial-number Request ZAPI object -type LunGetSerialNumberRequest struct { - XMLName xml.Name `xml:"lun-get-serial-number"` - PathPtr *string `xml:"path"` -} - -// LunGetSerialNumberResponse is a structure to represent a lun-get-serial-number Response ZAPI object -type LunGetSerialNumberResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunGetSerialNumberResponseResult `xml:"results"` -} - -// NewLunGetSerialNumberResponse is a factory method for creating new instances of LunGetSerialNumberResponse objects -func NewLunGetSerialNumberResponse() *LunGetSerialNumberResponse { - return &LunGetSerialNumberResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetSerialNumberResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunGetSerialNumberResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - 
return string(output), err -} - -// LunGetSerialNumberResponseResult is a structure to represent a lun-get-serial-number Response Result ZAPI object -type LunGetSerialNumberResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - SerialNumberPtr *string `xml:"serial-number"` -} - -// NewLunGetSerialNumberRequest is a factory method for creating new instances of LunGetSerialNumberRequest objects -func NewLunGetSerialNumberRequest() *LunGetSerialNumberRequest { - return &LunGetSerialNumberRequest{} -} - -// NewLunGetSerialNumberResponseResult is a factory method for creating new instances of LunGetSerialNumberResponseResult objects -func NewLunGetSerialNumberResponseResult() *LunGetSerialNumberResponseResult { - return &LunGetSerialNumberResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunGetSerialNumberRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunGetSerialNumberResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetSerialNumberRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunGetSerialNumberResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunGetSerialNumberRequest) ExecuteUsing(zr *ZapiRunner) (*LunGetSerialNumberResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunGetSerialNumberRequest) executeWithoutIteration(zr *ZapiRunner) (*LunGetSerialNumberResponse, error) { - result, err := zr.ExecuteUsing(o, "LunGetSerialNumberRequest", NewLunGetSerialNumberResponse()) - if result == nil { - return nil, err - } - return result.(*LunGetSerialNumberResponse), err -} - -// Path is a 'getter' method -func (o *LunGetSerialNumberRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunGetSerialNumberRequest) SetPath(newValue string) *LunGetSerialNumberRequest { - o.PathPtr = &newValue - return o -} - -// SerialNumber is a 'getter' method -func (o *LunGetSerialNumberResponseResult) SerialNumber() string { - r := *o.SerialNumberPtr - return r -} - -// SetSerialNumber is a fluent style 'setter' method that can be chained -func (o *LunGetSerialNumberResponseResult) SetSerialNumber(newValue string) *LunGetSerialNumberResponseResult { - o.SerialNumberPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-map-list-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-map-list-info.go deleted file mode 100644 index 78d679af2..000000000 --- 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-map-list-info.go +++ /dev/null @@ -1,167 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunMapListInfoRequest is a structure to represent a lun-map-list-info Request ZAPI object -type LunMapListInfoRequest struct { - XMLName xml.Name `xml:"lun-map-list-info"` - PathPtr *string `xml:"path"` -} - -// LunMapListInfoResponse is a structure to represent a lun-map-list-info Response ZAPI object -type LunMapListInfoResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunMapListInfoResponseResult `xml:"results"` -} - -// NewLunMapListInfoResponse is a factory method for creating new instances of LunMapListInfoResponse objects -func NewLunMapListInfoResponse() *LunMapListInfoResponse { - return &LunMapListInfoResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunMapListInfoResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunMapListInfoResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunMapListInfoResponseResult is a structure to represent a lun-map-list-info Response Result ZAPI object -type LunMapListInfoResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - InitiatorGroupsPtr *LunMapListInfoResponseResultInitiatorGroups `xml:"initiator-groups"` -} - -// NewLunMapListInfoRequest is a factory method for creating new instances of LunMapListInfoRequest objects -func NewLunMapListInfoRequest() *LunMapListInfoRequest { - return &LunMapListInfoRequest{} -} - -// NewLunMapListInfoResponseResult is a factory method for creating new instances of LunMapListInfoResponseResult objects -func NewLunMapListInfoResponseResult() *LunMapListInfoResponseResult { - return &LunMapListInfoResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunMapListInfoRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunMapListInfoResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunMapListInfoRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunMapListInfoResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunMapListInfoRequest) ExecuteUsing(zr *ZapiRunner) (*LunMapListInfoResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation 
and uses the supplied ZapiRunner to send to a filer - -func (o *LunMapListInfoRequest) executeWithoutIteration(zr *ZapiRunner) (*LunMapListInfoResponse, error) { - result, err := zr.ExecuteUsing(o, "LunMapListInfoRequest", NewLunMapListInfoResponse()) - if result == nil { - return nil, err - } - return result.(*LunMapListInfoResponse), err -} - -// Path is a 'getter' method -func (o *LunMapListInfoRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunMapListInfoRequest) SetPath(newValue string) *LunMapListInfoRequest { - o.PathPtr = &newValue - return o -} - -// LunMapListInfoResponseResultInitiatorGroups is a wrapper -type LunMapListInfoResponseResultInitiatorGroups struct { - XMLName xml.Name `xml:"initiator-groups"` - InitiatorGroupInfoPtr []InitiatorGroupInfoType `xml:"initiator-group-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunMapListInfoResponseResultInitiatorGroups) String() string { - return ToString(reflect.ValueOf(o)) -} - -// InitiatorGroupInfo is a 'getter' method -func (o *LunMapListInfoResponseResultInitiatorGroups) InitiatorGroupInfo() []InitiatorGroupInfoType { - r := o.InitiatorGroupInfoPtr - return r -} - -// SetInitiatorGroupInfo is a fluent style 'setter' method that can be chained -func (o *LunMapListInfoResponseResultInitiatorGroups) SetInitiatorGroupInfo(newValue []InitiatorGroupInfoType) *LunMapListInfoResponseResultInitiatorGroups { - newSlice := make([]InitiatorGroupInfoType, len(newValue)) - copy(newSlice, newValue) - o.InitiatorGroupInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *LunMapListInfoResponseResultInitiatorGroups) values() []InitiatorGroupInfoType { - r := o.InitiatorGroupInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *LunMapListInfoResponseResultInitiatorGroups) setValues(newValue []InitiatorGroupInfoType) *LunMapListInfoResponseResultInitiatorGroups { - newSlice := make([]InitiatorGroupInfoType, len(newValue)) - copy(newSlice, newValue) - o.InitiatorGroupInfoPtr = newSlice - return o -} - -// InitiatorGroups is a 'getter' method -func (o *LunMapListInfoResponseResult) InitiatorGroups() LunMapListInfoResponseResultInitiatorGroups { - r := *o.InitiatorGroupsPtr - return r -} - -// SetInitiatorGroups is a fluent style 'setter' method that can be chained -func (o *LunMapListInfoResponseResult) SetInitiatorGroups(newValue LunMapListInfoResponseResultInitiatorGroups) *LunMapListInfoResponseResult { - o.InitiatorGroupsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-map.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-map.go deleted file mode 100644 index b3e6813ed..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-map.go +++ /dev/null @@ -1,180 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunMapRequest is a structure to represent a lun-map Request ZAPI object -type LunMapRequest struct { - XMLName xml.Name `xml:"lun-map"` - AdditionalReportingNodePtr *NodeNameType `xml:"additional-reporting-node"` - ForcePtr *bool `xml:"force"` - InitiatorGroupPtr *string `xml:"initiator-group"` - LunIdPtr *int `xml:"lun-id"` - PathPtr *string `xml:"path"` -} - -// LunMapResponse is a structure to represent a lun-map Response ZAPI 
object -type LunMapResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunMapResponseResult `xml:"results"` -} - -// NewLunMapResponse is a factory method for creating new instances of LunMapResponse objects -func NewLunMapResponse() *LunMapResponse { - return &LunMapResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunMapResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunMapResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunMapResponseResult is a structure to represent a lun-map Response Result ZAPI object -type LunMapResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - LunIdAssignedPtr *int `xml:"lun-id-assigned"` -} - -// NewLunMapRequest is a factory method for creating new instances of LunMapRequest objects -func NewLunMapRequest() *LunMapRequest { - return &LunMapRequest{} -} - -// NewLunMapResponseResult is a factory method for creating new instances of LunMapResponseResult objects -func NewLunMapResponseResult() *LunMapResponseResult { - return &LunMapResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunMapRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunMapResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunMapRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunMapResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunMapRequest) ExecuteUsing(zr *ZapiRunner) (*LunMapResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunMapRequest) executeWithoutIteration(zr *ZapiRunner) (*LunMapResponse, error) { - result, err := zr.ExecuteUsing(o, "LunMapRequest", NewLunMapResponse()) - if result == nil { - return nil, err - } - return result.(*LunMapResponse), err -} - -// AdditionalReportingNode is a 'getter' method -func (o *LunMapRequest) AdditionalReportingNode() NodeNameType { - r := *o.AdditionalReportingNodePtr - return r -} - -// SetAdditionalReportingNode is a fluent style 'setter' method that can be chained -func (o *LunMapRequest) SetAdditionalReportingNode(newValue NodeNameType) *LunMapRequest { - o.AdditionalReportingNodePtr = &newValue - return o -} - -// Force is a 'getter' method -func (o *LunMapRequest) Force() bool { - r := 
*o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o *LunMapRequest) SetForce(newValue bool) *LunMapRequest { - o.ForcePtr = &newValue - return o -} - -// InitiatorGroup is a 'getter' method -func (o *LunMapRequest) InitiatorGroup() string { - r := *o.InitiatorGroupPtr - return r -} - -// SetInitiatorGroup is a fluent style 'setter' method that can be chained -func (o *LunMapRequest) SetInitiatorGroup(newValue string) *LunMapRequest { - o.InitiatorGroupPtr = &newValue - return o -} - -// LunId is a 'getter' method -func (o *LunMapRequest) LunId() int { - r := *o.LunIdPtr - return r -} - -// SetLunId is a fluent style 'setter' method that can be chained -func (o *LunMapRequest) SetLunId(newValue int) *LunMapRequest { - o.LunIdPtr = &newValue - return o -} - -// Path is a 'getter' method -func (o *LunMapRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunMapRequest) SetPath(newValue string) *LunMapRequest { - o.PathPtr = &newValue - return o -} - -// LunIdAssigned is a 'getter' method -func (o *LunMapResponseResult) LunIdAssigned() int { - r := *o.LunIdAssignedPtr - return r -} - -// SetLunIdAssigned is a fluent style 'setter' method that can be chained -func (o *LunMapResponseResult) SetLunIdAssigned(newValue int) *LunMapResponseResult { - o.LunIdAssignedPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-offline.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-offline.go deleted file mode 100644 index c12116642..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-offline.go +++ /dev/null @@ -1,115 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunOfflineRequest is a structure to represent a lun-offline Request ZAPI object -type LunOfflineRequest struct { - XMLName xml.Name `xml:"lun-offline"` - PathPtr *string `xml:"path"` -} - -// LunOfflineResponse is a structure to represent a lun-offline Response ZAPI object -type LunOfflineResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunOfflineResponseResult `xml:"results"` -} - -// NewLunOfflineResponse is a factory method for creating new instances of LunOfflineResponse objects -func NewLunOfflineResponse() *LunOfflineResponse { - return &LunOfflineResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunOfflineResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunOfflineResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunOfflineResponseResult is a structure to represent a lun-offline Response Result ZAPI object -type LunOfflineResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewLunOfflineRequest is a factory method for creating new instances of LunOfflineRequest objects -func NewLunOfflineRequest() *LunOfflineRequest { - return &LunOfflineRequest{} -} - -// 
NewLunOfflineResponseResult is a factory method for creating new instances of LunOfflineResponseResult objects -func NewLunOfflineResponseResult() *LunOfflineResponseResult { - return &LunOfflineResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunOfflineRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunOfflineResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunOfflineRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunOfflineResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunOfflineRequest) ExecuteUsing(zr *ZapiRunner) (*LunOfflineResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunOfflineRequest) executeWithoutIteration(zr *ZapiRunner) (*LunOfflineResponse, error) { - result, err := zr.ExecuteUsing(o, "LunOfflineRequest", NewLunOfflineResponse()) - if result == nil { - return nil, err - } - return result.(*LunOfflineResponse), err -} - -// Path is a 'getter' method -func (o *LunOfflineRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunOfflineRequest) SetPath(newValue string) *LunOfflineRequest { - o.PathPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-online.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-online.go deleted file mode 100644 index 4d886526a..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-online.go +++ /dev/null @@ -1,128 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunOnlineRequest is a structure to represent a lun-online Request ZAPI object -type LunOnlineRequest struct { - XMLName xml.Name `xml:"lun-online"` - ForcePtr *bool `xml:"force"` - PathPtr *string `xml:"path"` -} - -// LunOnlineResponse is a structure to represent a lun-online Response ZAPI object -type LunOnlineResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunOnlineResponseResult `xml:"results"` -} - -// NewLunOnlineResponse is a factory method for creating new instances of LunOnlineResponse objects -func NewLunOnlineResponse() *LunOnlineResponse { - return &LunOnlineResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunOnlineResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunOnlineResponse) ToXML() (string, error) { - output, err := 
xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunOnlineResponseResult is a structure to represent a lun-online Response Result ZAPI object -type LunOnlineResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewLunOnlineRequest is a factory method for creating new instances of LunOnlineRequest objects -func NewLunOnlineRequest() *LunOnlineRequest { - return &LunOnlineRequest{} -} - -// NewLunOnlineResponseResult is a factory method for creating new instances of LunOnlineResponseResult objects -func NewLunOnlineResponseResult() *LunOnlineResponseResult { - return &LunOnlineResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunOnlineRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunOnlineResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunOnlineRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunOnlineResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunOnlineRequest) ExecuteUsing(zr *ZapiRunner) (*LunOnlineResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunOnlineRequest) executeWithoutIteration(zr *ZapiRunner) (*LunOnlineResponse, error) { - result, err := zr.ExecuteUsing(o, "LunOnlineRequest", NewLunOnlineResponse()) - if result == nil { - return nil, err - } - return result.(*LunOnlineResponse), err -} - -// Force is a 'getter' method -func (o *LunOnlineRequest) Force() bool { - r := *o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o *LunOnlineRequest) SetForce(newValue bool) *LunOnlineRequest { - o.ForcePtr = &newValue - return o -} - -// Path is a 'getter' method -func (o *LunOnlineRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunOnlineRequest) SetPath(newValue string) *LunOnlineRequest { - o.PathPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-resize.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-resize.go deleted file mode 100644 index 35a326b0f..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-resize.go +++ /dev/null @@ -1,154 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunResizeRequest is a structure to represent a lun-resize Request ZAPI object -type LunResizeRequest struct { - XMLName xml.Name 
`xml:"lun-resize"` - ForcePtr *bool `xml:"force"` - PathPtr *string `xml:"path"` - SizePtr *int `xml:"size"` -} - -// LunResizeResponse is a structure to represent a lun-resize Response ZAPI object -type LunResizeResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunResizeResponseResult `xml:"results"` -} - -// NewLunResizeResponse is a factory method for creating new instances of LunResizeResponse objects -func NewLunResizeResponse() *LunResizeResponse { - return &LunResizeResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunResizeResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunResizeResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunResizeResponseResult is a structure to represent a lun-resize Response Result ZAPI object -type LunResizeResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ActualSizePtr *int `xml:"actual-size"` -} - -// NewLunResizeRequest is a factory method for creating new instances of LunResizeRequest objects -func NewLunResizeRequest() *LunResizeRequest { - return &LunResizeRequest{} -} - -// NewLunResizeResponseResult is a factory method for creating new instances of LunResizeResponseResult objects -func NewLunResizeResponseResult() *LunResizeResponseResult { - return &LunResizeResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *LunResizeRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunResizeResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunResizeRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunResizeResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunResizeRequest) ExecuteUsing(zr *ZapiRunner) (*LunResizeResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunResizeRequest) executeWithoutIteration(zr *ZapiRunner) (*LunResizeResponse, error) { - result, err := zr.ExecuteUsing(o, "LunResizeRequest", NewLunResizeResponse()) - if result == nil { - return nil, err - } - return result.(*LunResizeResponse), err -} - -// Force is a 'getter' method -func (o *LunResizeRequest) Force() bool { - r := *o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o 
*LunResizeRequest) SetForce(newValue bool) *LunResizeRequest { - o.ForcePtr = &newValue - return o -} - -// Path is a 'getter' method -func (o *LunResizeRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunResizeRequest) SetPath(newValue string) *LunResizeRequest { - o.PathPtr = &newValue - return o -} - -// Size is a 'getter' method -func (o *LunResizeRequest) Size() int { - r := *o.SizePtr - return r -} - -// SetSize is a fluent style 'setter' method that can be chained -func (o *LunResizeRequest) SetSize(newValue int) *LunResizeRequest { - o.SizePtr = &newValue - return o -} - -// ActualSize is a 'getter' method -func (o *LunResizeResponseResult) ActualSize() int { - r := *o.ActualSizePtr - return r -} - -// SetActualSize is a fluent style 'setter' method that can be chained -func (o *LunResizeResponseResult) SetActualSize(newValue int) *LunResizeResponseResult { - o.ActualSizePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-set-attribute.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-set-attribute.go deleted file mode 100644 index af16daee0..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-lun-set-attribute.go +++ /dev/null @@ -1,141 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunSetAttributeRequest is a structure to represent a lun-set-attribute Request ZAPI object -type LunSetAttributeRequest struct { - XMLName xml.Name `xml:"lun-set-attribute"` - NamePtr *string `xml:"name"` - PathPtr *string `xml:"path"` - ValuePtr *string `xml:"value"` -} - -// LunSetAttributeResponse is a structure to represent a lun-set-attribute Response ZAPI object -type LunSetAttributeResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result LunSetAttributeResponseResult `xml:"results"` -} - -// NewLunSetAttributeResponse is a factory method for creating new instances of LunSetAttributeResponse objects -func NewLunSetAttributeResponse() *LunSetAttributeResponse { - return &LunSetAttributeResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunSetAttributeResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *LunSetAttributeResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// LunSetAttributeResponseResult is a structure to represent a lun-set-attribute Response Result ZAPI object -type LunSetAttributeResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewLunSetAttributeRequest is a factory method for creating new instances of LunSetAttributeRequest objects -func NewLunSetAttributeRequest() *LunSetAttributeRequest { - return &LunSetAttributeRequest{} -} - -// NewLunSetAttributeResponseResult is a factory method for creating new instances of LunSetAttributeResponseResult objects -func NewLunSetAttributeResponseResult() *LunSetAttributeResponseResult { - return &LunSetAttributeResponseResult{} -} - -// ToXML 
converts this object into an xml string representation -func (o *LunSetAttributeRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *LunSetAttributeResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunSetAttributeRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunSetAttributeResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunSetAttributeRequest) ExecuteUsing(zr *ZapiRunner) (*LunSetAttributeResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *LunSetAttributeRequest) executeWithoutIteration(zr *ZapiRunner) (*LunSetAttributeResponse, error) { - result, err := zr.ExecuteUsing(o, "LunSetAttributeRequest", NewLunSetAttributeResponse()) - if result == nil { - return nil, err - } - return result.(*LunSetAttributeResponse), err -} - -// Name is a 'getter' method -func (o *LunSetAttributeRequest) Name() string { - r := *o.NamePtr - return r -} - -// SetName is a fluent style 'setter' method that can be chained -func (o *LunSetAttributeRequest) SetName(newValue string) *LunSetAttributeRequest { - o.NamePtr = &newValue - return o -} - -// Path is a 'getter' method -func (o *LunSetAttributeRequest) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunSetAttributeRequest) SetPath(newValue string) *LunSetAttributeRequest { - o.PathPtr = &newValue - return o -} - -// Value is a 'getter' method -func (o *LunSetAttributeRequest) Value() string { - r := *o.ValuePtr - return r -} - -// SetValue is a fluent style 'setter' method that can be chained -func (o *LunSetAttributeRequest) SetValue(newValue string) *LunSetAttributeRequest { - o.ValuePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-net-interface-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-net-interface-get-iter.go deleted file mode 100644 index 7268346f2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-net-interface-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// NetInterfaceGetIterRequest is a structure to represent a net-interface-get-iter Request ZAPI object -type NetInterfaceGetIterRequest struct { - XMLName xml.Name `xml:"net-interface-get-iter"` - DesiredAttributesPtr *NetInterfaceGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *NetInterfaceGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// NetInterfaceGetIterResponse is a structure to represent a net-interface-get-iter Response ZAPI object -type 
NetInterfaceGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result NetInterfaceGetIterResponseResult `xml:"results"` -} - -// NewNetInterfaceGetIterResponse is a factory method for creating new instances of NetInterfaceGetIterResponse objects -func NewNetInterfaceGetIterResponse() *NetInterfaceGetIterResponse { - return &NetInterfaceGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NetInterfaceGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *NetInterfaceGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// NetInterfaceGetIterResponseResult is a structure to represent a net-interface-get-iter Response Result ZAPI object -type NetInterfaceGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *NetInterfaceGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewNetInterfaceGetIterRequest is a factory method for creating new instances of NetInterfaceGetIterRequest objects -func NewNetInterfaceGetIterRequest() *NetInterfaceGetIterRequest { - return &NetInterfaceGetIterRequest{} -} - -// NewNetInterfaceGetIterResponseResult is a factory method for creating new instances of NetInterfaceGetIterResponseResult objects -func NewNetInterfaceGetIterResponseResult() *NetInterfaceGetIterResponseResult { - return &NetInterfaceGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *NetInterfaceGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *NetInterfaceGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NetInterfaceGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NetInterfaceGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *NetInterfaceGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*NetInterfaceGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *NetInterfaceGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*NetInterfaceGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "NetInterfaceGetIterRequest", NewNetInterfaceGetIterResponse()) - if result == nil { - return nil, err - } - 
return result.(*NetInterfaceGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *NetInterfaceGetIterRequest) executeWithIteration(zr *ZapiRunner) (*NetInterfaceGetIterResponse, error) { - combined := NewNetInterfaceGetIterResponse() - combined.Result.SetAttributesList(NetInterfaceGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(NetInterfaceGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// NetInterfaceGetIterRequestDesiredAttributes is a wrapper -type NetInterfaceGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - NetInterfaceInfoPtr *NetInterfaceInfoType `xml:"net-interface-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NetInterfaceGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// NetInterfaceInfo is a 'getter' method -func (o *NetInterfaceGetIterRequestDesiredAttributes) NetInterfaceInfo() NetInterfaceInfoType { - r := *o.NetInterfaceInfoPtr - return r -} - -// SetNetInterfaceInfo is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterRequestDesiredAttributes) SetNetInterfaceInfo(newValue NetInterfaceInfoType) *NetInterfaceGetIterRequestDesiredAttributes { - o.NetInterfaceInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *NetInterfaceGetIterRequest) DesiredAttributes() NetInterfaceGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterRequest) SetDesiredAttributes(newValue NetInterfaceGetIterRequestDesiredAttributes) *NetInterfaceGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *NetInterfaceGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterRequest) SetMaxRecords(newValue int) *NetInterfaceGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// NetInterfaceGetIterRequestQuery is 
a wrapper -type NetInterfaceGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - NetInterfaceInfoPtr *NetInterfaceInfoType `xml:"net-interface-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NetInterfaceGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// NetInterfaceInfo is a 'getter' method -func (o *NetInterfaceGetIterRequestQuery) NetInterfaceInfo() NetInterfaceInfoType { - r := *o.NetInterfaceInfoPtr - return r -} - -// SetNetInterfaceInfo is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterRequestQuery) SetNetInterfaceInfo(newValue NetInterfaceInfoType) *NetInterfaceGetIterRequestQuery { - o.NetInterfaceInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *NetInterfaceGetIterRequest) Query() NetInterfaceGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterRequest) SetQuery(newValue NetInterfaceGetIterRequestQuery) *NetInterfaceGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *NetInterfaceGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterRequest) SetTag(newValue string) *NetInterfaceGetIterRequest { - o.TagPtr = &newValue - return o -} - -// NetInterfaceGetIterResponseResultAttributesList is a wrapper -type NetInterfaceGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - NetInterfaceInfoPtr []NetInterfaceInfoType `xml:"net-interface-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NetInterfaceGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// NetInterfaceInfo is a 'getter' method -func (o *NetInterfaceGetIterResponseResultAttributesList) NetInterfaceInfo() []NetInterfaceInfoType { - r := o.NetInterfaceInfoPtr - return r -} - -// SetNetInterfaceInfo is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterResponseResultAttributesList) SetNetInterfaceInfo(newValue []NetInterfaceInfoType) *NetInterfaceGetIterResponseResultAttributesList { - newSlice := make([]NetInterfaceInfoType, len(newValue)) - copy(newSlice, newValue) - o.NetInterfaceInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *NetInterfaceGetIterResponseResultAttributesList) values() []NetInterfaceInfoType { - r := o.NetInterfaceInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterResponseResultAttributesList) setValues(newValue []NetInterfaceInfoType) *NetInterfaceGetIterResponseResultAttributesList { - newSlice := make([]NetInterfaceInfoType, len(newValue)) - copy(newSlice, newValue) - o.NetInterfaceInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *NetInterfaceGetIterResponseResult) AttributesList() NetInterfaceGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterResponseResult) SetAttributesList(newValue NetInterfaceGetIterResponseResultAttributesList) *NetInterfaceGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 
'getter' method -func (o *NetInterfaceGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterResponseResult) SetNextTag(newValue string) *NetInterfaceGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *NetInterfaceGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *NetInterfaceGetIterResponseResult) SetNumRecords(newValue int) *NetInterfaceGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-create.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-create.go deleted file mode 100644 index 393bcf815..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-create.go +++ /dev/null @@ -1,180 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QtreeCreateRequest is a structure to represent a qtree-create Request ZAPI object -type QtreeCreateRequest struct { - XMLName xml.Name `xml:"qtree-create"` - ExportPolicyPtr *string `xml:"export-policy"` - ModePtr *string `xml:"mode"` - OplocksPtr *string `xml:"oplocks"` - QtreePtr *string `xml:"qtree"` - SecurityStylePtr *string `xml:"security-style"` - VolumePtr *string `xml:"volume"` -} - -// QtreeCreateResponse is a structure to represent a qtree-create Response ZAPI object -type QtreeCreateResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result QtreeCreateResponseResult `xml:"results"` -} - -// NewQtreeCreateResponse is a factory method for creating new instances of QtreeCreateResponse objects -func NewQtreeCreateResponse() *QtreeCreateResponse { - return &QtreeCreateResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeCreateResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QtreeCreateResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QtreeCreateResponseResult is a structure to represent a qtree-create Response Result ZAPI object -type QtreeCreateResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewQtreeCreateRequest is a factory method for creating new instances of QtreeCreateRequest objects -func NewQtreeCreateRequest() *QtreeCreateRequest { - return &QtreeCreateRequest{} -} - -// NewQtreeCreateResponseResult is a factory method for creating new instances of QtreeCreateResponseResult objects -func NewQtreeCreateResponseResult() *QtreeCreateResponseResult { - return &QtreeCreateResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QtreeCreateRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string 
representation -func (o *QtreeCreateResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeCreateRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeCreateResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QtreeCreateRequest) ExecuteUsing(zr *ZapiRunner) (*QtreeCreateResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QtreeCreateRequest) executeWithoutIteration(zr *ZapiRunner) (*QtreeCreateResponse, error) { - result, err := zr.ExecuteUsing(o, "QtreeCreateRequest", NewQtreeCreateResponse()) - if result == nil { - return nil, err - } - return result.(*QtreeCreateResponse), err -} - -// ExportPolicy is a 'getter' method -func (o *QtreeCreateRequest) ExportPolicy() string { - r := *o.ExportPolicyPtr - return r -} - -// SetExportPolicy is a fluent style 'setter' method that can be chained -func (o *QtreeCreateRequest) SetExportPolicy(newValue string) *QtreeCreateRequest { - o.ExportPolicyPtr = &newValue - return o -} - -// Mode is a 'getter' method -func (o *QtreeCreateRequest) Mode() string { - r := *o.ModePtr - return r -} - -// SetMode is a fluent style 'setter' method that can be chained -func (o *QtreeCreateRequest) SetMode(newValue string) *QtreeCreateRequest { - o.ModePtr = &newValue - return o -} - -// Oplocks is a 'getter' method -func (o *QtreeCreateRequest) Oplocks() string { - r := *o.OplocksPtr - return r -} - -// SetOplocks is a fluent style 'setter' method that can be chained -func (o *QtreeCreateRequest) SetOplocks(newValue string) *QtreeCreateRequest { - o.OplocksPtr = &newValue - return o -} - -// Qtree is a 'getter' method -func (o *QtreeCreateRequest) Qtree() string { - r := *o.QtreePtr - return r -} - -// SetQtree is a fluent style 'setter' method that can be chained -func (o *QtreeCreateRequest) SetQtree(newValue string) *QtreeCreateRequest { - o.QtreePtr = &newValue - return o -} - -// SecurityStyle is a 'getter' method -func (o *QtreeCreateRequest) SecurityStyle() string { - r := *o.SecurityStylePtr - return r -} - -// SetSecurityStyle is a fluent style 'setter' method that can be chained -func (o *QtreeCreateRequest) SetSecurityStyle(newValue string) *QtreeCreateRequest { - o.SecurityStylePtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *QtreeCreateRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *QtreeCreateRequest) SetVolume(newValue string) *QtreeCreateRequest { - o.VolumePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-delete-async.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-delete-async.go deleted file mode 100644 index a07fd9559..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-delete-async.go +++ /dev/null @@ -1,180 +0,0 @@ -package 
azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QtreeDeleteAsyncRequest is a structure to represent a qtree-delete-async Request ZAPI object -type QtreeDeleteAsyncRequest struct { - XMLName xml.Name `xml:"qtree-delete-async"` - ForcePtr *bool `xml:"force"` - QtreePtr *string `xml:"qtree"` -} - -// QtreeDeleteAsyncResponse is a structure to represent a qtree-delete-async Response ZAPI object -type QtreeDeleteAsyncResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result QtreeDeleteAsyncResponseResult `xml:"results"` -} - -// NewQtreeDeleteAsyncResponse is a factory method for creating new instances of QtreeDeleteAsyncResponse objects -func NewQtreeDeleteAsyncResponse() *QtreeDeleteAsyncResponse { - return &QtreeDeleteAsyncResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeDeleteAsyncResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QtreeDeleteAsyncResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QtreeDeleteAsyncResponseResult is a structure to represent a qtree-delete-async Response Result ZAPI object -type QtreeDeleteAsyncResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ResultErrorCodePtr *int `xml:"result-error-code"` - ResultErrorMessagePtr *string `xml:"result-error-message"` - ResultJobidPtr *int `xml:"result-jobid"` - ResultStatusPtr *string `xml:"result-status"` -} - -// NewQtreeDeleteAsyncRequest is a factory method for creating new instances of QtreeDeleteAsyncRequest objects -func NewQtreeDeleteAsyncRequest() *QtreeDeleteAsyncRequest { - return &QtreeDeleteAsyncRequest{} -} - -// NewQtreeDeleteAsyncResponseResult is a factory method for creating new instances of QtreeDeleteAsyncResponseResult objects -func NewQtreeDeleteAsyncResponseResult() *QtreeDeleteAsyncResponseResult { - return &QtreeDeleteAsyncResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QtreeDeleteAsyncRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *QtreeDeleteAsyncResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeDeleteAsyncRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeDeleteAsyncResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QtreeDeleteAsyncRequest) ExecuteUsing(zr *ZapiRunner) (*QtreeDeleteAsyncResponse, error) { - return o.executeWithoutIteration(zr) -} - 
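// Illustrative usage sketch, not part of the vendored file shown above: it assumes
// it lives in the same azgo package and that `zr` is an already configured
// *ZapiRunner; the qtree path argument is a placeholder. It only demonstrates how
// the generated fluent setters, ExecuteUsing, and the optional result fields of the
// async response are typically chained together.
func deleteQtreeAsyncExample(zr *ZapiRunner, qtreePath string) error {
	response, err := NewQtreeDeleteAsyncRequest().
		SetQtree(qtreePath).
		SetForce(true).
		ExecuteUsing(zr)
	if err != nil {
		return err
	}
	// The async variant reports job details in optional result fields, so check
	// the pointers before reading them through the generated getters.
	if response.Result.ResultStatusPtr != nil {
		log.Debugf("qtree-delete-async status: %s", response.Result.ResultStatus())
	}
	if response.Result.ResultJobidPtr != nil {
		log.Debugf("qtree-delete-async jobid: %d", response.Result.ResultJobid())
	}
	return nil
}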
-// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QtreeDeleteAsyncRequest) executeWithoutIteration(zr *ZapiRunner) (*QtreeDeleteAsyncResponse, error) { - result, err := zr.ExecuteUsing(o, "QtreeDeleteAsyncRequest", NewQtreeDeleteAsyncResponse()) - if result == nil { - return nil, err - } - return result.(*QtreeDeleteAsyncResponse), err -} - -// Force is a 'getter' method -func (o *QtreeDeleteAsyncRequest) Force() bool { - r := *o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o *QtreeDeleteAsyncRequest) SetForce(newValue bool) *QtreeDeleteAsyncRequest { - o.ForcePtr = &newValue - return o -} - -// Qtree is a 'getter' method -func (o *QtreeDeleteAsyncRequest) Qtree() string { - r := *o.QtreePtr - return r -} - -// SetQtree is a fluent style 'setter' method that can be chained -func (o *QtreeDeleteAsyncRequest) SetQtree(newValue string) *QtreeDeleteAsyncRequest { - o.QtreePtr = &newValue - return o -} - -// ResultErrorCode is a 'getter' method -func (o *QtreeDeleteAsyncResponseResult) ResultErrorCode() int { - r := *o.ResultErrorCodePtr - return r -} - -// SetResultErrorCode is a fluent style 'setter' method that can be chained -func (o *QtreeDeleteAsyncResponseResult) SetResultErrorCode(newValue int) *QtreeDeleteAsyncResponseResult { - o.ResultErrorCodePtr = &newValue - return o -} - -// ResultErrorMessage is a 'getter' method -func (o *QtreeDeleteAsyncResponseResult) ResultErrorMessage() string { - r := *o.ResultErrorMessagePtr - return r -} - -// SetResultErrorMessage is a fluent style 'setter' method that can be chained -func (o *QtreeDeleteAsyncResponseResult) SetResultErrorMessage(newValue string) *QtreeDeleteAsyncResponseResult { - o.ResultErrorMessagePtr = &newValue - return o -} - -// ResultJobid is a 'getter' method -func (o *QtreeDeleteAsyncResponseResult) ResultJobid() int { - r := *o.ResultJobidPtr - return r -} - -// SetResultJobid is a fluent style 'setter' method that can be chained -func (o *QtreeDeleteAsyncResponseResult) SetResultJobid(newValue int) *QtreeDeleteAsyncResponseResult { - o.ResultJobidPtr = &newValue - return o -} - -// ResultStatus is a 'getter' method -func (o *QtreeDeleteAsyncResponseResult) ResultStatus() string { - r := *o.ResultStatusPtr - return r -} - -// SetResultStatus is a fluent style 'setter' method that can be chained -func (o *QtreeDeleteAsyncResponseResult) SetResultStatus(newValue string) *QtreeDeleteAsyncResponseResult { - o.ResultStatusPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-list-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-list-iter.go deleted file mode 100644 index 6bc9b8128..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-list-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QtreeListIterRequest is a structure to represent a qtree-list-iter Request ZAPI object -type QtreeListIterRequest struct { - XMLName xml.Name `xml:"qtree-list-iter"` - DesiredAttributesPtr *QtreeListIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *QtreeListIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// QtreeListIterResponse is a structure to represent a qtree-list-iter Response ZAPI object -type 
QtreeListIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result QtreeListIterResponseResult `xml:"results"` -} - -// NewQtreeListIterResponse is a factory method for creating new instances of QtreeListIterResponse objects -func NewQtreeListIterResponse() *QtreeListIterResponse { - return &QtreeListIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeListIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QtreeListIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QtreeListIterResponseResult is a structure to represent a qtree-list-iter Response Result ZAPI object -type QtreeListIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *QtreeListIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewQtreeListIterRequest is a factory method for creating new instances of QtreeListIterRequest objects -func NewQtreeListIterRequest() *QtreeListIterRequest { - return &QtreeListIterRequest{} -} - -// NewQtreeListIterResponseResult is a factory method for creating new instances of QtreeListIterResponseResult objects -func NewQtreeListIterResponseResult() *QtreeListIterResponseResult { - return &QtreeListIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QtreeListIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *QtreeListIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeListIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeListIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QtreeListIterRequest) ExecuteUsing(zr *ZapiRunner) (*QtreeListIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QtreeListIterRequest) executeWithoutIteration(zr *ZapiRunner) (*QtreeListIterResponse, error) { - result, err := zr.ExecuteUsing(o, "QtreeListIterRequest", NewQtreeListIterResponse()) - if result == nil { - return nil, err - } - return result.(*QtreeListIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o 
*QtreeListIterRequest) executeWithIteration(zr *ZapiRunner) (*QtreeListIterResponse, error) { - combined := NewQtreeListIterResponse() - combined.Result.SetAttributesList(QtreeListIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(QtreeListIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// QtreeListIterRequestDesiredAttributes is a wrapper -type QtreeListIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - QtreeInfoPtr *QtreeInfoType `xml:"qtree-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeListIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// QtreeInfo is a 'getter' method -func (o *QtreeListIterRequestDesiredAttributes) QtreeInfo() QtreeInfoType { - r := *o.QtreeInfoPtr - return r -} - -// SetQtreeInfo is a fluent style 'setter' method that can be chained -func (o *QtreeListIterRequestDesiredAttributes) SetQtreeInfo(newValue QtreeInfoType) *QtreeListIterRequestDesiredAttributes { - o.QtreeInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *QtreeListIterRequest) DesiredAttributes() QtreeListIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *QtreeListIterRequest) SetDesiredAttributes(newValue QtreeListIterRequestDesiredAttributes) *QtreeListIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *QtreeListIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *QtreeListIterRequest) SetMaxRecords(newValue int) *QtreeListIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// QtreeListIterRequestQuery is a wrapper -type QtreeListIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - QtreeInfoPtr *QtreeInfoType `xml:"qtree-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeListIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// QtreeInfo is a 'getter' method -func (o 
*QtreeListIterRequestQuery) QtreeInfo() QtreeInfoType { - r := *o.QtreeInfoPtr - return r -} - -// SetQtreeInfo is a fluent style 'setter' method that can be chained -func (o *QtreeListIterRequestQuery) SetQtreeInfo(newValue QtreeInfoType) *QtreeListIterRequestQuery { - o.QtreeInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *QtreeListIterRequest) Query() QtreeListIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *QtreeListIterRequest) SetQuery(newValue QtreeListIterRequestQuery) *QtreeListIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *QtreeListIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *QtreeListIterRequest) SetTag(newValue string) *QtreeListIterRequest { - o.TagPtr = &newValue - return o -} - -// QtreeListIterResponseResultAttributesList is a wrapper -type QtreeListIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - QtreeInfoPtr []QtreeInfoType `xml:"qtree-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeListIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// QtreeInfo is a 'getter' method -func (o *QtreeListIterResponseResultAttributesList) QtreeInfo() []QtreeInfoType { - r := o.QtreeInfoPtr - return r -} - -// SetQtreeInfo is a fluent style 'setter' method that can be chained -func (o *QtreeListIterResponseResultAttributesList) SetQtreeInfo(newValue []QtreeInfoType) *QtreeListIterResponseResultAttributesList { - newSlice := make([]QtreeInfoType, len(newValue)) - copy(newSlice, newValue) - o.QtreeInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *QtreeListIterResponseResultAttributesList) values() []QtreeInfoType { - r := o.QtreeInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *QtreeListIterResponseResultAttributesList) setValues(newValue []QtreeInfoType) *QtreeListIterResponseResultAttributesList { - newSlice := make([]QtreeInfoType, len(newValue)) - copy(newSlice, newValue) - o.QtreeInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *QtreeListIterResponseResult) AttributesList() QtreeListIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *QtreeListIterResponseResult) SetAttributesList(newValue QtreeListIterResponseResultAttributesList) *QtreeListIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *QtreeListIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *QtreeListIterResponseResult) SetNextTag(newValue string) *QtreeListIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *QtreeListIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *QtreeListIterResponseResult) SetNumRecords(newValue int) *QtreeListIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-rename.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-rename.go deleted file mode 100644 index ce9820e00..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-qtree-rename.go +++ /dev/null @@ -1,128 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QtreeRenameRequest is a structure to represent a qtree-rename Request ZAPI object -type QtreeRenameRequest struct { - XMLName xml.Name `xml:"qtree-rename"` - NewQtreeNamePtr *string `xml:"new-qtree-name"` - QtreePtr *string `xml:"qtree"` -} - -// QtreeRenameResponse is a structure to represent a qtree-rename Response ZAPI object -type QtreeRenameResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result QtreeRenameResponseResult `xml:"results"` -} - -// NewQtreeRenameResponse is a factory method for creating new instances of QtreeRenameResponse objects -func NewQtreeRenameResponse() *QtreeRenameResponse { - return &QtreeRenameResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeRenameResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QtreeRenameResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QtreeRenameResponseResult is a structure to represent a qtree-rename Response Result ZAPI object -type QtreeRenameResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewQtreeRenameRequest is a factory method for creating new instances of QtreeRenameRequest objects -func NewQtreeRenameRequest() *QtreeRenameRequest { - return &QtreeRenameRequest{} -} - -// NewQtreeRenameResponseResult is a factory method for creating new instances of QtreeRenameResponseResult objects -func NewQtreeRenameResponseResult() *QtreeRenameResponseResult { - return &QtreeRenameResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QtreeRenameRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *QtreeRenameResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeRenameRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeRenameResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QtreeRenameRequest) ExecuteUsing(zr *ZapiRunner) (*QtreeRenameResponse, error) { - return o.executeWithoutIteration(zr) -} - -// 
executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QtreeRenameRequest) executeWithoutIteration(zr *ZapiRunner) (*QtreeRenameResponse, error) { - result, err := zr.ExecuteUsing(o, "QtreeRenameRequest", NewQtreeRenameResponse()) - if result == nil { - return nil, err - } - return result.(*QtreeRenameResponse), err -} - -// NewQtreeName is a 'getter' method -func (o *QtreeRenameRequest) NewQtreeName() string { - r := *o.NewQtreeNamePtr - return r -} - -// SetNewQtreeName is a fluent style 'setter' method that can be chained -func (o *QtreeRenameRequest) SetNewQtreeName(newValue string) *QtreeRenameRequest { - o.NewQtreeNamePtr = &newValue - return o -} - -// Qtree is a 'getter' method -func (o *QtreeRenameRequest) Qtree() string { - r := *o.QtreePtr - return r -} - -// SetQtree is a fluent style 'setter' method that can be chained -func (o *QtreeRenameRequest) SetQtree(newValue string) *QtreeRenameRequest { - o.QtreePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-list-entries-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-list-entries-iter.go deleted file mode 100644 index 280bd599b..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-list-entries-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QuotaListEntriesIterRequest is a structure to represent a quota-list-entries-iter Request ZAPI object -type QuotaListEntriesIterRequest struct { - XMLName xml.Name `xml:"quota-list-entries-iter"` - DesiredAttributesPtr *QuotaListEntriesIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *QuotaListEntriesIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// QuotaListEntriesIterResponse is a structure to represent a quota-list-entries-iter Response ZAPI object -type QuotaListEntriesIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result QuotaListEntriesIterResponseResult `xml:"results"` -} - -// NewQuotaListEntriesIterResponse is a factory method for creating new instances of QuotaListEntriesIterResponse objects -func NewQuotaListEntriesIterResponse() *QuotaListEntriesIterResponse { - return &QuotaListEntriesIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaListEntriesIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QuotaListEntriesIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QuotaListEntriesIterResponseResult is a structure to represent a quota-list-entries-iter Response Result ZAPI object -type QuotaListEntriesIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *QuotaListEntriesIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// 
NewQuotaListEntriesIterRequest is a factory method for creating new instances of QuotaListEntriesIterRequest objects -func NewQuotaListEntriesIterRequest() *QuotaListEntriesIterRequest { - return &QuotaListEntriesIterRequest{} -} - -// NewQuotaListEntriesIterResponseResult is a factory method for creating new instances of QuotaListEntriesIterResponseResult objects -func NewQuotaListEntriesIterResponseResult() *QuotaListEntriesIterResponseResult { - return &QuotaListEntriesIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QuotaListEntriesIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *QuotaListEntriesIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaListEntriesIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaListEntriesIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaListEntriesIterRequest) ExecuteUsing(zr *ZapiRunner) (*QuotaListEntriesIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaListEntriesIterRequest) executeWithoutIteration(zr *ZapiRunner) (*QuotaListEntriesIterResponse, error) { - result, err := zr.ExecuteUsing(o, "QuotaListEntriesIterRequest", NewQuotaListEntriesIterResponse()) - if result == nil { - return nil, err - } - return result.(*QuotaListEntriesIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *QuotaListEntriesIterRequest) executeWithIteration(zr *ZapiRunner) (*QuotaListEntriesIterResponse, error) { - combined := NewQuotaListEntriesIterResponse() - combined.Result.SetAttributesList(QuotaListEntriesIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(QuotaListEntriesIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = 
n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// QuotaListEntriesIterRequestDesiredAttributes is a wrapper -type QuotaListEntriesIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - QuotaEntryPtr *QuotaEntryType `xml:"quota-entry"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaListEntriesIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// QuotaEntry is a 'getter' method -func (o *QuotaListEntriesIterRequestDesiredAttributes) QuotaEntry() QuotaEntryType { - r := *o.QuotaEntryPtr - return r -} - -// SetQuotaEntry is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterRequestDesiredAttributes) SetQuotaEntry(newValue QuotaEntryType) *QuotaListEntriesIterRequestDesiredAttributes { - o.QuotaEntryPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *QuotaListEntriesIterRequest) DesiredAttributes() QuotaListEntriesIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterRequest) SetDesiredAttributes(newValue QuotaListEntriesIterRequestDesiredAttributes) *QuotaListEntriesIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *QuotaListEntriesIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterRequest) SetMaxRecords(newValue int) *QuotaListEntriesIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// QuotaListEntriesIterRequestQuery is a wrapper -type QuotaListEntriesIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - QuotaEntryPtr *QuotaEntryType `xml:"quota-entry"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaListEntriesIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// QuotaEntry is a 'getter' method -func (o *QuotaListEntriesIterRequestQuery) QuotaEntry() QuotaEntryType { - r := *o.QuotaEntryPtr - return r -} - -// SetQuotaEntry is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterRequestQuery) SetQuotaEntry(newValue QuotaEntryType) *QuotaListEntriesIterRequestQuery { - o.QuotaEntryPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *QuotaListEntriesIterRequest) Query() QuotaListEntriesIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterRequest) SetQuery(newValue QuotaListEntriesIterRequestQuery) *QuotaListEntriesIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *QuotaListEntriesIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterRequest) SetTag(newValue string) *QuotaListEntriesIterRequest { - o.TagPtr = &newValue - return o -} - -// 
QuotaListEntriesIterResponseResultAttributesList is a wrapper -type QuotaListEntriesIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - QuotaEntryPtr []QuotaEntryType `xml:"quota-entry"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaListEntriesIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// QuotaEntry is a 'getter' method -func (o *QuotaListEntriesIterResponseResultAttributesList) QuotaEntry() []QuotaEntryType { - r := o.QuotaEntryPtr - return r -} - -// SetQuotaEntry is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterResponseResultAttributesList) SetQuotaEntry(newValue []QuotaEntryType) *QuotaListEntriesIterResponseResultAttributesList { - newSlice := make([]QuotaEntryType, len(newValue)) - copy(newSlice, newValue) - o.QuotaEntryPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *QuotaListEntriesIterResponseResultAttributesList) values() []QuotaEntryType { - r := o.QuotaEntryPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterResponseResultAttributesList) setValues(newValue []QuotaEntryType) *QuotaListEntriesIterResponseResultAttributesList { - newSlice := make([]QuotaEntryType, len(newValue)) - copy(newSlice, newValue) - o.QuotaEntryPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *QuotaListEntriesIterResponseResult) AttributesList() QuotaListEntriesIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterResponseResult) SetAttributesList(newValue QuotaListEntriesIterResponseResultAttributesList) *QuotaListEntriesIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *QuotaListEntriesIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterResponseResult) SetNextTag(newValue string) *QuotaListEntriesIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *QuotaListEntriesIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *QuotaListEntriesIterResponseResult) SetNumRecords(newValue int) *QuotaListEntriesIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-off.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-off.go deleted file mode 100644 index a547c99f4..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-off.go +++ /dev/null @@ -1,167 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QuotaOffRequest is a structure to represent a quota-off Request ZAPI object -type QuotaOffRequest struct { - XMLName xml.Name `xml:"quota-off"` - VolumePtr *string `xml:"volume"` -} - -// QuotaOffResponse is a structure to represent a quota-off Response ZAPI object -type QuotaOffResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - 
Result QuotaOffResponseResult `xml:"results"` -} - -// NewQuotaOffResponse is a factory method for creating new instances of QuotaOffResponse objects -func NewQuotaOffResponse() *QuotaOffResponse { - return &QuotaOffResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaOffResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QuotaOffResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QuotaOffResponseResult is a structure to represent a quota-off Response Result ZAPI object -type QuotaOffResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ResultErrorCodePtr *int `xml:"result-error-code"` - ResultErrorMessagePtr *string `xml:"result-error-message"` - ResultJobidPtr *int `xml:"result-jobid"` - ResultStatusPtr *string `xml:"result-status"` -} - -// NewQuotaOffRequest is a factory method for creating new instances of QuotaOffRequest objects -func NewQuotaOffRequest() *QuotaOffRequest { - return &QuotaOffRequest{} -} - -// NewQuotaOffResponseResult is a factory method for creating new instances of QuotaOffResponseResult objects -func NewQuotaOffResponseResult() *QuotaOffResponseResult { - return &QuotaOffResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QuotaOffRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *QuotaOffResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaOffRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaOffResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaOffRequest) ExecuteUsing(zr *ZapiRunner) (*QuotaOffResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaOffRequest) executeWithoutIteration(zr *ZapiRunner) (*QuotaOffResponse, error) { - result, err := zr.ExecuteUsing(o, "QuotaOffRequest", NewQuotaOffResponse()) - if result == nil { - return nil, err - } - return result.(*QuotaOffResponse), err -} - -// Volume is a 'getter' method -func (o *QuotaOffRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *QuotaOffRequest) SetVolume(newValue string) *QuotaOffRequest { - o.VolumePtr = &newValue - return o -} - -// ResultErrorCode is a 'getter' method -func (o *QuotaOffResponseResult) ResultErrorCode() int { - r := *o.ResultErrorCodePtr - 
return r -} - -// SetResultErrorCode is a fluent style 'setter' method that can be chained -func (o *QuotaOffResponseResult) SetResultErrorCode(newValue int) *QuotaOffResponseResult { - o.ResultErrorCodePtr = &newValue - return o -} - -// ResultErrorMessage is a 'getter' method -func (o *QuotaOffResponseResult) ResultErrorMessage() string { - r := *o.ResultErrorMessagePtr - return r -} - -// SetResultErrorMessage is a fluent style 'setter' method that can be chained -func (o *QuotaOffResponseResult) SetResultErrorMessage(newValue string) *QuotaOffResponseResult { - o.ResultErrorMessagePtr = &newValue - return o -} - -// ResultJobid is a 'getter' method -func (o *QuotaOffResponseResult) ResultJobid() int { - r := *o.ResultJobidPtr - return r -} - -// SetResultJobid is a fluent style 'setter' method that can be chained -func (o *QuotaOffResponseResult) SetResultJobid(newValue int) *QuotaOffResponseResult { - o.ResultJobidPtr = &newValue - return o -} - -// ResultStatus is a 'getter' method -func (o *QuotaOffResponseResult) ResultStatus() string { - r := *o.ResultStatusPtr - return r -} - -// SetResultStatus is a fluent style 'setter' method that can be chained -func (o *QuotaOffResponseResult) SetResultStatus(newValue string) *QuotaOffResponseResult { - o.ResultStatusPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-on.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-on.go deleted file mode 100644 index 77b64ee95..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-on.go +++ /dev/null @@ -1,167 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QuotaOnRequest is a structure to represent a quota-on Request ZAPI object -type QuotaOnRequest struct { - XMLName xml.Name `xml:"quota-on"` - VolumePtr *string `xml:"volume"` -} - -// QuotaOnResponse is a structure to represent a quota-on Response ZAPI object -type QuotaOnResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result QuotaOnResponseResult `xml:"results"` -} - -// NewQuotaOnResponse is a factory method for creating new instances of QuotaOnResponse objects -func NewQuotaOnResponse() *QuotaOnResponse { - return &QuotaOnResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaOnResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QuotaOnResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QuotaOnResponseResult is a structure to represent a quota-on Response Result ZAPI object -type QuotaOnResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ResultErrorCodePtr *int `xml:"result-error-code"` - ResultErrorMessagePtr *string `xml:"result-error-message"` - ResultJobidPtr *int `xml:"result-jobid"` - ResultStatusPtr *string `xml:"result-status"` -} - -// NewQuotaOnRequest is a factory method for creating new instances of QuotaOnRequest objects -func NewQuotaOnRequest() *QuotaOnRequest { - return &QuotaOnRequest{} -} - -// 
NewQuotaOnResponseResult is a factory method for creating new instances of QuotaOnResponseResult objects -func NewQuotaOnResponseResult() *QuotaOnResponseResult { - return &QuotaOnResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QuotaOnRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *QuotaOnResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaOnRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaOnResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaOnRequest) ExecuteUsing(zr *ZapiRunner) (*QuotaOnResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaOnRequest) executeWithoutIteration(zr *ZapiRunner) (*QuotaOnResponse, error) { - result, err := zr.ExecuteUsing(o, "QuotaOnRequest", NewQuotaOnResponse()) - if result == nil { - return nil, err - } - return result.(*QuotaOnResponse), err -} - -// Volume is a 'getter' method -func (o *QuotaOnRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *QuotaOnRequest) SetVolume(newValue string) *QuotaOnRequest { - o.VolumePtr = &newValue - return o -} - -// ResultErrorCode is a 'getter' method -func (o *QuotaOnResponseResult) ResultErrorCode() int { - r := *o.ResultErrorCodePtr - return r -} - -// SetResultErrorCode is a fluent style 'setter' method that can be chained -func (o *QuotaOnResponseResult) SetResultErrorCode(newValue int) *QuotaOnResponseResult { - o.ResultErrorCodePtr = &newValue - return o -} - -// ResultErrorMessage is a 'getter' method -func (o *QuotaOnResponseResult) ResultErrorMessage() string { - r := *o.ResultErrorMessagePtr - return r -} - -// SetResultErrorMessage is a fluent style 'setter' method that can be chained -func (o *QuotaOnResponseResult) SetResultErrorMessage(newValue string) *QuotaOnResponseResult { - o.ResultErrorMessagePtr = &newValue - return o -} - -// ResultJobid is a 'getter' method -func (o *QuotaOnResponseResult) ResultJobid() int { - r := *o.ResultJobidPtr - return r -} - -// SetResultJobid is a fluent style 'setter' method that can be chained -func (o *QuotaOnResponseResult) SetResultJobid(newValue int) *QuotaOnResponseResult { - o.ResultJobidPtr = &newValue - return o -} - -// ResultStatus is a 'getter' method -func (o *QuotaOnResponseResult) ResultStatus() string { - r := *o.ResultStatusPtr - return r -} - -// SetResultStatus is a fluent style 'setter' method that can be chained -func (o *QuotaOnResponseResult) SetResultStatus(newValue string) *QuotaOnResponseResult { - o.ResultStatusPtr = &newValue - return o -} diff --git 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-resize.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-resize.go deleted file mode 100644 index af18309ec..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-resize.go +++ /dev/null @@ -1,167 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QuotaResizeRequest is a structure to represent a quota-resize Request ZAPI object -type QuotaResizeRequest struct { - XMLName xml.Name `xml:"quota-resize"` - VolumePtr *string `xml:"volume"` -} - -// QuotaResizeResponse is a structure to represent a quota-resize Response ZAPI object -type QuotaResizeResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result QuotaResizeResponseResult `xml:"results"` -} - -// NewQuotaResizeResponse is a factory method for creating new instances of QuotaResizeResponse objects -func NewQuotaResizeResponse() *QuotaResizeResponse { - return &QuotaResizeResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaResizeResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QuotaResizeResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QuotaResizeResponseResult is a structure to represent a quota-resize Response Result ZAPI object -type QuotaResizeResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ResultErrorCodePtr *int `xml:"result-error-code"` - ResultErrorMessagePtr *string `xml:"result-error-message"` - ResultJobidPtr *int `xml:"result-jobid"` - ResultStatusPtr *string `xml:"result-status"` -} - -// NewQuotaResizeRequest is a factory method for creating new instances of QuotaResizeRequest objects -func NewQuotaResizeRequest() *QuotaResizeRequest { - return &QuotaResizeRequest{} -} - -// NewQuotaResizeResponseResult is a factory method for creating new instances of QuotaResizeResponseResult objects -func NewQuotaResizeResponseResult() *QuotaResizeResponseResult { - return &QuotaResizeResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QuotaResizeRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *QuotaResizeResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaResizeRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaResizeResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to 
send to a filer - -func (o *QuotaResizeRequest) ExecuteUsing(zr *ZapiRunner) (*QuotaResizeResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaResizeRequest) executeWithoutIteration(zr *ZapiRunner) (*QuotaResizeResponse, error) { - result, err := zr.ExecuteUsing(o, "QuotaResizeRequest", NewQuotaResizeResponse()) - if result == nil { - return nil, err - } - return result.(*QuotaResizeResponse), err -} - -// Volume is a 'getter' method -func (o *QuotaResizeRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *QuotaResizeRequest) SetVolume(newValue string) *QuotaResizeRequest { - o.VolumePtr = &newValue - return o -} - -// ResultErrorCode is a 'getter' method -func (o *QuotaResizeResponseResult) ResultErrorCode() int { - r := *o.ResultErrorCodePtr - return r -} - -// SetResultErrorCode is a fluent style 'setter' method that can be chained -func (o *QuotaResizeResponseResult) SetResultErrorCode(newValue int) *QuotaResizeResponseResult { - o.ResultErrorCodePtr = &newValue - return o -} - -// ResultErrorMessage is a 'getter' method -func (o *QuotaResizeResponseResult) ResultErrorMessage() string { - r := *o.ResultErrorMessagePtr - return r -} - -// SetResultErrorMessage is a fluent style 'setter' method that can be chained -func (o *QuotaResizeResponseResult) SetResultErrorMessage(newValue string) *QuotaResizeResponseResult { - o.ResultErrorMessagePtr = &newValue - return o -} - -// ResultJobid is a 'getter' method -func (o *QuotaResizeResponseResult) ResultJobid() int { - r := *o.ResultJobidPtr - return r -} - -// SetResultJobid is a fluent style 'setter' method that can be chained -func (o *QuotaResizeResponseResult) SetResultJobid(newValue int) *QuotaResizeResponseResult { - o.ResultJobidPtr = &newValue - return o -} - -// ResultStatus is a 'getter' method -func (o *QuotaResizeResponseResult) ResultStatus() string { - r := *o.ResultStatusPtr - return r -} - -// SetResultStatus is a fluent style 'setter' method that can be chained -func (o *QuotaResizeResponseResult) SetResultStatus(newValue string) *QuotaResizeResponseResult { - o.ResultStatusPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-set-entry.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-set-entry.go deleted file mode 100644 index 60c71cc4f..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-set-entry.go +++ /dev/null @@ -1,245 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QuotaSetEntryRequest is a structure to represent a quota-set-entry Request ZAPI object -type QuotaSetEntryRequest struct { - XMLName xml.Name `xml:"quota-set-entry"` - DiskLimitPtr *string `xml:"disk-limit"` - FileLimitPtr *string `xml:"file-limit"` - PerformUserMappingPtr *bool `xml:"perform-user-mapping"` - PolicyPtr *string `xml:"policy"` - QtreePtr *string `xml:"qtree"` - QuotaTargetPtr *string `xml:"quota-target"` - QuotaTypePtr *string `xml:"quota-type"` - SoftDiskLimitPtr *string `xml:"soft-disk-limit"` - SoftFileLimitPtr *string `xml:"soft-file-limit"` - ThresholdPtr *string `xml:"threshold"` - VolumePtr *string `xml:"volume"` -} - -// QuotaSetEntryResponse is a structure to represent a quota-set-entry Response ZAPI 
object -type QuotaSetEntryResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result QuotaSetEntryResponseResult `xml:"results"` -} - -// NewQuotaSetEntryResponse is a factory method for creating new instances of QuotaSetEntryResponse objects -func NewQuotaSetEntryResponse() *QuotaSetEntryResponse { - return &QuotaSetEntryResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaSetEntryResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QuotaSetEntryResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QuotaSetEntryResponseResult is a structure to represent a quota-set-entry Response Result ZAPI object -type QuotaSetEntryResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewQuotaSetEntryRequest is a factory method for creating new instances of QuotaSetEntryRequest objects -func NewQuotaSetEntryRequest() *QuotaSetEntryRequest { - return &QuotaSetEntryRequest{} -} - -// NewQuotaSetEntryResponseResult is a factory method for creating new instances of QuotaSetEntryResponseResult objects -func NewQuotaSetEntryResponseResult() *QuotaSetEntryResponseResult { - return &QuotaSetEntryResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QuotaSetEntryRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *QuotaSetEntryResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaSetEntryRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaSetEntryResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaSetEntryRequest) ExecuteUsing(zr *ZapiRunner) (*QuotaSetEntryResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaSetEntryRequest) executeWithoutIteration(zr *ZapiRunner) (*QuotaSetEntryResponse, error) { - result, err := zr.ExecuteUsing(o, "QuotaSetEntryRequest", NewQuotaSetEntryResponse()) - if result == nil { - return nil, err - } - return result.(*QuotaSetEntryResponse), err -} - -// DiskLimit is a 'getter' method -func (o *QuotaSetEntryRequest) DiskLimit() string { - r := *o.DiskLimitPtr - return r -} - -// SetDiskLimit is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetDiskLimit(newValue string) *QuotaSetEntryRequest { - o.DiskLimitPtr 
= &newValue - return o -} - -// FileLimit is a 'getter' method -func (o *QuotaSetEntryRequest) FileLimit() string { - r := *o.FileLimitPtr - return r -} - -// SetFileLimit is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetFileLimit(newValue string) *QuotaSetEntryRequest { - o.FileLimitPtr = &newValue - return o -} - -// PerformUserMapping is a 'getter' method -func (o *QuotaSetEntryRequest) PerformUserMapping() bool { - r := *o.PerformUserMappingPtr - return r -} - -// SetPerformUserMapping is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetPerformUserMapping(newValue bool) *QuotaSetEntryRequest { - o.PerformUserMappingPtr = &newValue - return o -} - -// Policy is a 'getter' method -func (o *QuotaSetEntryRequest) Policy() string { - r := *o.PolicyPtr - return r -} - -// SetPolicy is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetPolicy(newValue string) *QuotaSetEntryRequest { - o.PolicyPtr = &newValue - return o -} - -// Qtree is a 'getter' method -func (o *QuotaSetEntryRequest) Qtree() string { - r := *o.QtreePtr - return r -} - -// SetQtree is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetQtree(newValue string) *QuotaSetEntryRequest { - o.QtreePtr = &newValue - return o -} - -// QuotaTarget is a 'getter' method -func (o *QuotaSetEntryRequest) QuotaTarget() string { - r := *o.QuotaTargetPtr - return r -} - -// SetQuotaTarget is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetQuotaTarget(newValue string) *QuotaSetEntryRequest { - o.QuotaTargetPtr = &newValue - return o -} - -// QuotaType is a 'getter' method -func (o *QuotaSetEntryRequest) QuotaType() string { - r := *o.QuotaTypePtr - return r -} - -// SetQuotaType is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetQuotaType(newValue string) *QuotaSetEntryRequest { - o.QuotaTypePtr = &newValue - return o -} - -// SoftDiskLimit is a 'getter' method -func (o *QuotaSetEntryRequest) SoftDiskLimit() string { - r := *o.SoftDiskLimitPtr - return r -} - -// SetSoftDiskLimit is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetSoftDiskLimit(newValue string) *QuotaSetEntryRequest { - o.SoftDiskLimitPtr = &newValue - return o -} - -// SoftFileLimit is a 'getter' method -func (o *QuotaSetEntryRequest) SoftFileLimit() string { - r := *o.SoftFileLimitPtr - return r -} - -// SetSoftFileLimit is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetSoftFileLimit(newValue string) *QuotaSetEntryRequest { - o.SoftFileLimitPtr = &newValue - return o -} - -// Threshold is a 'getter' method -func (o *QuotaSetEntryRequest) Threshold() string { - r := *o.ThresholdPtr - return r -} - -// SetThreshold is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetThreshold(newValue string) *QuotaSetEntryRequest { - o.ThresholdPtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *QuotaSetEntryRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *QuotaSetEntryRequest) SetVolume(newValue string) *QuotaSetEntryRequest { - o.VolumePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-status.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-status.go 
deleted file mode 100644 index 6e863e557..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-quota-status.go +++ /dev/null @@ -1,180 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QuotaStatusRequest is a structure to represent a quota-status Request ZAPI object -type QuotaStatusRequest struct { - XMLName xml.Name `xml:"quota-status"` - VolumePtr *string `xml:"volume"` -} - -// QuotaStatusResponse is a structure to represent a quota-status Response ZAPI object -type QuotaStatusResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result QuotaStatusResponseResult `xml:"results"` -} - -// NewQuotaStatusResponse is a factory method for creating new instances of QuotaStatusResponse objects -func NewQuotaStatusResponse() *QuotaStatusResponse { - return &QuotaStatusResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaStatusResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *QuotaStatusResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// QuotaStatusResponseResult is a structure to represent a quota-status Response Result ZAPI object -type QuotaStatusResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - PercentCompletePtr *int `xml:"percent-complete"` - QuotaErrorsPtr *string `xml:"quota-errors"` - ReasonPtr *string `xml:"reason"` - StatusPtr *string `xml:"status"` - SubstatusPtr *string `xml:"substatus"` -} - -// NewQuotaStatusRequest is a factory method for creating new instances of QuotaStatusRequest objects -func NewQuotaStatusRequest() *QuotaStatusRequest { - return &QuotaStatusRequest{} -} - -// NewQuotaStatusResponseResult is a factory method for creating new instances of QuotaStatusResponseResult objects -func NewQuotaStatusResponseResult() *QuotaStatusResponseResult { - return &QuotaStatusResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *QuotaStatusRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *QuotaStatusResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaStatusRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaStatusResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaStatusRequest) ExecuteUsing(zr *ZapiRunner) (*QuotaStatusResponse, error) { - return o.executeWithoutIteration(zr) -} - -// 
executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *QuotaStatusRequest) executeWithoutIteration(zr *ZapiRunner) (*QuotaStatusResponse, error) { - result, err := zr.ExecuteUsing(o, "QuotaStatusRequest", NewQuotaStatusResponse()) - if result == nil { - return nil, err - } - return result.(*QuotaStatusResponse), err -} - -// Volume is a 'getter' method -func (o *QuotaStatusRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *QuotaStatusRequest) SetVolume(newValue string) *QuotaStatusRequest { - o.VolumePtr = &newValue - return o -} - -// PercentComplete is a 'getter' method -func (o *QuotaStatusResponseResult) PercentComplete() int { - r := *o.PercentCompletePtr - return r -} - -// SetPercentComplete is a fluent style 'setter' method that can be chained -func (o *QuotaStatusResponseResult) SetPercentComplete(newValue int) *QuotaStatusResponseResult { - o.PercentCompletePtr = &newValue - return o -} - -// QuotaErrors is a 'getter' method -func (o *QuotaStatusResponseResult) QuotaErrors() string { - r := *o.QuotaErrorsPtr - return r -} - -// SetQuotaErrors is a fluent style 'setter' method that can be chained -func (o *QuotaStatusResponseResult) SetQuotaErrors(newValue string) *QuotaStatusResponseResult { - o.QuotaErrorsPtr = &newValue - return o -} - -// Reason is a 'getter' method -func (o *QuotaStatusResponseResult) Reason() string { - r := *o.ReasonPtr - return r -} - -// SetReason is a fluent style 'setter' method that can be chained -func (o *QuotaStatusResponseResult) SetReason(newValue string) *QuotaStatusResponseResult { - o.ReasonPtr = &newValue - return o -} - -// Status is a 'getter' method -func (o *QuotaStatusResponseResult) Status() string { - r := *o.StatusPtr - return r -} - -// SetStatus is a fluent style 'setter' method that can be chained -func (o *QuotaStatusResponseResult) SetStatus(newValue string) *QuotaStatusResponseResult { - o.StatusPtr = &newValue - return o -} - -// Substatus is a 'getter' method -func (o *QuotaStatusResponseResult) Substatus() string { - r := *o.SubstatusPtr - return r -} - -// SetSubstatus is a fluent style 'setter' method that can be chained -func (o *QuotaStatusResponseResult) SetSubstatus(newValue string) *QuotaStatusResponseResult { - o.SubstatusPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapmirror-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapmirror-get-iter.go deleted file mode 100644 index d17c88d43..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapmirror-get-iter.go +++ /dev/null @@ -1,347 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SnapmirrorGetIterRequest is a structure to represent a snapmirror-get-iter Request ZAPI object -type SnapmirrorGetIterRequest struct { - XMLName xml.Name `xml:"snapmirror-get-iter"` - DesiredAttributesPtr *SnapmirrorGetIterRequestDesiredAttributes `xml:"desired-attributes"` - ExpandPtr *bool `xml:"expand"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *SnapmirrorGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// SnapmirrorGetIterResponse is a structure to represent a snapmirror-get-iter Response ZAPI object -type SnapmirrorGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string 
`xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result SnapmirrorGetIterResponseResult `xml:"results"` -} - -// NewSnapmirrorGetIterResponse is a factory method for creating new instances of SnapmirrorGetIterResponse objects -func NewSnapmirrorGetIterResponse() *SnapmirrorGetIterResponse { - return &SnapmirrorGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapmirrorGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *SnapmirrorGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// SnapmirrorGetIterResponseResult is a structure to represent a snapmirror-get-iter Response Result ZAPI object -type SnapmirrorGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *SnapmirrorGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewSnapmirrorGetIterRequest is a factory method for creating new instances of SnapmirrorGetIterRequest objects -func NewSnapmirrorGetIterRequest() *SnapmirrorGetIterRequest { - return &SnapmirrorGetIterRequest{} -} - -// NewSnapmirrorGetIterResponseResult is a factory method for creating new instances of SnapmirrorGetIterResponseResult objects -func NewSnapmirrorGetIterResponseResult() *SnapmirrorGetIterResponseResult { - return &SnapmirrorGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *SnapmirrorGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *SnapmirrorGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapmirrorGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapmirrorGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapmirrorGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*SnapmirrorGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapmirrorGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*SnapmirrorGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "SnapmirrorGetIterRequest", NewSnapmirrorGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*SnapmirrorGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner 
to send to a filer -func (o *SnapmirrorGetIterRequest) executeWithIteration(zr *ZapiRunner) (*SnapmirrorGetIterResponse, error) { - combined := NewSnapmirrorGetIterResponse() - combined.Result.SetAttributesList(SnapmirrorGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(SnapmirrorGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// SnapmirrorGetIterRequestDesiredAttributes is a wrapper -type SnapmirrorGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - SnapmirrorInfoPtr *SnapmirrorInfoType `xml:"snapmirror-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapmirrorGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SnapmirrorInfo is a 'getter' method -func (o *SnapmirrorGetIterRequestDesiredAttributes) SnapmirrorInfo() SnapmirrorInfoType { - r := *o.SnapmirrorInfoPtr - return r -} - -// SetSnapmirrorInfo is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterRequestDesiredAttributes) SetSnapmirrorInfo(newValue SnapmirrorInfoType) *SnapmirrorGetIterRequestDesiredAttributes { - o.SnapmirrorInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *SnapmirrorGetIterRequest) DesiredAttributes() SnapmirrorGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterRequest) SetDesiredAttributes(newValue SnapmirrorGetIterRequestDesiredAttributes) *SnapmirrorGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// Expand is a 'getter' method -func (o *SnapmirrorGetIterRequest) Expand() bool { - r := *o.ExpandPtr - return r -} - -// SetExpand is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterRequest) SetExpand(newValue bool) *SnapmirrorGetIterRequest { - o.ExpandPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *SnapmirrorGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterRequest) SetMaxRecords(newValue int) 
*SnapmirrorGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// SnapmirrorGetIterRequestQuery is a wrapper -type SnapmirrorGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - SnapmirrorInfoPtr *SnapmirrorInfoType `xml:"snapmirror-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapmirrorGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SnapmirrorInfo is a 'getter' method -func (o *SnapmirrorGetIterRequestQuery) SnapmirrorInfo() SnapmirrorInfoType { - r := *o.SnapmirrorInfoPtr - return r -} - -// SetSnapmirrorInfo is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterRequestQuery) SetSnapmirrorInfo(newValue SnapmirrorInfoType) *SnapmirrorGetIterRequestQuery { - o.SnapmirrorInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *SnapmirrorGetIterRequest) Query() SnapmirrorGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterRequest) SetQuery(newValue SnapmirrorGetIterRequestQuery) *SnapmirrorGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *SnapmirrorGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterRequest) SetTag(newValue string) *SnapmirrorGetIterRequest { - o.TagPtr = &newValue - return o -} - -// SnapmirrorGetIterResponseResultAttributesList is a wrapper -type SnapmirrorGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - SnapmirrorInfoPtr []SnapmirrorInfoType `xml:"snapmirror-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapmirrorGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SnapmirrorInfo is a 'getter' method -func (o *SnapmirrorGetIterResponseResultAttributesList) SnapmirrorInfo() []SnapmirrorInfoType { - r := o.SnapmirrorInfoPtr - return r -} - -// SetSnapmirrorInfo is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterResponseResultAttributesList) SetSnapmirrorInfo(newValue []SnapmirrorInfoType) *SnapmirrorGetIterResponseResultAttributesList { - newSlice := make([]SnapmirrorInfoType, len(newValue)) - copy(newSlice, newValue) - o.SnapmirrorInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *SnapmirrorGetIterResponseResultAttributesList) values() []SnapmirrorInfoType { - r := o.SnapmirrorInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterResponseResultAttributesList) setValues(newValue []SnapmirrorInfoType) *SnapmirrorGetIterResponseResultAttributesList { - newSlice := make([]SnapmirrorInfoType, len(newValue)) - copy(newSlice, newValue) - o.SnapmirrorInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *SnapmirrorGetIterResponseResult) AttributesList() SnapmirrorGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterResponseResult) SetAttributesList(newValue SnapmirrorGetIterResponseResultAttributesList) *SnapmirrorGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 
'getter' method -func (o *SnapmirrorGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterResponseResult) SetNextTag(newValue string) *SnapmirrorGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *SnapmirrorGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *SnapmirrorGetIterResponseResult) SetNumRecords(newValue int) *SnapmirrorGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-create.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-create.go deleted file mode 100644 index 88c7b0859..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-create.go +++ /dev/null @@ -1,167 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SnapshotCreateRequest is a structure to represent a snapshot-create Request ZAPI object -type SnapshotCreateRequest struct { - XMLName xml.Name `xml:"snapshot-create"` - AsyncPtr *bool `xml:"async"` - CommentPtr *string `xml:"comment"` - SnapmirrorLabelPtr *string `xml:"snapmirror-label"` - SnapshotPtr *string `xml:"snapshot"` - VolumePtr *string `xml:"volume"` -} - -// SnapshotCreateResponse is a structure to represent a snapshot-create Response ZAPI object -type SnapshotCreateResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result SnapshotCreateResponseResult `xml:"results"` -} - -// NewSnapshotCreateResponse is a factory method for creating new instances of SnapshotCreateResponse objects -func NewSnapshotCreateResponse() *SnapshotCreateResponse { - return &SnapshotCreateResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotCreateResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotCreateResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// SnapshotCreateResponseResult is a structure to represent a snapshot-create Response Result ZAPI object -type SnapshotCreateResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewSnapshotCreateRequest is a factory method for creating new instances of SnapshotCreateRequest objects -func NewSnapshotCreateRequest() *SnapshotCreateRequest { - return &SnapshotCreateRequest{} -} - -// NewSnapshotCreateResponseResult is a factory method for creating new instances of SnapshotCreateResponseResult objects -func NewSnapshotCreateResponseResult() *SnapshotCreateResponseResult { - return &SnapshotCreateResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotCreateRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML 
converts this object into an xml string representation -func (o *SnapshotCreateResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotCreateRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotCreateResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapshotCreateRequest) ExecuteUsing(zr *ZapiRunner) (*SnapshotCreateResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapshotCreateRequest) executeWithoutIteration(zr *ZapiRunner) (*SnapshotCreateResponse, error) { - result, err := zr.ExecuteUsing(o, "SnapshotCreateRequest", NewSnapshotCreateResponse()) - if result == nil { - return nil, err - } - return result.(*SnapshotCreateResponse), err -} - -// Async is a 'getter' method -func (o *SnapshotCreateRequest) Async() bool { - r := *o.AsyncPtr - return r -} - -// SetAsync is a fluent style 'setter' method that can be chained -func (o *SnapshotCreateRequest) SetAsync(newValue bool) *SnapshotCreateRequest { - o.AsyncPtr = &newValue - return o -} - -// Comment is a 'getter' method -func (o *SnapshotCreateRequest) Comment() string { - r := *o.CommentPtr - return r -} - -// SetComment is a fluent style 'setter' method that can be chained -func (o *SnapshotCreateRequest) SetComment(newValue string) *SnapshotCreateRequest { - o.CommentPtr = &newValue - return o -} - -// SnapmirrorLabel is a 'getter' method -func (o *SnapshotCreateRequest) SnapmirrorLabel() string { - r := *o.SnapmirrorLabelPtr - return r -} - -// SetSnapmirrorLabel is a fluent style 'setter' method that can be chained -func (o *SnapshotCreateRequest) SetSnapmirrorLabel(newValue string) *SnapshotCreateRequest { - o.SnapmirrorLabelPtr = &newValue - return o -} - -// Snapshot is a 'getter' method -func (o *SnapshotCreateRequest) Snapshot() string { - r := *o.SnapshotPtr - return r -} - -// SetSnapshot is a fluent style 'setter' method that can be chained -func (o *SnapshotCreateRequest) SetSnapshot(newValue string) *SnapshotCreateRequest { - o.SnapshotPtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *SnapshotCreateRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *SnapshotCreateRequest) SetVolume(newValue string) *SnapshotCreateRequest { - o.VolumePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-delete.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-delete.go deleted file mode 100644 index fe5a76764..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-delete.go +++ /dev/null @@ -1,154 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SnapshotDeleteRequest is a structure to represent a snapshot-delete Request ZAPI object -type 
SnapshotDeleteRequest struct { - XMLName xml.Name `xml:"snapshot-delete"` - IgnoreOwnersPtr *bool `xml:"ignore-owners"` - SnapshotPtr *string `xml:"snapshot"` - SnapshotInstanceUuidPtr *UUIDType `xml:"snapshot-instance-uuid"` - VolumePtr *string `xml:"volume"` -} - -// SnapshotDeleteResponse is a structure to represent a snapshot-delete Response ZAPI object -type SnapshotDeleteResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result SnapshotDeleteResponseResult `xml:"results"` -} - -// NewSnapshotDeleteResponse is a factory method for creating new instances of SnapshotDeleteResponse objects -func NewSnapshotDeleteResponse() *SnapshotDeleteResponse { - return &SnapshotDeleteResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotDeleteResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotDeleteResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// SnapshotDeleteResponseResult is a structure to represent a snapshot-delete Response Result ZAPI object -type SnapshotDeleteResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewSnapshotDeleteRequest is a factory method for creating new instances of SnapshotDeleteRequest objects -func NewSnapshotDeleteRequest() *SnapshotDeleteRequest { - return &SnapshotDeleteRequest{} -} - -// NewSnapshotDeleteResponseResult is a factory method for creating new instances of SnapshotDeleteResponseResult objects -func NewSnapshotDeleteResponseResult() *SnapshotDeleteResponseResult { - return &SnapshotDeleteResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotDeleteRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotDeleteResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotDeleteRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotDeleteResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapshotDeleteRequest) ExecuteUsing(zr *ZapiRunner) (*SnapshotDeleteResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapshotDeleteRequest) executeWithoutIteration(zr *ZapiRunner) (*SnapshotDeleteResponse, error) { - result, err := zr.ExecuteUsing(o, "SnapshotDeleteRequest", NewSnapshotDeleteResponse()) - if result 
== nil { - return nil, err - } - return result.(*SnapshotDeleteResponse), err -} - -// IgnoreOwners is a 'getter' method -func (o *SnapshotDeleteRequest) IgnoreOwners() bool { - r := *o.IgnoreOwnersPtr - return r -} - -// SetIgnoreOwners is a fluent style 'setter' method that can be chained -func (o *SnapshotDeleteRequest) SetIgnoreOwners(newValue bool) *SnapshotDeleteRequest { - o.IgnoreOwnersPtr = &newValue - return o -} - -// Snapshot is a 'getter' method -func (o *SnapshotDeleteRequest) Snapshot() string { - r := *o.SnapshotPtr - return r -} - -// SetSnapshot is a fluent style 'setter' method that can be chained -func (o *SnapshotDeleteRequest) SetSnapshot(newValue string) *SnapshotDeleteRequest { - o.SnapshotPtr = &newValue - return o -} - -// SnapshotInstanceUuid is a 'getter' method -func (o *SnapshotDeleteRequest) SnapshotInstanceUuid() UUIDType { - r := *o.SnapshotInstanceUuidPtr - return r -} - -// SetSnapshotInstanceUuid is a fluent style 'setter' method that can be chained -func (o *SnapshotDeleteRequest) SetSnapshotInstanceUuid(newValue UUIDType) *SnapshotDeleteRequest { - o.SnapshotInstanceUuidPtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *SnapshotDeleteRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *SnapshotDeleteRequest) SetVolume(newValue string) *SnapshotDeleteRequest { - o.VolumePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-get-iter.go deleted file mode 100644 index ae259147b..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-get-iter.go +++ /dev/null @@ -1,386 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SnapshotGetIterRequest is a structure to represent a snapshot-get-iter Request ZAPI object -type SnapshotGetIterRequest struct { - XMLName xml.Name `xml:"snapshot-get-iter"` - DesiredAttributesPtr *SnapshotGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *SnapshotGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// SnapshotGetIterResponse is a structure to represent a snapshot-get-iter Response ZAPI object -type SnapshotGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result SnapshotGetIterResponseResult `xml:"results"` -} - -// NewSnapshotGetIterResponse is a factory method for creating new instances of SnapshotGetIterResponse objects -func NewSnapshotGetIterResponse() *SnapshotGetIterResponse { - return &SnapshotGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// SnapshotGetIterResponseResult is a structure to represent a snapshot-get-iter Response Result ZAPI object -type SnapshotGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string 
`xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *SnapshotGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` - VolumeErrorsPtr *SnapshotGetIterResponseResultVolumeErrors `xml:"volume-errors"` -} - -// NewSnapshotGetIterRequest is a factory method for creating new instances of SnapshotGetIterRequest objects -func NewSnapshotGetIterRequest() *SnapshotGetIterRequest { - return &SnapshotGetIterRequest{} -} - -// NewSnapshotGetIterResponseResult is a factory method for creating new instances of SnapshotGetIterResponseResult objects -func NewSnapshotGetIterResponseResult() *SnapshotGetIterResponseResult { - return &SnapshotGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapshotGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*SnapshotGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapshotGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*SnapshotGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "SnapshotGetIterRequest", NewSnapshotGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*SnapshotGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *SnapshotGetIterRequest) executeWithIteration(zr *ZapiRunner) (*SnapshotGetIterResponse, error) { - combined := NewSnapshotGetIterResponse() - combined.Result.SetAttributesList(SnapshotGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(SnapshotGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := 
n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// SnapshotGetIterRequestDesiredAttributes is a wrapper -type SnapshotGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - SnapshotInfoPtr *SnapshotInfoType `xml:"snapshot-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SnapshotInfo is a 'getter' method -func (o *SnapshotGetIterRequestDesiredAttributes) SnapshotInfo() SnapshotInfoType { - r := *o.SnapshotInfoPtr - return r -} - -// SetSnapshotInfo is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterRequestDesiredAttributes) SetSnapshotInfo(newValue SnapshotInfoType) *SnapshotGetIterRequestDesiredAttributes { - o.SnapshotInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *SnapshotGetIterRequest) DesiredAttributes() SnapshotGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterRequest) SetDesiredAttributes(newValue SnapshotGetIterRequestDesiredAttributes) *SnapshotGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *SnapshotGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterRequest) SetMaxRecords(newValue int) *SnapshotGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// SnapshotGetIterRequestQuery is a wrapper -type SnapshotGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - SnapshotInfoPtr *SnapshotInfoType `xml:"snapshot-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SnapshotInfo is a 'getter' method -func (o *SnapshotGetIterRequestQuery) SnapshotInfo() SnapshotInfoType { - r := *o.SnapshotInfoPtr - return r -} - -// SetSnapshotInfo is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterRequestQuery) SetSnapshotInfo(newValue SnapshotInfoType) *SnapshotGetIterRequestQuery { - o.SnapshotInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *SnapshotGetIterRequest) Query() SnapshotGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterRequest) SetQuery(newValue SnapshotGetIterRequestQuery) *SnapshotGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *SnapshotGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained 
-func (o *SnapshotGetIterRequest) SetTag(newValue string) *SnapshotGetIterRequest { - o.TagPtr = &newValue - return o -} - -// SnapshotGetIterResponseResultAttributesList is a wrapper -type SnapshotGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - SnapshotInfoPtr []SnapshotInfoType `xml:"snapshot-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SnapshotInfo is a 'getter' method -func (o *SnapshotGetIterResponseResultAttributesList) SnapshotInfo() []SnapshotInfoType { - r := o.SnapshotInfoPtr - return r -} - -// SetSnapshotInfo is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterResponseResultAttributesList) SetSnapshotInfo(newValue []SnapshotInfoType) *SnapshotGetIterResponseResultAttributesList { - newSlice := make([]SnapshotInfoType, len(newValue)) - copy(newSlice, newValue) - o.SnapshotInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *SnapshotGetIterResponseResultAttributesList) values() []SnapshotInfoType { - r := o.SnapshotInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterResponseResultAttributesList) setValues(newValue []SnapshotInfoType) *SnapshotGetIterResponseResultAttributesList { - newSlice := make([]SnapshotInfoType, len(newValue)) - copy(newSlice, newValue) - o.SnapshotInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *SnapshotGetIterResponseResult) AttributesList() SnapshotGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterResponseResult) SetAttributesList(newValue SnapshotGetIterResponseResultAttributesList) *SnapshotGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *SnapshotGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterResponseResult) SetNextTag(newValue string) *SnapshotGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *SnapshotGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterResponseResult) SetNumRecords(newValue int) *SnapshotGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} - -// SnapshotGetIterResponseResultVolumeErrors is a wrapper -type SnapshotGetIterResponseResultVolumeErrors struct { - XMLName xml.Name `xml:"volume-errors"` - VolumeErrorPtr []VolumeErrorType `xml:"volume-error"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotGetIterResponseResultVolumeErrors) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeError is a 'getter' method -func (o *SnapshotGetIterResponseResultVolumeErrors) VolumeError() []VolumeErrorType { - r := o.VolumeErrorPtr - return r -} - -// SetVolumeError is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterResponseResultVolumeErrors) SetVolumeError(newValue []VolumeErrorType) 
*SnapshotGetIterResponseResultVolumeErrors { - newSlice := make([]VolumeErrorType, len(newValue)) - copy(newSlice, newValue) - o.VolumeErrorPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *SnapshotGetIterResponseResultVolumeErrors) values() []VolumeErrorType { - r := o.VolumeErrorPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterResponseResultVolumeErrors) setValues(newValue []VolumeErrorType) *SnapshotGetIterResponseResultVolumeErrors { - newSlice := make([]VolumeErrorType, len(newValue)) - copy(newSlice, newValue) - o.VolumeErrorPtr = newSlice - return o -} - -// VolumeErrors is a 'getter' method -func (o *SnapshotGetIterResponseResult) VolumeErrors() SnapshotGetIterResponseResultVolumeErrors { - r := *o.VolumeErrorsPtr - return r -} - -// SetVolumeErrors is a fluent style 'setter' method that can be chained -func (o *SnapshotGetIterResponseResult) SetVolumeErrors(newValue SnapshotGetIterResponseResultVolumeErrors) *SnapshotGetIterResponseResult { - o.VolumeErrorsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-restore-volume.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-restore-volume.go deleted file mode 100644 index 4b2edce67..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-snapshot-restore-volume.go +++ /dev/null @@ -1,167 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SnapshotRestoreVolumeRequest is a structure to represent a snapshot-restore-volume Request ZAPI object -type SnapshotRestoreVolumeRequest struct { - XMLName xml.Name `xml:"snapshot-restore-volume"` - ForcePtr *bool `xml:"force"` - PreserveLunIdsPtr *bool `xml:"preserve-lun-ids"` - SnapshotPtr *string `xml:"snapshot"` - SnapshotInstanceUuidPtr *UUIDType `xml:"snapshot-instance-uuid"` - VolumePtr *string `xml:"volume"` -} - -// SnapshotRestoreVolumeResponse is a structure to represent a snapshot-restore-volume Response ZAPI object -type SnapshotRestoreVolumeResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result SnapshotRestoreVolumeResponseResult `xml:"results"` -} - -// NewSnapshotRestoreVolumeResponse is a factory method for creating new instances of SnapshotRestoreVolumeResponse objects -func NewSnapshotRestoreVolumeResponse() *SnapshotRestoreVolumeResponse { - return &SnapshotRestoreVolumeResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotRestoreVolumeResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotRestoreVolumeResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// SnapshotRestoreVolumeResponseResult is a structure to represent a snapshot-restore-volume Response Result ZAPI object -type SnapshotRestoreVolumeResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewSnapshotRestoreVolumeRequest is a factory method for creating new instances of SnapshotRestoreVolumeRequest objects -func 
NewSnapshotRestoreVolumeRequest() *SnapshotRestoreVolumeRequest { - return &SnapshotRestoreVolumeRequest{} -} - -// NewSnapshotRestoreVolumeResponseResult is a factory method for creating new instances of SnapshotRestoreVolumeResponseResult objects -func NewSnapshotRestoreVolumeResponseResult() *SnapshotRestoreVolumeResponseResult { - return &SnapshotRestoreVolumeResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotRestoreVolumeRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotRestoreVolumeResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotRestoreVolumeRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotRestoreVolumeResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapshotRestoreVolumeRequest) ExecuteUsing(zr *ZapiRunner) (*SnapshotRestoreVolumeResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SnapshotRestoreVolumeRequest) executeWithoutIteration(zr *ZapiRunner) (*SnapshotRestoreVolumeResponse, error) { - result, err := zr.ExecuteUsing(o, "SnapshotRestoreVolumeRequest", NewSnapshotRestoreVolumeResponse()) - if result == nil { - return nil, err - } - return result.(*SnapshotRestoreVolumeResponse), err -} - -// Force is a 'getter' method -func (o *SnapshotRestoreVolumeRequest) Force() bool { - r := *o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o *SnapshotRestoreVolumeRequest) SetForce(newValue bool) *SnapshotRestoreVolumeRequest { - o.ForcePtr = &newValue - return o -} - -// PreserveLunIds is a 'getter' method -func (o *SnapshotRestoreVolumeRequest) PreserveLunIds() bool { - r := *o.PreserveLunIdsPtr - return r -} - -// SetPreserveLunIds is a fluent style 'setter' method that can be chained -func (o *SnapshotRestoreVolumeRequest) SetPreserveLunIds(newValue bool) *SnapshotRestoreVolumeRequest { - o.PreserveLunIdsPtr = &newValue - return o -} - -// Snapshot is a 'getter' method -func (o *SnapshotRestoreVolumeRequest) Snapshot() string { - r := *o.SnapshotPtr - return r -} - -// SetSnapshot is a fluent style 'setter' method that can be chained -func (o *SnapshotRestoreVolumeRequest) SetSnapshot(newValue string) *SnapshotRestoreVolumeRequest { - o.SnapshotPtr = &newValue - return o -} - -// SnapshotInstanceUuid is a 'getter' method -func (o *SnapshotRestoreVolumeRequest) SnapshotInstanceUuid() UUIDType { - r := *o.SnapshotInstanceUuidPtr - return r -} - -// SetSnapshotInstanceUuid is a fluent style 'setter' method that can be chained -func (o *SnapshotRestoreVolumeRequest) SetSnapshotInstanceUuid(newValue UUIDType) *SnapshotRestoreVolumeRequest { - o.SnapshotInstanceUuidPtr = &newValue - return o -} - 
-// Volume is a 'getter' method -func (o *SnapshotRestoreVolumeRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *SnapshotRestoreVolumeRequest) SetVolume(newValue string) *SnapshotRestoreVolumeRequest { - o.VolumePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-system-get-ontapi-version.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-system-get-ontapi-version.go deleted file mode 100644 index cfb982988..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-system-get-ontapi-version.go +++ /dev/null @@ -1,180 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SystemGetOntapiVersionRequest is a structure to represent a system-get-ontapi-version Request ZAPI object -type SystemGetOntapiVersionRequest struct { - XMLName xml.Name `xml:"system-get-ontapi-version"` -} - -// SystemGetOntapiVersionResponse is a structure to represent a system-get-ontapi-version Response ZAPI object -type SystemGetOntapiVersionResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result SystemGetOntapiVersionResponseResult `xml:"results"` -} - -// NewSystemGetOntapiVersionResponse is a factory method for creating new instances of SystemGetOntapiVersionResponse objects -func NewSystemGetOntapiVersionResponse() *SystemGetOntapiVersionResponse { - return &SystemGetOntapiVersionResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemGetOntapiVersionResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *SystemGetOntapiVersionResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// SystemGetOntapiVersionResponseResult is a structure to represent a system-get-ontapi-version Response Result ZAPI object -type SystemGetOntapiVersionResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - MajorVersionPtr *int `xml:"major-version"` - MinorVersionPtr *int `xml:"minor-version"` - NodeOntapiDetailsPtr *SystemGetOntapiVersionResponseResultNodeOntapiDetails `xml:"node-ontapi-details"` -} - -// NewSystemGetOntapiVersionRequest is a factory method for creating new instances of SystemGetOntapiVersionRequest objects -func NewSystemGetOntapiVersionRequest() *SystemGetOntapiVersionRequest { - return &SystemGetOntapiVersionRequest{} -} - -// NewSystemGetOntapiVersionResponseResult is a factory method for creating new instances of SystemGetOntapiVersionResponseResult objects -func NewSystemGetOntapiVersionResponseResult() *SystemGetOntapiVersionResponseResult { - return &SystemGetOntapiVersionResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *SystemGetOntapiVersionRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o 
*SystemGetOntapiVersionResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemGetOntapiVersionRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemGetOntapiVersionResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SystemGetOntapiVersionRequest) ExecuteUsing(zr *ZapiRunner) (*SystemGetOntapiVersionResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SystemGetOntapiVersionRequest) executeWithoutIteration(zr *ZapiRunner) (*SystemGetOntapiVersionResponse, error) { - result, err := zr.ExecuteUsing(o, "SystemGetOntapiVersionRequest", NewSystemGetOntapiVersionResponse()) - if result == nil { - return nil, err - } - return result.(*SystemGetOntapiVersionResponse), err -} - -// MajorVersion is a 'getter' method -func (o *SystemGetOntapiVersionResponseResult) MajorVersion() int { - r := *o.MajorVersionPtr - return r -} - -// SetMajorVersion is a fluent style 'setter' method that can be chained -func (o *SystemGetOntapiVersionResponseResult) SetMajorVersion(newValue int) *SystemGetOntapiVersionResponseResult { - o.MajorVersionPtr = &newValue - return o -} - -// MinorVersion is a 'getter' method -func (o *SystemGetOntapiVersionResponseResult) MinorVersion() int { - r := *o.MinorVersionPtr - return r -} - -// SetMinorVersion is a fluent style 'setter' method that can be chained -func (o *SystemGetOntapiVersionResponseResult) SetMinorVersion(newValue int) *SystemGetOntapiVersionResponseResult { - o.MinorVersionPtr = &newValue - return o -} - -// SystemGetOntapiVersionResponseResultNodeOntapiDetails is a wrapper -type SystemGetOntapiVersionResponseResultNodeOntapiDetails struct { - XMLName xml.Name `xml:"node-ontapi-details"` - NodeOntapiDetailInfoPtr []NodeOntapiDetailInfoType `xml:"node-ontapi-detail-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemGetOntapiVersionResponseResultNodeOntapiDetails) String() string { - return ToString(reflect.ValueOf(o)) -} - -// NodeOntapiDetailInfo is a 'getter' method -func (o *SystemGetOntapiVersionResponseResultNodeOntapiDetails) NodeOntapiDetailInfo() []NodeOntapiDetailInfoType { - r := o.NodeOntapiDetailInfoPtr - return r -} - -// SetNodeOntapiDetailInfo is a fluent style 'setter' method that can be chained -func (o *SystemGetOntapiVersionResponseResultNodeOntapiDetails) SetNodeOntapiDetailInfo(newValue []NodeOntapiDetailInfoType) *SystemGetOntapiVersionResponseResultNodeOntapiDetails { - newSlice := make([]NodeOntapiDetailInfoType, len(newValue)) - copy(newSlice, newValue) - o.NodeOntapiDetailInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *SystemGetOntapiVersionResponseResultNodeOntapiDetails) values() []NodeOntapiDetailInfoType { - r := o.NodeOntapiDetailInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o 
*SystemGetOntapiVersionResponseResultNodeOntapiDetails) setValues(newValue []NodeOntapiDetailInfoType) *SystemGetOntapiVersionResponseResultNodeOntapiDetails { - newSlice := make([]NodeOntapiDetailInfoType, len(newValue)) - copy(newSlice, newValue) - o.NodeOntapiDetailInfoPtr = newSlice - return o -} - -// NodeOntapiDetails is a 'getter' method -func (o *SystemGetOntapiVersionResponseResult) NodeOntapiDetails() SystemGetOntapiVersionResponseResultNodeOntapiDetails { - r := *o.NodeOntapiDetailsPtr - return r -} - -// SetNodeOntapiDetails is a fluent style 'setter' method that can be chained -func (o *SystemGetOntapiVersionResponseResult) SetNodeOntapiDetails(newValue SystemGetOntapiVersionResponseResultNodeOntapiDetails) *SystemGetOntapiVersionResponseResult { - o.NodeOntapiDetailsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-system-get-version.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-system-get-version.go deleted file mode 100644 index f3a179f79..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-system-get-version.go +++ /dev/null @@ -1,241 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SystemGetVersionRequest is a structure to represent a system-get-version Request ZAPI object -type SystemGetVersionRequest struct { - XMLName xml.Name `xml:"system-get-version"` -} - -// SystemGetVersionResponse is a structure to represent a system-get-version Response ZAPI object -type SystemGetVersionResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result SystemGetVersionResponseResult `xml:"results"` -} - -// NewSystemGetVersionResponse is a factory method for creating new instances of SystemGetVersionResponse objects -func NewSystemGetVersionResponse() *SystemGetVersionResponse { - return &SystemGetVersionResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemGetVersionResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *SystemGetVersionResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// SystemGetVersionResponseResult is a structure to represent a system-get-version Response Result ZAPI object -type SystemGetVersionResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - BuildTimestampPtr *int `xml:"build-timestamp"` - IsClusteredPtr *bool `xml:"is-clustered"` - NodeVersionDetailsPtr *SystemGetVersionResponseResultNodeVersionDetails `xml:"node-version-details"` - VersionPtr *string `xml:"version"` - VersionTuplePtr *SystemGetVersionResponseResultVersionTuple `xml:"version-tuple"` -} - -// NewSystemGetVersionRequest is a factory method for creating new instances of SystemGetVersionRequest objects -func NewSystemGetVersionRequest() *SystemGetVersionRequest { - return &SystemGetVersionRequest{} -} - -// NewSystemGetVersionResponseResult is a factory method for creating new instances of SystemGetVersionResponseResult objects -func NewSystemGetVersionResponseResult() *SystemGetVersionResponseResult 
{ - return &SystemGetVersionResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *SystemGetVersionRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *SystemGetVersionResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemGetVersionRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemGetVersionResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SystemGetVersionRequest) ExecuteUsing(zr *ZapiRunner) (*SystemGetVersionResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SystemGetVersionRequest) executeWithoutIteration(zr *ZapiRunner) (*SystemGetVersionResponse, error) { - result, err := zr.ExecuteUsing(o, "SystemGetVersionRequest", NewSystemGetVersionResponse()) - if result == nil { - return nil, err - } - return result.(*SystemGetVersionResponse), err -} - -// BuildTimestamp is a 'getter' method -func (o *SystemGetVersionResponseResult) BuildTimestamp() int { - r := *o.BuildTimestampPtr - return r -} - -// SetBuildTimestamp is a fluent style 'setter' method that can be chained -func (o *SystemGetVersionResponseResult) SetBuildTimestamp(newValue int) *SystemGetVersionResponseResult { - o.BuildTimestampPtr = &newValue - return o -} - -// IsClustered is a 'getter' method -func (o *SystemGetVersionResponseResult) IsClustered() bool { - r := *o.IsClusteredPtr - return r -} - -// SetIsClustered is a fluent style 'setter' method that can be chained -func (o *SystemGetVersionResponseResult) SetIsClustered(newValue bool) *SystemGetVersionResponseResult { - o.IsClusteredPtr = &newValue - return o -} - -// SystemGetVersionResponseResultNodeVersionDetails is a wrapper -type SystemGetVersionResponseResultNodeVersionDetails struct { - XMLName xml.Name `xml:"node-version-details"` - NodeVersionDetailInfoPtr []NodeVersionDetailInfoType `xml:"node-version-detail-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemGetVersionResponseResultNodeVersionDetails) String() string { - return ToString(reflect.ValueOf(o)) -} - -// NodeVersionDetailInfo is a 'getter' method -func (o *SystemGetVersionResponseResultNodeVersionDetails) NodeVersionDetailInfo() []NodeVersionDetailInfoType { - r := o.NodeVersionDetailInfoPtr - return r -} - -// SetNodeVersionDetailInfo is a fluent style 'setter' method that can be chained -func (o *SystemGetVersionResponseResultNodeVersionDetails) SetNodeVersionDetailInfo(newValue []NodeVersionDetailInfoType) *SystemGetVersionResponseResultNodeVersionDetails { - newSlice := make([]NodeVersionDetailInfoType, len(newValue)) - copy(newSlice, newValue) - o.NodeVersionDetailInfoPtr = newSlice - return o -} - -// 
values is a 'getter' method -func (o *SystemGetVersionResponseResultNodeVersionDetails) values() []NodeVersionDetailInfoType { - r := o.NodeVersionDetailInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *SystemGetVersionResponseResultNodeVersionDetails) setValues(newValue []NodeVersionDetailInfoType) *SystemGetVersionResponseResultNodeVersionDetails { - newSlice := make([]NodeVersionDetailInfoType, len(newValue)) - copy(newSlice, newValue) - o.NodeVersionDetailInfoPtr = newSlice - return o -} - -// NodeVersionDetails is a 'getter' method -func (o *SystemGetVersionResponseResult) NodeVersionDetails() SystemGetVersionResponseResultNodeVersionDetails { - r := *o.NodeVersionDetailsPtr - return r -} - -// SetNodeVersionDetails is a fluent style 'setter' method that can be chained -func (o *SystemGetVersionResponseResult) SetNodeVersionDetails(newValue SystemGetVersionResponseResultNodeVersionDetails) *SystemGetVersionResponseResult { - o.NodeVersionDetailsPtr = &newValue - return o -} - -// Version is a 'getter' method -func (o *SystemGetVersionResponseResult) Version() string { - r := *o.VersionPtr - return r -} - -// SetVersion is a fluent style 'setter' method that can be chained -func (o *SystemGetVersionResponseResult) SetVersion(newValue string) *SystemGetVersionResponseResult { - o.VersionPtr = &newValue - return o -} - -// SystemGetVersionResponseResultVersionTuple is a wrapper -type SystemGetVersionResponseResultVersionTuple struct { - XMLName xml.Name `xml:"version-tuple"` - SystemVersionTuplePtr *SystemVersionTupleType `xml:"system-version-tuple"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemGetVersionResponseResultVersionTuple) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SystemVersionTuple is a 'getter' method -func (o *SystemGetVersionResponseResultVersionTuple) SystemVersionTuple() SystemVersionTupleType { - r := *o.SystemVersionTuplePtr - return r -} - -// SetSystemVersionTuple is a fluent style 'setter' method that can be chained -func (o *SystemGetVersionResponseResultVersionTuple) SetSystemVersionTuple(newValue SystemVersionTupleType) *SystemGetVersionResponseResultVersionTuple { - o.SystemVersionTuplePtr = &newValue - return o -} - -// values is a 'getter' method -func (o *SystemGetVersionResponseResultVersionTuple) values() SystemVersionTupleType { - r := *o.SystemVersionTuplePtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *SystemGetVersionResponseResultVersionTuple) setValues(newValue SystemVersionTupleType) *SystemGetVersionResponseResultVersionTuple { - o.SystemVersionTuplePtr = &newValue - return o -} - -// VersionTuple is a 'getter' method -func (o *SystemGetVersionResponseResult) VersionTuple() SystemGetVersionResponseResultVersionTuple { - r := *o.VersionTuplePtr - return r -} - -// SetVersionTuple is a fluent style 'setter' method that can be chained -func (o *SystemGetVersionResponseResult) SetVersionTuple(newValue SystemGetVersionResponseResultVersionTuple) *SystemGetVersionResponseResult { - o.VersionTuplePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-system-node-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-system-node-get-iter.go deleted file mode 100644 index 16e58216d..000000000 --- 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-system-node-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SystemNodeGetIterRequest is a structure to represent a system-node-get-iter Request ZAPI object -type SystemNodeGetIterRequest struct { - XMLName xml.Name `xml:"system-node-get-iter"` - DesiredAttributesPtr *SystemNodeGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *SystemNodeGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// SystemNodeGetIterResponse is a structure to represent a system-node-get-iter Response ZAPI object -type SystemNodeGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result SystemNodeGetIterResponseResult `xml:"results"` -} - -// NewSystemNodeGetIterResponse is a factory method for creating new instances of SystemNodeGetIterResponse objects -func NewSystemNodeGetIterResponse() *SystemNodeGetIterResponse { - return &SystemNodeGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemNodeGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *SystemNodeGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// SystemNodeGetIterResponseResult is a structure to represent a system-node-get-iter Response Result ZAPI object -type SystemNodeGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *SystemNodeGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewSystemNodeGetIterRequest is a factory method for creating new instances of SystemNodeGetIterRequest objects -func NewSystemNodeGetIterRequest() *SystemNodeGetIterRequest { - return &SystemNodeGetIterRequest{} -} - -// NewSystemNodeGetIterResponseResult is a factory method for creating new instances of SystemNodeGetIterResponseResult objects -func NewSystemNodeGetIterResponseResult() *SystemNodeGetIterResponseResult { - return &SystemNodeGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *SystemNodeGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *SystemNodeGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemNodeGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemNodeGetIterResponseResult) String() string { - return 
ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SystemNodeGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*SystemNodeGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *SystemNodeGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*SystemNodeGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "SystemNodeGetIterRequest", NewSystemNodeGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*SystemNodeGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *SystemNodeGetIterRequest) executeWithIteration(zr *ZapiRunner) (*SystemNodeGetIterResponse, error) { - combined := NewSystemNodeGetIterResponse() - combined.Result.SetAttributesList(SystemNodeGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(SystemNodeGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// SystemNodeGetIterRequestDesiredAttributes is a wrapper -type SystemNodeGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - NodeDetailsInfoPtr *NodeDetailsInfoType `xml:"node-details-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemNodeGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// NodeDetailsInfo is a 'getter' method -func (o *SystemNodeGetIterRequestDesiredAttributes) NodeDetailsInfo() NodeDetailsInfoType { - r := *o.NodeDetailsInfoPtr - return r -} - -// SetNodeDetailsInfo is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterRequestDesiredAttributes) SetNodeDetailsInfo(newValue NodeDetailsInfoType) *SystemNodeGetIterRequestDesiredAttributes { - o.NodeDetailsInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *SystemNodeGetIterRequest) DesiredAttributes() SystemNodeGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// 
SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterRequest) SetDesiredAttributes(newValue SystemNodeGetIterRequestDesiredAttributes) *SystemNodeGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *SystemNodeGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterRequest) SetMaxRecords(newValue int) *SystemNodeGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// SystemNodeGetIterRequestQuery is a wrapper -type SystemNodeGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - NodeDetailsInfoPtr *NodeDetailsInfoType `xml:"node-details-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemNodeGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// NodeDetailsInfo is a 'getter' method -func (o *SystemNodeGetIterRequestQuery) NodeDetailsInfo() NodeDetailsInfoType { - r := *o.NodeDetailsInfoPtr - return r -} - -// SetNodeDetailsInfo is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterRequestQuery) SetNodeDetailsInfo(newValue NodeDetailsInfoType) *SystemNodeGetIterRequestQuery { - o.NodeDetailsInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *SystemNodeGetIterRequest) Query() SystemNodeGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterRequest) SetQuery(newValue SystemNodeGetIterRequestQuery) *SystemNodeGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *SystemNodeGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterRequest) SetTag(newValue string) *SystemNodeGetIterRequest { - o.TagPtr = &newValue - return o -} - -// SystemNodeGetIterResponseResultAttributesList is a wrapper -type SystemNodeGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - NodeDetailsInfoPtr []NodeDetailsInfoType `xml:"node-details-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemNodeGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// NodeDetailsInfo is a 'getter' method -func (o *SystemNodeGetIterResponseResultAttributesList) NodeDetailsInfo() []NodeDetailsInfoType { - r := o.NodeDetailsInfoPtr - return r -} - -// SetNodeDetailsInfo is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterResponseResultAttributesList) SetNodeDetailsInfo(newValue []NodeDetailsInfoType) *SystemNodeGetIterResponseResultAttributesList { - newSlice := make([]NodeDetailsInfoType, len(newValue)) - copy(newSlice, newValue) - o.NodeDetailsInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *SystemNodeGetIterResponseResultAttributesList) values() []NodeDetailsInfoType { - r := o.NodeDetailsInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterResponseResultAttributesList) setValues(newValue []NodeDetailsInfoType) *SystemNodeGetIterResponseResultAttributesList { - newSlice := make([]NodeDetailsInfoType, len(newValue)) - 
copy(newSlice, newValue) - o.NodeDetailsInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *SystemNodeGetIterResponseResult) AttributesList() SystemNodeGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterResponseResult) SetAttributesList(newValue SystemNodeGetIterResponseResultAttributesList) *SystemNodeGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *SystemNodeGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterResponseResult) SetNextTag(newValue string) *SystemNodeGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *SystemNodeGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *SystemNodeGetIterResponseResult) SetNumRecords(newValue int) *SystemNodeGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-clone-create.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-clone-create.go deleted file mode 100644 index 2ed1ce675..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-clone-create.go +++ /dev/null @@ -1,232 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeCloneCreateRequest is a structure to represent a volume-clone-create Request ZAPI object -type VolumeCloneCreateRequest struct { - XMLName xml.Name `xml:"volume-clone-create"` - CachingPolicyPtr *string `xml:"caching-policy"` - ParentSnapshotPtr *string `xml:"parent-snapshot"` - ParentVolumePtr *string `xml:"parent-volume"` - ParentVserverPtr *string `xml:"parent-vserver"` - QosPolicyGroupNamePtr *string `xml:"qos-policy-group-name"` - SpaceReservePtr *string `xml:"space-reserve"` - UseSnaprestoreLicensePtr *bool `xml:"use-snaprestore-license"` - VolumePtr *string `xml:"volume"` - VolumeTypePtr *string `xml:"volume-type"` - VserverPtr *string `xml:"vserver"` -} - -// VolumeCloneCreateResponse is a structure to represent a volume-clone-create Response ZAPI object -type VolumeCloneCreateResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeCloneCreateResponseResult `xml:"results"` -} - -// NewVolumeCloneCreateResponse is a factory method for creating new instances of VolumeCloneCreateResponse objects -func NewVolumeCloneCreateResponse() *VolumeCloneCreateResponse { - return &VolumeCloneCreateResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCloneCreateResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCloneCreateResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeCloneCreateResponseResult is a structure to represent a volume-clone-create Response Result ZAPI object -type 
VolumeCloneCreateResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewVolumeCloneCreateRequest is a factory method for creating new instances of VolumeCloneCreateRequest objects -func NewVolumeCloneCreateRequest() *VolumeCloneCreateRequest { - return &VolumeCloneCreateRequest{} -} - -// NewVolumeCloneCreateResponseResult is a factory method for creating new instances of VolumeCloneCreateResponseResult objects -func NewVolumeCloneCreateResponseResult() *VolumeCloneCreateResponseResult { - return &VolumeCloneCreateResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCloneCreateRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCloneCreateResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCloneCreateRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCloneCreateResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeCloneCreateRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeCloneCreateResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeCloneCreateRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeCloneCreateResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeCloneCreateRequest", NewVolumeCloneCreateResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeCloneCreateResponse), err -} - -// CachingPolicy is a 'getter' method -func (o *VolumeCloneCreateRequest) CachingPolicy() string { - r := *o.CachingPolicyPtr - return r -} - -// SetCachingPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetCachingPolicy(newValue string) *VolumeCloneCreateRequest { - o.CachingPolicyPtr = &newValue - return o -} - -// ParentSnapshot is a 'getter' method -func (o *VolumeCloneCreateRequest) ParentSnapshot() string { - r := *o.ParentSnapshotPtr - return r -} - -// SetParentSnapshot is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetParentSnapshot(newValue string) *VolumeCloneCreateRequest { - o.ParentSnapshotPtr = &newValue - return o -} - -// ParentVolume is a 'getter' method -func (o *VolumeCloneCreateRequest) ParentVolume() string { - r := *o.ParentVolumePtr - return r -} - -// SetParentVolume is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetParentVolume(newValue string) *VolumeCloneCreateRequest { - o.ParentVolumePtr = &newValue - return o -} - -// ParentVserver is a 'getter' method -func (o *VolumeCloneCreateRequest) ParentVserver() string { - r := 
*o.ParentVserverPtr - return r -} - -// SetParentVserver is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetParentVserver(newValue string) *VolumeCloneCreateRequest { - o.ParentVserverPtr = &newValue - return o -} - -// QosPolicyGroupName is a 'getter' method -func (o *VolumeCloneCreateRequest) QosPolicyGroupName() string { - r := *o.QosPolicyGroupNamePtr - return r -} - -// SetQosPolicyGroupName is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetQosPolicyGroupName(newValue string) *VolumeCloneCreateRequest { - o.QosPolicyGroupNamePtr = &newValue - return o -} - -// SpaceReserve is a 'getter' method -func (o *VolumeCloneCreateRequest) SpaceReserve() string { - r := *o.SpaceReservePtr - return r -} - -// SetSpaceReserve is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetSpaceReserve(newValue string) *VolumeCloneCreateRequest { - o.SpaceReservePtr = &newValue - return o -} - -// UseSnaprestoreLicense is a 'getter' method -func (o *VolumeCloneCreateRequest) UseSnaprestoreLicense() bool { - r := *o.UseSnaprestoreLicensePtr - return r -} - -// SetUseSnaprestoreLicense is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetUseSnaprestoreLicense(newValue bool) *VolumeCloneCreateRequest { - o.UseSnaprestoreLicensePtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *VolumeCloneCreateRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetVolume(newValue string) *VolumeCloneCreateRequest { - o.VolumePtr = &newValue - return o -} - -// VolumeType is a 'getter' method -func (o *VolumeCloneCreateRequest) VolumeType() string { - r := *o.VolumeTypePtr - return r -} - -// SetVolumeType is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetVolumeType(newValue string) *VolumeCloneCreateRequest { - o.VolumeTypePtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *VolumeCloneCreateRequest) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *VolumeCloneCreateRequest) SetVserver(newValue string) *VolumeCloneCreateRequest { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-clone-split-start.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-clone-split-start.go deleted file mode 100644 index c5f878fc6..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-clone-split-start.go +++ /dev/null @@ -1,167 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeCloneSplitStartRequest is a structure to represent a volume-clone-split-start Request ZAPI object -type VolumeCloneSplitStartRequest struct { - XMLName xml.Name `xml:"volume-clone-split-start"` - VolumePtr *string `xml:"volume"` -} - -// VolumeCloneSplitStartResponse is a structure to represent a volume-clone-split-start Response ZAPI object -type VolumeCloneSplitStartResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeCloneSplitStartResponseResult `xml:"results"` -} - -// NewVolumeCloneSplitStartResponse is a factory method 
for creating new instances of VolumeCloneSplitStartResponse objects -func NewVolumeCloneSplitStartResponse() *VolumeCloneSplitStartResponse { - return &VolumeCloneSplitStartResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCloneSplitStartResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCloneSplitStartResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeCloneSplitStartResponseResult is a structure to represent a volume-clone-split-start Response Result ZAPI object -type VolumeCloneSplitStartResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ResultErrorCodePtr *int `xml:"result-error-code"` - ResultErrorMessagePtr *string `xml:"result-error-message"` - ResultJobidPtr *int `xml:"result-jobid"` - ResultStatusPtr *string `xml:"result-status"` -} - -// NewVolumeCloneSplitStartRequest is a factory method for creating new instances of VolumeCloneSplitStartRequest objects -func NewVolumeCloneSplitStartRequest() *VolumeCloneSplitStartRequest { - return &VolumeCloneSplitStartRequest{} -} - -// NewVolumeCloneSplitStartResponseResult is a factory method for creating new instances of VolumeCloneSplitStartResponseResult objects -func NewVolumeCloneSplitStartResponseResult() *VolumeCloneSplitStartResponseResult { - return &VolumeCloneSplitStartResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCloneSplitStartRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCloneSplitStartResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCloneSplitStartRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCloneSplitStartResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeCloneSplitStartRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeCloneSplitStartResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeCloneSplitStartRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeCloneSplitStartResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeCloneSplitStartRequest", NewVolumeCloneSplitStartResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeCloneSplitStartResponse), err -} - -// Volume is a 'getter' method -func (o *VolumeCloneSplitStartRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// 
SetVolume is a fluent style 'setter' method that can be chained -func (o *VolumeCloneSplitStartRequest) SetVolume(newValue string) *VolumeCloneSplitStartRequest { - o.VolumePtr = &newValue - return o -} - -// ResultErrorCode is a 'getter' method -func (o *VolumeCloneSplitStartResponseResult) ResultErrorCode() int { - r := *o.ResultErrorCodePtr - return r -} - -// SetResultErrorCode is a fluent style 'setter' method that can be chained -func (o *VolumeCloneSplitStartResponseResult) SetResultErrorCode(newValue int) *VolumeCloneSplitStartResponseResult { - o.ResultErrorCodePtr = &newValue - return o -} - -// ResultErrorMessage is a 'getter' method -func (o *VolumeCloneSplitStartResponseResult) ResultErrorMessage() string { - r := *o.ResultErrorMessagePtr - return r -} - -// SetResultErrorMessage is a fluent style 'setter' method that can be chained -func (o *VolumeCloneSplitStartResponseResult) SetResultErrorMessage(newValue string) *VolumeCloneSplitStartResponseResult { - o.ResultErrorMessagePtr = &newValue - return o -} - -// ResultJobid is a 'getter' method -func (o *VolumeCloneSplitStartResponseResult) ResultJobid() int { - r := *o.ResultJobidPtr - return r -} - -// SetResultJobid is a fluent style 'setter' method that can be chained -func (o *VolumeCloneSplitStartResponseResult) SetResultJobid(newValue int) *VolumeCloneSplitStartResponseResult { - o.ResultJobidPtr = &newValue - return o -} - -// ResultStatus is a 'getter' method -func (o *VolumeCloneSplitStartResponseResult) ResultStatus() string { - r := *o.ResultStatusPtr - return r -} - -// SetResultStatus is a fluent style 'setter' method that can be chained -func (o *VolumeCloneSplitStartResponseResult) SetResultStatus(newValue string) *VolumeCloneSplitStartResponseResult { - o.ResultStatusPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-create-async.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-create-async.go deleted file mode 100644 index d87c88946..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-create-async.go +++ /dev/null @@ -1,943 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeCreateAsyncRequest is a structure to represent a volume-create-async Request ZAPI object -type VolumeCreateAsyncRequest struct { - XMLName xml.Name `xml:"volume-create-async"` - AggrListPtr *VolumeCreateAsyncRequestAggrList `xml:"aggr-list"` - AggrListMultiplierPtr *int `xml:"aggr-list-multiplier"` - AutoProvisionAsPtr *string `xml:"auto-provision-as"` - CacheRetentionPriorityPtr *string `xml:"cache-retention-priority"` - CachingPolicyPtr *string `xml:"caching-policy"` - ContainingAggrNamePtr *string `xml:"containing-aggr-name"` - DataAggrListPtr *VolumeCreateAsyncRequestDataAggrList `xml:"data-aggr-list"` - EfficiencyPolicyPtr *string `xml:"efficiency-policy"` - EnableObjectStorePtr *bool `xml:"enable-object-store"` - EnableSnapdiffPtr *bool `xml:"enable-snapdiff"` - EncryptPtr *bool `xml:"encrypt"` - ExcludedFromAutobalancePtr *bool `xml:"excluded-from-autobalance"` - ExportPolicyPtr *string `xml:"export-policy"` - FlexcacheCachePolicyPtr *string `xml:"flexcache-cache-policy"` - FlexcacheFillPolicyPtr *string `xml:"flexcache-fill-policy"` - FlexcacheOriginVolumeNamePtr *string `xml:"flexcache-origin-volume-name"` - GroupIdPtr *int `xml:"group-id"` - IsJunctionActivePtr *bool `xml:"is-junction-active"` - IsManagedByServicePtr *bool 
`xml:"is-managed-by-service"` - IsNvfailEnabledPtr *bool `xml:"is-nvfail-enabled"` - IsVserverRootPtr *bool `xml:"is-vserver-root"` - JunctionPathPtr *string `xml:"junction-path"` - LanguageCodePtr *string `xml:"language-code"` - MaxConstituentSizePtr *int `xml:"max-constituent-size"` - MaxDataConstituentSizePtr *int `xml:"max-data-constituent-size"` - MaxDirSizePtr *int `xml:"max-dir-size"` - MaxNamespaceConstituentSizePtr *int `xml:"max-namespace-constituent-size"` - NamespaceAggregatePtr *string `xml:"namespace-aggregate"` - NamespaceMirrorAggrListPtr *VolumeCreateAsyncRequestNamespaceMirrorAggrList `xml:"namespace-mirror-aggr-list"` - ObjectWriteSyncPeriodPtr *int `xml:"object-write-sync-period"` - OlsAggrListPtr *VolumeCreateAsyncRequestOlsAggrList `xml:"ols-aggr-list"` - OlsConstituentCountPtr *int `xml:"ols-constituent-count"` - OlsConstituentSizePtr *int `xml:"ols-constituent-size"` - PercentageSnapshotReservePtr *int `xml:"percentage-snapshot-reserve"` - QosAdaptivePolicyGroupNamePtr *string `xml:"qos-adaptive-policy-group-name"` - QosPolicyGroupNamePtr *string `xml:"qos-policy-group-name"` - SizePtr *int `xml:"size"` - SnapshotPolicyPtr *string `xml:"snapshot-policy"` - SpaceGuaranteePtr *string `xml:"space-guarantee"` - SpaceReservePtr *string `xml:"space-reserve"` - SpaceSloPtr *string `xml:"space-slo"` - StorageServicePtr *string `xml:"storage-service"` - TieringPolicyPtr *string `xml:"tiering-policy"` - UnixPermissionsPtr *string `xml:"unix-permissions"` - UserIdPtr *int `xml:"user-id"` - VmAlignSectorPtr *int `xml:"vm-align-sector"` - VmAlignSuffixPtr *string `xml:"vm-align-suffix"` - VolumeCommentPtr *string `xml:"volume-comment"` - VolumeNamePtr *string `xml:"volume-name"` - VolumeSecurityStylePtr *string `xml:"volume-security-style"` - VolumeStatePtr *string `xml:"volume-state"` - VolumeTypePtr *string `xml:"volume-type"` - VserverDrProtectionPtr *string `xml:"vserver-dr-protection"` -} - -// VolumeCreateAsyncResponse is a structure to represent a volume-create-async Response ZAPI object -type VolumeCreateAsyncResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeCreateAsyncResponseResult `xml:"results"` -} - -// NewVolumeCreateAsyncResponse is a factory method for creating new instances of VolumeCreateAsyncResponse objects -func NewVolumeCreateAsyncResponse() *VolumeCreateAsyncResponse { - return &VolumeCreateAsyncResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateAsyncResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCreateAsyncResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeCreateAsyncResponseResult is a structure to represent a volume-create-async Response Result ZAPI object -type VolumeCreateAsyncResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ResultErrorCodePtr *int `xml:"result-error-code"` - ResultErrorMessagePtr *string `xml:"result-error-message"` - ResultJobidPtr *int `xml:"result-jobid"` - ResultStatusPtr *string `xml:"result-status"` -} - -// NewVolumeCreateAsyncRequest is a factory method for 
creating new instances of VolumeCreateAsyncRequest objects -func NewVolumeCreateAsyncRequest() *VolumeCreateAsyncRequest { - return &VolumeCreateAsyncRequest{} -} - -// NewVolumeCreateAsyncResponseResult is a factory method for creating new instances of VolumeCreateAsyncResponseResult objects -func NewVolumeCreateAsyncResponseResult() *VolumeCreateAsyncResponseResult { - return &VolumeCreateAsyncResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCreateAsyncRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCreateAsyncResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateAsyncRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateAsyncResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeCreateAsyncRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeCreateAsyncResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeCreateAsyncRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeCreateAsyncResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeCreateAsyncRequest", NewVolumeCreateAsyncResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeCreateAsyncResponse), err -} - -// VolumeCreateAsyncRequestAggrList is a wrapper -type VolumeCreateAsyncRequestAggrList struct { - XMLName xml.Name `xml:"aggr-list"` - AggrNamePtr []AggrNameType `xml:"aggr-name"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateAsyncRequestAggrList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AggrName is a 'getter' method -func (o *VolumeCreateAsyncRequestAggrList) AggrName() []AggrNameType { - r := o.AggrNamePtr - return r -} - -// SetAggrName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequestAggrList) SetAggrName(newValue []AggrNameType) *VolumeCreateAsyncRequestAggrList { - newSlice := make([]AggrNameType, len(newValue)) - copy(newSlice, newValue) - o.AggrNamePtr = newSlice - return o -} - -// AggrList is a 'getter' method -func (o *VolumeCreateAsyncRequest) AggrList() VolumeCreateAsyncRequestAggrList { - r := *o.AggrListPtr - return r -} - -// SetAggrList is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetAggrList(newValue VolumeCreateAsyncRequestAggrList) *VolumeCreateAsyncRequest { - o.AggrListPtr = &newValue - return o -} - -// AggrListMultiplier is a 'getter' method -func (o *VolumeCreateAsyncRequest) AggrListMultiplier() int { - r := *o.AggrListMultiplierPtr - return r -} - -// SetAggrListMultiplier is a fluent style 'setter' method that can be chained -func (o 
*VolumeCreateAsyncRequest) SetAggrListMultiplier(newValue int) *VolumeCreateAsyncRequest { - o.AggrListMultiplierPtr = &newValue - return o -} - -// AutoProvisionAs is a 'getter' method -func (o *VolumeCreateAsyncRequest) AutoProvisionAs() string { - r := *o.AutoProvisionAsPtr - return r -} - -// SetAutoProvisionAs is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetAutoProvisionAs(newValue string) *VolumeCreateAsyncRequest { - o.AutoProvisionAsPtr = &newValue - return o -} - -// CacheRetentionPriority is a 'getter' method -func (o *VolumeCreateAsyncRequest) CacheRetentionPriority() string { - r := *o.CacheRetentionPriorityPtr - return r -} - -// SetCacheRetentionPriority is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetCacheRetentionPriority(newValue string) *VolumeCreateAsyncRequest { - o.CacheRetentionPriorityPtr = &newValue - return o -} - -// CachingPolicy is a 'getter' method -func (o *VolumeCreateAsyncRequest) CachingPolicy() string { - r := *o.CachingPolicyPtr - return r -} - -// SetCachingPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetCachingPolicy(newValue string) *VolumeCreateAsyncRequest { - o.CachingPolicyPtr = &newValue - return o -} - -// ContainingAggrName is a 'getter' method -func (o *VolumeCreateAsyncRequest) ContainingAggrName() string { - r := *o.ContainingAggrNamePtr - return r -} - -// SetContainingAggrName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetContainingAggrName(newValue string) *VolumeCreateAsyncRequest { - o.ContainingAggrNamePtr = &newValue - return o -} - -// VolumeCreateAsyncRequestDataAggrList is a wrapper -type VolumeCreateAsyncRequestDataAggrList struct { - XMLName xml.Name `xml:"data-aggr-list"` - AggrNamePtr []AggrNameType `xml:"aggr-name"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateAsyncRequestDataAggrList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AggrName is a 'getter' method -func (o *VolumeCreateAsyncRequestDataAggrList) AggrName() []AggrNameType { - r := o.AggrNamePtr - return r -} - -// SetAggrName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequestDataAggrList) SetAggrName(newValue []AggrNameType) *VolumeCreateAsyncRequestDataAggrList { - newSlice := make([]AggrNameType, len(newValue)) - copy(newSlice, newValue) - o.AggrNamePtr = newSlice - return o -} - -// DataAggrList is a 'getter' method -func (o *VolumeCreateAsyncRequest) DataAggrList() VolumeCreateAsyncRequestDataAggrList { - r := *o.DataAggrListPtr - return r -} - -// SetDataAggrList is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetDataAggrList(newValue VolumeCreateAsyncRequestDataAggrList) *VolumeCreateAsyncRequest { - o.DataAggrListPtr = &newValue - return o -} - -// EfficiencyPolicy is a 'getter' method -func (o *VolumeCreateAsyncRequest) EfficiencyPolicy() string { - r := *o.EfficiencyPolicyPtr - return r -} - -// SetEfficiencyPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetEfficiencyPolicy(newValue string) *VolumeCreateAsyncRequest { - o.EfficiencyPolicyPtr = &newValue - return o -} - -// EnableObjectStore is a 'getter' method -func (o *VolumeCreateAsyncRequest) EnableObjectStore() bool { - r := *o.EnableObjectStorePtr - return r -} - -// 
SetEnableObjectStore is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetEnableObjectStore(newValue bool) *VolumeCreateAsyncRequest { - o.EnableObjectStorePtr = &newValue - return o -} - -// EnableSnapdiff is a 'getter' method -func (o *VolumeCreateAsyncRequest) EnableSnapdiff() bool { - r := *o.EnableSnapdiffPtr - return r -} - -// SetEnableSnapdiff is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetEnableSnapdiff(newValue bool) *VolumeCreateAsyncRequest { - o.EnableSnapdiffPtr = &newValue - return o -} - -// Encrypt is a 'getter' method -func (o *VolumeCreateAsyncRequest) Encrypt() bool { - r := *o.EncryptPtr - return r -} - -// SetEncrypt is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetEncrypt(newValue bool) *VolumeCreateAsyncRequest { - o.EncryptPtr = &newValue - return o -} - -// ExcludedFromAutobalance is a 'getter' method -func (o *VolumeCreateAsyncRequest) ExcludedFromAutobalance() bool { - r := *o.ExcludedFromAutobalancePtr - return r -} - -// SetExcludedFromAutobalance is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetExcludedFromAutobalance(newValue bool) *VolumeCreateAsyncRequest { - o.ExcludedFromAutobalancePtr = &newValue - return o -} - -// ExportPolicy is a 'getter' method -func (o *VolumeCreateAsyncRequest) ExportPolicy() string { - r := *o.ExportPolicyPtr - return r -} - -// SetExportPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetExportPolicy(newValue string) *VolumeCreateAsyncRequest { - o.ExportPolicyPtr = &newValue - return o -} - -// FlexcacheCachePolicy is a 'getter' method -func (o *VolumeCreateAsyncRequest) FlexcacheCachePolicy() string { - r := *o.FlexcacheCachePolicyPtr - return r -} - -// SetFlexcacheCachePolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetFlexcacheCachePolicy(newValue string) *VolumeCreateAsyncRequest { - o.FlexcacheCachePolicyPtr = &newValue - return o -} - -// FlexcacheFillPolicy is a 'getter' method -func (o *VolumeCreateAsyncRequest) FlexcacheFillPolicy() string { - r := *o.FlexcacheFillPolicyPtr - return r -} - -// SetFlexcacheFillPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetFlexcacheFillPolicy(newValue string) *VolumeCreateAsyncRequest { - o.FlexcacheFillPolicyPtr = &newValue - return o -} - -// FlexcacheOriginVolumeName is a 'getter' method -func (o *VolumeCreateAsyncRequest) FlexcacheOriginVolumeName() string { - r := *o.FlexcacheOriginVolumeNamePtr - return r -} - -// SetFlexcacheOriginVolumeName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetFlexcacheOriginVolumeName(newValue string) *VolumeCreateAsyncRequest { - o.FlexcacheOriginVolumeNamePtr = &newValue - return o -} - -// GroupId is a 'getter' method -func (o *VolumeCreateAsyncRequest) GroupId() int { - r := *o.GroupIdPtr - return r -} - -// SetGroupId is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetGroupId(newValue int) *VolumeCreateAsyncRequest { - o.GroupIdPtr = &newValue - return o -} - -// IsJunctionActive is a 'getter' method -func (o *VolumeCreateAsyncRequest) IsJunctionActive() bool { - r := *o.IsJunctionActivePtr - return r -} - -// SetIsJunctionActive is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) 
SetIsJunctionActive(newValue bool) *VolumeCreateAsyncRequest { - o.IsJunctionActivePtr = &newValue - return o -} - -// IsManagedByService is a 'getter' method -func (o *VolumeCreateAsyncRequest) IsManagedByService() bool { - r := *o.IsManagedByServicePtr - return r -} - -// SetIsManagedByService is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetIsManagedByService(newValue bool) *VolumeCreateAsyncRequest { - o.IsManagedByServicePtr = &newValue - return o -} - -// IsNvfailEnabled is a 'getter' method -func (o *VolumeCreateAsyncRequest) IsNvfailEnabled() bool { - r := *o.IsNvfailEnabledPtr - return r -} - -// SetIsNvfailEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetIsNvfailEnabled(newValue bool) *VolumeCreateAsyncRequest { - o.IsNvfailEnabledPtr = &newValue - return o -} - -// IsVserverRoot is a 'getter' method -func (o *VolumeCreateAsyncRequest) IsVserverRoot() bool { - r := *o.IsVserverRootPtr - return r -} - -// SetIsVserverRoot is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetIsVserverRoot(newValue bool) *VolumeCreateAsyncRequest { - o.IsVserverRootPtr = &newValue - return o -} - -// JunctionPath is a 'getter' method -func (o *VolumeCreateAsyncRequest) JunctionPath() string { - r := *o.JunctionPathPtr - return r -} - -// SetJunctionPath is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetJunctionPath(newValue string) *VolumeCreateAsyncRequest { - o.JunctionPathPtr = &newValue - return o -} - -// LanguageCode is a 'getter' method -func (o *VolumeCreateAsyncRequest) LanguageCode() string { - r := *o.LanguageCodePtr - return r -} - -// SetLanguageCode is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetLanguageCode(newValue string) *VolumeCreateAsyncRequest { - o.LanguageCodePtr = &newValue - return o -} - -// MaxConstituentSize is a 'getter' method -func (o *VolumeCreateAsyncRequest) MaxConstituentSize() int { - r := *o.MaxConstituentSizePtr - return r -} - -// SetMaxConstituentSize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetMaxConstituentSize(newValue int) *VolumeCreateAsyncRequest { - o.MaxConstituentSizePtr = &newValue - return o -} - -// MaxDataConstituentSize is a 'getter' method -func (o *VolumeCreateAsyncRequest) MaxDataConstituentSize() int { - r := *o.MaxDataConstituentSizePtr - return r -} - -// SetMaxDataConstituentSize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetMaxDataConstituentSize(newValue int) *VolumeCreateAsyncRequest { - o.MaxDataConstituentSizePtr = &newValue - return o -} - -// MaxDirSize is a 'getter' method -func (o *VolumeCreateAsyncRequest) MaxDirSize() int { - r := *o.MaxDirSizePtr - return r -} - -// SetMaxDirSize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetMaxDirSize(newValue int) *VolumeCreateAsyncRequest { - o.MaxDirSizePtr = &newValue - return o -} - -// MaxNamespaceConstituentSize is a 'getter' method -func (o *VolumeCreateAsyncRequest) MaxNamespaceConstituentSize() int { - r := *o.MaxNamespaceConstituentSizePtr - return r -} - -// SetMaxNamespaceConstituentSize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetMaxNamespaceConstituentSize(newValue int) *VolumeCreateAsyncRequest { - o.MaxNamespaceConstituentSizePtr = &newValue - return o -} - -// 
NamespaceAggregate is a 'getter' method -func (o *VolumeCreateAsyncRequest) NamespaceAggregate() string { - r := *o.NamespaceAggregatePtr - return r -} - -// SetNamespaceAggregate is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetNamespaceAggregate(newValue string) *VolumeCreateAsyncRequest { - o.NamespaceAggregatePtr = &newValue - return o -} - -// VolumeCreateAsyncRequestNamespaceMirrorAggrList is a wrapper -type VolumeCreateAsyncRequestNamespaceMirrorAggrList struct { - XMLName xml.Name `xml:"namespace-mirror-aggr-list"` - AggrNamePtr []AggrNameType `xml:"aggr-name"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateAsyncRequestNamespaceMirrorAggrList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AggrName is a 'getter' method -func (o *VolumeCreateAsyncRequestNamespaceMirrorAggrList) AggrName() []AggrNameType { - r := o.AggrNamePtr - return r -} - -// SetAggrName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequestNamespaceMirrorAggrList) SetAggrName(newValue []AggrNameType) *VolumeCreateAsyncRequestNamespaceMirrorAggrList { - newSlice := make([]AggrNameType, len(newValue)) - copy(newSlice, newValue) - o.AggrNamePtr = newSlice - return o -} - -// NamespaceMirrorAggrList is a 'getter' method -func (o *VolumeCreateAsyncRequest) NamespaceMirrorAggrList() VolumeCreateAsyncRequestNamespaceMirrorAggrList { - r := *o.NamespaceMirrorAggrListPtr - return r -} - -// SetNamespaceMirrorAggrList is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetNamespaceMirrorAggrList(newValue VolumeCreateAsyncRequestNamespaceMirrorAggrList) *VolumeCreateAsyncRequest { - o.NamespaceMirrorAggrListPtr = &newValue - return o -} - -// ObjectWriteSyncPeriod is a 'getter' method -func (o *VolumeCreateAsyncRequest) ObjectWriteSyncPeriod() int { - r := *o.ObjectWriteSyncPeriodPtr - return r -} - -// SetObjectWriteSyncPeriod is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetObjectWriteSyncPeriod(newValue int) *VolumeCreateAsyncRequest { - o.ObjectWriteSyncPeriodPtr = &newValue - return o -} - -// VolumeCreateAsyncRequestOlsAggrList is a wrapper -type VolumeCreateAsyncRequestOlsAggrList struct { - XMLName xml.Name `xml:"ols-aggr-list"` - AggrNamePtr []AggrNameType `xml:"aggr-name"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateAsyncRequestOlsAggrList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AggrName is a 'getter' method -func (o *VolumeCreateAsyncRequestOlsAggrList) AggrName() []AggrNameType { - r := o.AggrNamePtr - return r -} - -// SetAggrName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequestOlsAggrList) SetAggrName(newValue []AggrNameType) *VolumeCreateAsyncRequestOlsAggrList { - newSlice := make([]AggrNameType, len(newValue)) - copy(newSlice, newValue) - o.AggrNamePtr = newSlice - return o -} - -// OlsAggrList is a 'getter' method -func (o *VolumeCreateAsyncRequest) OlsAggrList() VolumeCreateAsyncRequestOlsAggrList { - r := *o.OlsAggrListPtr - return r -} - -// SetOlsAggrList is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetOlsAggrList(newValue VolumeCreateAsyncRequestOlsAggrList) *VolumeCreateAsyncRequest { - o.OlsAggrListPtr = &newValue - return o -} - -// 
OlsConstituentCount is a 'getter' method -func (o *VolumeCreateAsyncRequest) OlsConstituentCount() int { - r := *o.OlsConstituentCountPtr - return r -} - -// SetOlsConstituentCount is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetOlsConstituentCount(newValue int) *VolumeCreateAsyncRequest { - o.OlsConstituentCountPtr = &newValue - return o -} - -// OlsConstituentSize is a 'getter' method -func (o *VolumeCreateAsyncRequest) OlsConstituentSize() int { - r := *o.OlsConstituentSizePtr - return r -} - -// SetOlsConstituentSize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetOlsConstituentSize(newValue int) *VolumeCreateAsyncRequest { - o.OlsConstituentSizePtr = &newValue - return o -} - -// PercentageSnapshotReserve is a 'getter' method -func (o *VolumeCreateAsyncRequest) PercentageSnapshotReserve() int { - r := *o.PercentageSnapshotReservePtr - return r -} - -// SetPercentageSnapshotReserve is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetPercentageSnapshotReserve(newValue int) *VolumeCreateAsyncRequest { - o.PercentageSnapshotReservePtr = &newValue - return o -} - -// QosAdaptivePolicyGroupName is a 'getter' method -func (o *VolumeCreateAsyncRequest) QosAdaptivePolicyGroupName() string { - r := *o.QosAdaptivePolicyGroupNamePtr - return r -} - -// SetQosAdaptivePolicyGroupName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetQosAdaptivePolicyGroupName(newValue string) *VolumeCreateAsyncRequest { - o.QosAdaptivePolicyGroupNamePtr = &newValue - return o -} - -// QosPolicyGroupName is a 'getter' method -func (o *VolumeCreateAsyncRequest) QosPolicyGroupName() string { - r := *o.QosPolicyGroupNamePtr - return r -} - -// SetQosPolicyGroupName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetQosPolicyGroupName(newValue string) *VolumeCreateAsyncRequest { - o.QosPolicyGroupNamePtr = &newValue - return o -} - -// Size is a 'getter' method -func (o *VolumeCreateAsyncRequest) Size() int { - r := *o.SizePtr - return r -} - -// SetSize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetSize(newValue int) *VolumeCreateAsyncRequest { - o.SizePtr = &newValue - return o -} - -// SnapshotPolicy is a 'getter' method -func (o *VolumeCreateAsyncRequest) SnapshotPolicy() string { - r := *o.SnapshotPolicyPtr - return r -} - -// SetSnapshotPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetSnapshotPolicy(newValue string) *VolumeCreateAsyncRequest { - o.SnapshotPolicyPtr = &newValue - return o -} - -// SpaceGuarantee is a 'getter' method -func (o *VolumeCreateAsyncRequest) SpaceGuarantee() string { - r := *o.SpaceGuaranteePtr - return r -} - -// SetSpaceGuarantee is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetSpaceGuarantee(newValue string) *VolumeCreateAsyncRequest { - o.SpaceGuaranteePtr = &newValue - return o -} - -// SpaceReserve is a 'getter' method -func (o *VolumeCreateAsyncRequest) SpaceReserve() string { - r := *o.SpaceReservePtr - return r -} - -// SetSpaceReserve is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetSpaceReserve(newValue string) *VolumeCreateAsyncRequest { - o.SpaceReservePtr = &newValue - return o -} - -// SpaceSlo is a 'getter' method -func (o *VolumeCreateAsyncRequest) SpaceSlo() string { - r := 
*o.SpaceSloPtr - return r -} - -// SetSpaceSlo is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetSpaceSlo(newValue string) *VolumeCreateAsyncRequest { - o.SpaceSloPtr = &newValue - return o -} - -// StorageService is a 'getter' method -func (o *VolumeCreateAsyncRequest) StorageService() string { - r := *o.StorageServicePtr - return r -} - -// SetStorageService is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetStorageService(newValue string) *VolumeCreateAsyncRequest { - o.StorageServicePtr = &newValue - return o -} - -// TieringPolicy is a 'getter' method -func (o *VolumeCreateAsyncRequest) TieringPolicy() string { - r := *o.TieringPolicyPtr - return r -} - -// SetTieringPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetTieringPolicy(newValue string) *VolumeCreateAsyncRequest { - o.TieringPolicyPtr = &newValue - return o -} - -// UnixPermissions is a 'getter' method -func (o *VolumeCreateAsyncRequest) UnixPermissions() string { - r := *o.UnixPermissionsPtr - return r -} - -// SetUnixPermissions is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetUnixPermissions(newValue string) *VolumeCreateAsyncRequest { - o.UnixPermissionsPtr = &newValue - return o -} - -// UserId is a 'getter' method -func (o *VolumeCreateAsyncRequest) UserId() int { - r := *o.UserIdPtr - return r -} - -// SetUserId is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetUserId(newValue int) *VolumeCreateAsyncRequest { - o.UserIdPtr = &newValue - return o -} - -// VmAlignSector is a 'getter' method -func (o *VolumeCreateAsyncRequest) VmAlignSector() int { - r := *o.VmAlignSectorPtr - return r -} - -// SetVmAlignSector is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetVmAlignSector(newValue int) *VolumeCreateAsyncRequest { - o.VmAlignSectorPtr = &newValue - return o -} - -// VmAlignSuffix is a 'getter' method -func (o *VolumeCreateAsyncRequest) VmAlignSuffix() string { - r := *o.VmAlignSuffixPtr - return r -} - -// SetVmAlignSuffix is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetVmAlignSuffix(newValue string) *VolumeCreateAsyncRequest { - o.VmAlignSuffixPtr = &newValue - return o -} - -// VolumeComment is a 'getter' method -func (o *VolumeCreateAsyncRequest) VolumeComment() string { - r := *o.VolumeCommentPtr - return r -} - -// SetVolumeComment is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetVolumeComment(newValue string) *VolumeCreateAsyncRequest { - o.VolumeCommentPtr = &newValue - return o -} - -// VolumeName is a 'getter' method -func (o *VolumeCreateAsyncRequest) VolumeName() string { - r := *o.VolumeNamePtr - return r -} - -// SetVolumeName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetVolumeName(newValue string) *VolumeCreateAsyncRequest { - o.VolumeNamePtr = &newValue - return o -} - -// VolumeSecurityStyle is a 'getter' method -func (o *VolumeCreateAsyncRequest) VolumeSecurityStyle() string { - r := *o.VolumeSecurityStylePtr - return r -} - -// SetVolumeSecurityStyle is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetVolumeSecurityStyle(newValue string) *VolumeCreateAsyncRequest { - o.VolumeSecurityStylePtr = &newValue - return o -} - -// VolumeState is a 'getter' method -func (o 
*VolumeCreateAsyncRequest) VolumeState() string { - r := *o.VolumeStatePtr - return r -} - -// SetVolumeState is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetVolumeState(newValue string) *VolumeCreateAsyncRequest { - o.VolumeStatePtr = &newValue - return o -} - -// VolumeType is a 'getter' method -func (o *VolumeCreateAsyncRequest) VolumeType() string { - r := *o.VolumeTypePtr - return r -} - -// SetVolumeType is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetVolumeType(newValue string) *VolumeCreateAsyncRequest { - o.VolumeTypePtr = &newValue - return o -} - -// VserverDrProtection is a 'getter' method -func (o *VolumeCreateAsyncRequest) VserverDrProtection() string { - r := *o.VserverDrProtectionPtr - return r -} - -// SetVserverDrProtection is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncRequest) SetVserverDrProtection(newValue string) *VolumeCreateAsyncRequest { - o.VserverDrProtectionPtr = &newValue - return o -} - -// ResultErrorCode is a 'getter' method -func (o *VolumeCreateAsyncResponseResult) ResultErrorCode() int { - r := *o.ResultErrorCodePtr - return r -} - -// SetResultErrorCode is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncResponseResult) SetResultErrorCode(newValue int) *VolumeCreateAsyncResponseResult { - o.ResultErrorCodePtr = &newValue - return o -} - -// ResultErrorMessage is a 'getter' method -func (o *VolumeCreateAsyncResponseResult) ResultErrorMessage() string { - r := *o.ResultErrorMessagePtr - return r -} - -// SetResultErrorMessage is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncResponseResult) SetResultErrorMessage(newValue string) *VolumeCreateAsyncResponseResult { - o.ResultErrorMessagePtr = &newValue - return o -} - -// ResultJobid is a 'getter' method -func (o *VolumeCreateAsyncResponseResult) ResultJobid() int { - r := *o.ResultJobidPtr - return r -} - -// SetResultJobid is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncResponseResult) SetResultJobid(newValue int) *VolumeCreateAsyncResponseResult { - o.ResultJobidPtr = &newValue - return o -} - -// ResultStatus is a 'getter' method -func (o *VolumeCreateAsyncResponseResult) ResultStatus() string { - r := *o.ResultStatusPtr - return r -} - -// SetResultStatus is a fluent style 'setter' method that can be chained -func (o *VolumeCreateAsyncResponseResult) SetResultStatus(newValue string) *VolumeCreateAsyncResponseResult { - o.ResultStatusPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-create.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-create.go deleted file mode 100644 index 8187ef1cb..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-create.go +++ /dev/null @@ -1,687 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeCreateRequest is a structure to represent a volume-create Request ZAPI object -type VolumeCreateRequest struct { - XMLName xml.Name `xml:"volume-create"` - AntivirusOnAccessPolicyPtr *string `xml:"antivirus-on-access-policy"` - CacheRetentionPriorityPtr *string `xml:"cache-retention-priority"` - CachingPolicyPtr *string `xml:"caching-policy"` - ConstituentRolePtr *string `xml:"constituent-role"` - ContainingAggrNamePtr *string `xml:"containing-aggr-name"` - 
EfficiencyPolicyPtr *string `xml:"efficiency-policy"` - EncryptPtr *bool `xml:"encrypt"` - ExcludedFromAutobalancePtr *bool `xml:"excluded-from-autobalance"` - ExportPolicyPtr *string `xml:"export-policy"` - ExtentSizePtr *string `xml:"extent-size"` - FlexcacheCachePolicyPtr *string `xml:"flexcache-cache-policy"` - FlexcacheFillPolicyPtr *string `xml:"flexcache-fill-policy"` - FlexcacheOriginVolumeNamePtr *string `xml:"flexcache-origin-volume-name"` - GroupIdPtr *int `xml:"group-id"` - IsJunctionActivePtr *bool `xml:"is-junction-active"` - IsNvfailEnabledPtr *string `xml:"is-nvfail-enabled"` - IsVserverRootPtr *bool `xml:"is-vserver-root"` - JunctionPathPtr *string `xml:"junction-path"` - LanguageCodePtr *string `xml:"language-code"` - MaxDirSizePtr *int `xml:"max-dir-size"` - MaxWriteAllocBlocksPtr *int `xml:"max-write-alloc-blocks"` - PercentageSnapshotReservePtr *int `xml:"percentage-snapshot-reserve"` - QosAdaptivePolicyGroupNamePtr *string `xml:"qos-adaptive-policy-group-name"` - QosPolicyGroupNamePtr *string `xml:"qos-policy-group-name"` - SizePtr *string `xml:"size"` - SnapshotPolicyPtr *string `xml:"snapshot-policy"` - SpaceReservePtr *string `xml:"space-reserve"` - SpaceSloPtr *string `xml:"space-slo"` - StorageServicePtr *string `xml:"storage-service"` - StripeAlgorithmPtr *string `xml:"stripe-algorithm"` - StripeConcurrencyPtr *string `xml:"stripe-concurrency"` - StripeConstituentVolumeCountPtr *int `xml:"stripe-constituent-volume-count"` - StripeOptimizePtr *string `xml:"stripe-optimize"` - StripeWidthPtr *int `xml:"stripe-width"` - TieringPolicyPtr *string `xml:"tiering-policy"` - UnixPermissionsPtr *string `xml:"unix-permissions"` - UserIdPtr *int `xml:"user-id"` - VmAlignSectorPtr *int `xml:"vm-align-sector"` - VmAlignSuffixPtr *string `xml:"vm-align-suffix"` - VolumePtr *string `xml:"volume"` - VolumeCommentPtr *string `xml:"volume-comment"` - VolumeSecurityStylePtr *string `xml:"volume-security-style"` - VolumeStatePtr *string `xml:"volume-state"` - VolumeTypePtr *string `xml:"volume-type"` - VserverDrProtectionPtr *string `xml:"vserver-dr-protection"` -} - -// VolumeCreateResponse is a structure to represent a volume-create Response ZAPI object -type VolumeCreateResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeCreateResponseResult `xml:"results"` -} - -// NewVolumeCreateResponse is a factory method for creating new instances of VolumeCreateResponse objects -func NewVolumeCreateResponse() *VolumeCreateResponse { - return &VolumeCreateResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCreateResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeCreateResponseResult is a structure to represent a volume-create Response Result ZAPI object -type VolumeCreateResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewVolumeCreateRequest is a factory method for creating new instances of VolumeCreateRequest objects -func NewVolumeCreateRequest() *VolumeCreateRequest { - 
return &VolumeCreateRequest{} -} - -// NewVolumeCreateResponseResult is a factory method for creating new instances of VolumeCreateResponseResult objects -func NewVolumeCreateResponseResult() *VolumeCreateResponseResult { - return &VolumeCreateResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCreateRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCreateResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCreateResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeCreateRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeCreateResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeCreateRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeCreateResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeCreateRequest", NewVolumeCreateResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeCreateResponse), err -} - -// AntivirusOnAccessPolicy is a 'getter' method -func (o *VolumeCreateRequest) AntivirusOnAccessPolicy() string { - r := *o.AntivirusOnAccessPolicyPtr - return r -} - -// SetAntivirusOnAccessPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetAntivirusOnAccessPolicy(newValue string) *VolumeCreateRequest { - o.AntivirusOnAccessPolicyPtr = &newValue - return o -} - -// CacheRetentionPriority is a 'getter' method -func (o *VolumeCreateRequest) CacheRetentionPriority() string { - r := *o.CacheRetentionPriorityPtr - return r -} - -// SetCacheRetentionPriority is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetCacheRetentionPriority(newValue string) *VolumeCreateRequest { - o.CacheRetentionPriorityPtr = &newValue - return o -} - -// CachingPolicy is a 'getter' method -func (o *VolumeCreateRequest) CachingPolicy() string { - r := *o.CachingPolicyPtr - return r -} - -// SetCachingPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetCachingPolicy(newValue string) *VolumeCreateRequest { - o.CachingPolicyPtr = &newValue - return o -} - -// ConstituentRole is a 'getter' method -func (o *VolumeCreateRequest) ConstituentRole() string { - r := *o.ConstituentRolePtr - return r -} - -// SetConstituentRole is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetConstituentRole(newValue string) *VolumeCreateRequest { - o.ConstituentRolePtr = &newValue - return o -} - -// ContainingAggrName is a 'getter' method -func (o *VolumeCreateRequest) ContainingAggrName() string { - r := *o.ContainingAggrNamePtr - return r -} - -// 
SetContainingAggrName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetContainingAggrName(newValue string) *VolumeCreateRequest { - o.ContainingAggrNamePtr = &newValue - return o -} - -// EfficiencyPolicy is a 'getter' method -func (o *VolumeCreateRequest) EfficiencyPolicy() string { - r := *o.EfficiencyPolicyPtr - return r -} - -// SetEfficiencyPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetEfficiencyPolicy(newValue string) *VolumeCreateRequest { - o.EfficiencyPolicyPtr = &newValue - return o -} - -// Encrypt is a 'getter' method -func (o *VolumeCreateRequest) Encrypt() bool { - r := *o.EncryptPtr - return r -} - -// SetEncrypt is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetEncrypt(newValue bool) *VolumeCreateRequest { - o.EncryptPtr = &newValue - return o -} - -// ExcludedFromAutobalance is a 'getter' method -func (o *VolumeCreateRequest) ExcludedFromAutobalance() bool { - r := *o.ExcludedFromAutobalancePtr - return r -} - -// SetExcludedFromAutobalance is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetExcludedFromAutobalance(newValue bool) *VolumeCreateRequest { - o.ExcludedFromAutobalancePtr = &newValue - return o -} - -// ExportPolicy is a 'getter' method -func (o *VolumeCreateRequest) ExportPolicy() string { - r := *o.ExportPolicyPtr - return r -} - -// SetExportPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetExportPolicy(newValue string) *VolumeCreateRequest { - o.ExportPolicyPtr = &newValue - return o -} - -// ExtentSize is a 'getter' method -func (o *VolumeCreateRequest) ExtentSize() string { - r := *o.ExtentSizePtr - return r -} - -// SetExtentSize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetExtentSize(newValue string) *VolumeCreateRequest { - o.ExtentSizePtr = &newValue - return o -} - -// FlexcacheCachePolicy is a 'getter' method -func (o *VolumeCreateRequest) FlexcacheCachePolicy() string { - r := *o.FlexcacheCachePolicyPtr - return r -} - -// SetFlexcacheCachePolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetFlexcacheCachePolicy(newValue string) *VolumeCreateRequest { - o.FlexcacheCachePolicyPtr = &newValue - return o -} - -// FlexcacheFillPolicy is a 'getter' method -func (o *VolumeCreateRequest) FlexcacheFillPolicy() string { - r := *o.FlexcacheFillPolicyPtr - return r -} - -// SetFlexcacheFillPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetFlexcacheFillPolicy(newValue string) *VolumeCreateRequest { - o.FlexcacheFillPolicyPtr = &newValue - return o -} - -// FlexcacheOriginVolumeName is a 'getter' method -func (o *VolumeCreateRequest) FlexcacheOriginVolumeName() string { - r := *o.FlexcacheOriginVolumeNamePtr - return r -} - -// SetFlexcacheOriginVolumeName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetFlexcacheOriginVolumeName(newValue string) *VolumeCreateRequest { - o.FlexcacheOriginVolumeNamePtr = &newValue - return o -} - -// GroupId is a 'getter' method -func (o *VolumeCreateRequest) GroupId() int { - r := *o.GroupIdPtr - return r -} - -// SetGroupId is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetGroupId(newValue int) *VolumeCreateRequest { - o.GroupIdPtr = &newValue - return o -} - -// IsJunctionActive is a 'getter' method -func (o 
*VolumeCreateRequest) IsJunctionActive() bool { - r := *o.IsJunctionActivePtr - return r -} - -// SetIsJunctionActive is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetIsJunctionActive(newValue bool) *VolumeCreateRequest { - o.IsJunctionActivePtr = &newValue - return o -} - -// IsNvfailEnabled is a 'getter' method -func (o *VolumeCreateRequest) IsNvfailEnabled() string { - r := *o.IsNvfailEnabledPtr - return r -} - -// SetIsNvfailEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetIsNvfailEnabled(newValue string) *VolumeCreateRequest { - o.IsNvfailEnabledPtr = &newValue - return o -} - -// IsVserverRoot is a 'getter' method -func (o *VolumeCreateRequest) IsVserverRoot() bool { - r := *o.IsVserverRootPtr - return r -} - -// SetIsVserverRoot is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetIsVserverRoot(newValue bool) *VolumeCreateRequest { - o.IsVserverRootPtr = &newValue - return o -} - -// JunctionPath is a 'getter' method -func (o *VolumeCreateRequest) JunctionPath() string { - r := *o.JunctionPathPtr - return r -} - -// SetJunctionPath is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetJunctionPath(newValue string) *VolumeCreateRequest { - o.JunctionPathPtr = &newValue - return o -} - -// LanguageCode is a 'getter' method -func (o *VolumeCreateRequest) LanguageCode() string { - r := *o.LanguageCodePtr - return r -} - -// SetLanguageCode is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetLanguageCode(newValue string) *VolumeCreateRequest { - o.LanguageCodePtr = &newValue - return o -} - -// MaxDirSize is a 'getter' method -func (o *VolumeCreateRequest) MaxDirSize() int { - r := *o.MaxDirSizePtr - return r -} - -// SetMaxDirSize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetMaxDirSize(newValue int) *VolumeCreateRequest { - o.MaxDirSizePtr = &newValue - return o -} - -// MaxWriteAllocBlocks is a 'getter' method -func (o *VolumeCreateRequest) MaxWriteAllocBlocks() int { - r := *o.MaxWriteAllocBlocksPtr - return r -} - -// SetMaxWriteAllocBlocks is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetMaxWriteAllocBlocks(newValue int) *VolumeCreateRequest { - o.MaxWriteAllocBlocksPtr = &newValue - return o -} - -// PercentageSnapshotReserve is a 'getter' method -func (o *VolumeCreateRequest) PercentageSnapshotReserve() int { - r := *o.PercentageSnapshotReservePtr - return r -} - -// SetPercentageSnapshotReserve is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetPercentageSnapshotReserve(newValue int) *VolumeCreateRequest { - o.PercentageSnapshotReservePtr = &newValue - return o -} - -// QosAdaptivePolicyGroupName is a 'getter' method -func (o *VolumeCreateRequest) QosAdaptivePolicyGroupName() string { - r := *o.QosAdaptivePolicyGroupNamePtr - return r -} - -// SetQosAdaptivePolicyGroupName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetQosAdaptivePolicyGroupName(newValue string) *VolumeCreateRequest { - o.QosAdaptivePolicyGroupNamePtr = &newValue - return o -} - -// QosPolicyGroupName is a 'getter' method -func (o *VolumeCreateRequest) QosPolicyGroupName() string { - r := *o.QosPolicyGroupNamePtr - return r -} - -// SetQosPolicyGroupName is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetQosPolicyGroupName(newValue 
string) *VolumeCreateRequest { - o.QosPolicyGroupNamePtr = &newValue - return o -} - -// Size is a 'getter' method -func (o *VolumeCreateRequest) Size() string { - r := *o.SizePtr - return r -} - -// SetSize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetSize(newValue string) *VolumeCreateRequest { - o.SizePtr = &newValue - return o -} - -// SnapshotPolicy is a 'getter' method -func (o *VolumeCreateRequest) SnapshotPolicy() string { - r := *o.SnapshotPolicyPtr - return r -} - -// SetSnapshotPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetSnapshotPolicy(newValue string) *VolumeCreateRequest { - o.SnapshotPolicyPtr = &newValue - return o -} - -// SpaceReserve is a 'getter' method -func (o *VolumeCreateRequest) SpaceReserve() string { - r := *o.SpaceReservePtr - return r -} - -// SetSpaceReserve is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetSpaceReserve(newValue string) *VolumeCreateRequest { - o.SpaceReservePtr = &newValue - return o -} - -// SpaceSlo is a 'getter' method -func (o *VolumeCreateRequest) SpaceSlo() string { - r := *o.SpaceSloPtr - return r -} - -// SetSpaceSlo is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetSpaceSlo(newValue string) *VolumeCreateRequest { - o.SpaceSloPtr = &newValue - return o -} - -// StorageService is a 'getter' method -func (o *VolumeCreateRequest) StorageService() string { - r := *o.StorageServicePtr - return r -} - -// SetStorageService is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetStorageService(newValue string) *VolumeCreateRequest { - o.StorageServicePtr = &newValue - return o -} - -// StripeAlgorithm is a 'getter' method -func (o *VolumeCreateRequest) StripeAlgorithm() string { - r := *o.StripeAlgorithmPtr - return r -} - -// SetStripeAlgorithm is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetStripeAlgorithm(newValue string) *VolumeCreateRequest { - o.StripeAlgorithmPtr = &newValue - return o -} - -// StripeConcurrency is a 'getter' method -func (o *VolumeCreateRequest) StripeConcurrency() string { - r := *o.StripeConcurrencyPtr - return r -} - -// SetStripeConcurrency is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetStripeConcurrency(newValue string) *VolumeCreateRequest { - o.StripeConcurrencyPtr = &newValue - return o -} - -// StripeConstituentVolumeCount is a 'getter' method -func (o *VolumeCreateRequest) StripeConstituentVolumeCount() int { - r := *o.StripeConstituentVolumeCountPtr - return r -} - -// SetStripeConstituentVolumeCount is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetStripeConstituentVolumeCount(newValue int) *VolumeCreateRequest { - o.StripeConstituentVolumeCountPtr = &newValue - return o -} - -// StripeOptimize is a 'getter' method -func (o *VolumeCreateRequest) StripeOptimize() string { - r := *o.StripeOptimizePtr - return r -} - -// SetStripeOptimize is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetStripeOptimize(newValue string) *VolumeCreateRequest { - o.StripeOptimizePtr = &newValue - return o -} - -// StripeWidth is a 'getter' method -func (o *VolumeCreateRequest) StripeWidth() int { - r := *o.StripeWidthPtr - return r -} - -// SetStripeWidth is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetStripeWidth(newValue int) 
*VolumeCreateRequest { - o.StripeWidthPtr = &newValue - return o -} - -// TieringPolicy is a 'getter' method -func (o *VolumeCreateRequest) TieringPolicy() string { - r := *o.TieringPolicyPtr - return r -} - -// SetTieringPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetTieringPolicy(newValue string) *VolumeCreateRequest { - o.TieringPolicyPtr = &newValue - return o -} - -// UnixPermissions is a 'getter' method -func (o *VolumeCreateRequest) UnixPermissions() string { - r := *o.UnixPermissionsPtr - return r -} - -// SetUnixPermissions is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetUnixPermissions(newValue string) *VolumeCreateRequest { - o.UnixPermissionsPtr = &newValue - return o -} - -// UserId is a 'getter' method -func (o *VolumeCreateRequest) UserId() int { - r := *o.UserIdPtr - return r -} - -// SetUserId is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetUserId(newValue int) *VolumeCreateRequest { - o.UserIdPtr = &newValue - return o -} - -// VmAlignSector is a 'getter' method -func (o *VolumeCreateRequest) VmAlignSector() int { - r := *o.VmAlignSectorPtr - return r -} - -// SetVmAlignSector is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetVmAlignSector(newValue int) *VolumeCreateRequest { - o.VmAlignSectorPtr = &newValue - return o -} - -// VmAlignSuffix is a 'getter' method -func (o *VolumeCreateRequest) VmAlignSuffix() string { - r := *o.VmAlignSuffixPtr - return r -} - -// SetVmAlignSuffix is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetVmAlignSuffix(newValue string) *VolumeCreateRequest { - o.VmAlignSuffixPtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *VolumeCreateRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetVolume(newValue string) *VolumeCreateRequest { - o.VolumePtr = &newValue - return o -} - -// VolumeComment is a 'getter' method -func (o *VolumeCreateRequest) VolumeComment() string { - r := *o.VolumeCommentPtr - return r -} - -// SetVolumeComment is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetVolumeComment(newValue string) *VolumeCreateRequest { - o.VolumeCommentPtr = &newValue - return o -} - -// VolumeSecurityStyle is a 'getter' method -func (o *VolumeCreateRequest) VolumeSecurityStyle() string { - r := *o.VolumeSecurityStylePtr - return r -} - -// SetVolumeSecurityStyle is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetVolumeSecurityStyle(newValue string) *VolumeCreateRequest { - o.VolumeSecurityStylePtr = &newValue - return o -} - -// VolumeState is a 'getter' method -func (o *VolumeCreateRequest) VolumeState() string { - r := *o.VolumeStatePtr - return r -} - -// SetVolumeState is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetVolumeState(newValue string) *VolumeCreateRequest { - o.VolumeStatePtr = &newValue - return o -} - -// VolumeType is a 'getter' method -func (o *VolumeCreateRequest) VolumeType() string { - r := *o.VolumeTypePtr - return r -} - -// SetVolumeType is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetVolumeType(newValue string) *VolumeCreateRequest { - o.VolumeTypePtr = &newValue - return o -} - -// VserverDrProtection is a 'getter' method -func (o 
*VolumeCreateRequest) VserverDrProtection() string { - r := *o.VserverDrProtectionPtr - return r -} - -// SetVserverDrProtection is a fluent style 'setter' method that can be chained -func (o *VolumeCreateRequest) SetVserverDrProtection(newValue string) *VolumeCreateRequest { - o.VserverDrProtectionPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-destroy-async.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-destroy-async.go deleted file mode 100644 index 471f7235d..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-destroy-async.go +++ /dev/null @@ -1,180 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeDestroyAsyncRequest is a structure to represent a volume-destroy-async Request ZAPI object -type VolumeDestroyAsyncRequest struct { - XMLName xml.Name `xml:"volume-destroy-async"` - UnmountAndOfflinePtr *bool `xml:"unmount-and-offline"` - VolumeNamePtr *string `xml:"volume-name"` -} - -// VolumeDestroyAsyncResponse is a structure to represent a volume-destroy-async Response ZAPI object -type VolumeDestroyAsyncResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeDestroyAsyncResponseResult `xml:"results"` -} - -// NewVolumeDestroyAsyncResponse is a factory method for creating new instances of VolumeDestroyAsyncResponse objects -func NewVolumeDestroyAsyncResponse() *VolumeDestroyAsyncResponse { - return &VolumeDestroyAsyncResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeDestroyAsyncResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeDestroyAsyncResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeDestroyAsyncResponseResult is a structure to represent a volume-destroy-async Response Result ZAPI object -type VolumeDestroyAsyncResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ResultErrorCodePtr *int `xml:"result-error-code"` - ResultErrorMessagePtr *string `xml:"result-error-message"` - ResultJobidPtr *int `xml:"result-jobid"` - ResultStatusPtr *string `xml:"result-status"` -} - -// NewVolumeDestroyAsyncRequest is a factory method for creating new instances of VolumeDestroyAsyncRequest objects -func NewVolumeDestroyAsyncRequest() *VolumeDestroyAsyncRequest { - return &VolumeDestroyAsyncRequest{} -} - -// NewVolumeDestroyAsyncResponseResult is a factory method for creating new instances of VolumeDestroyAsyncResponseResult objects -func NewVolumeDestroyAsyncResponseResult() *VolumeDestroyAsyncResponseResult { - return &VolumeDestroyAsyncResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeDestroyAsyncRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeDestroyAsyncResponseResult) ToXML() (string, 
error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeDestroyAsyncRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeDestroyAsyncResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeDestroyAsyncRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeDestroyAsyncResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeDestroyAsyncRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeDestroyAsyncResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeDestroyAsyncRequest", NewVolumeDestroyAsyncResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeDestroyAsyncResponse), err -} - -// UnmountAndOffline is a 'getter' method -func (o *VolumeDestroyAsyncRequest) UnmountAndOffline() bool { - r := *o.UnmountAndOfflinePtr - return r -} - -// SetUnmountAndOffline is a fluent style 'setter' method that can be chained -func (o *VolumeDestroyAsyncRequest) SetUnmountAndOffline(newValue bool) *VolumeDestroyAsyncRequest { - o.UnmountAndOfflinePtr = &newValue - return o -} - -// VolumeName is a 'getter' method -func (o *VolumeDestroyAsyncRequest) VolumeName() string { - r := *o.VolumeNamePtr - return r -} - -// SetVolumeName is a fluent style 'setter' method that can be chained -func (o *VolumeDestroyAsyncRequest) SetVolumeName(newValue string) *VolumeDestroyAsyncRequest { - o.VolumeNamePtr = &newValue - return o -} - -// ResultErrorCode is a 'getter' method -func (o *VolumeDestroyAsyncResponseResult) ResultErrorCode() int { - r := *o.ResultErrorCodePtr - return r -} - -// SetResultErrorCode is a fluent style 'setter' method that can be chained -func (o *VolumeDestroyAsyncResponseResult) SetResultErrorCode(newValue int) *VolumeDestroyAsyncResponseResult { - o.ResultErrorCodePtr = &newValue - return o -} - -// ResultErrorMessage is a 'getter' method -func (o *VolumeDestroyAsyncResponseResult) ResultErrorMessage() string { - r := *o.ResultErrorMessagePtr - return r -} - -// SetResultErrorMessage is a fluent style 'setter' method that can be chained -func (o *VolumeDestroyAsyncResponseResult) SetResultErrorMessage(newValue string) *VolumeDestroyAsyncResponseResult { - o.ResultErrorMessagePtr = &newValue - return o -} - -// ResultJobid is a 'getter' method -func (o *VolumeDestroyAsyncResponseResult) ResultJobid() int { - r := *o.ResultJobidPtr - return r -} - -// SetResultJobid is a fluent style 'setter' method that can be chained -func (o *VolumeDestroyAsyncResponseResult) SetResultJobid(newValue int) *VolumeDestroyAsyncResponseResult { - o.ResultJobidPtr = &newValue - return o -} - -// ResultStatus is a 'getter' method -func (o *VolumeDestroyAsyncResponseResult) ResultStatus() string { - r := *o.ResultStatusPtr - return r -} - -// SetResultStatus is a fluent style 'setter' method that can be chained -func (o *VolumeDestroyAsyncResponseResult) SetResultStatus(newValue string) *VolumeDestroyAsyncResponseResult { - o.ResultStatusPtr = 
&newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-destroy.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-destroy.go deleted file mode 100644 index 25465f602..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-destroy.go +++ /dev/null @@ -1,128 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeDestroyRequest is a structure to represent a volume-destroy Request ZAPI object -type VolumeDestroyRequest struct { - XMLName xml.Name `xml:"volume-destroy"` - NamePtr *string `xml:"name"` - UnmountAndOfflinePtr *bool `xml:"unmount-and-offline"` -} - -// VolumeDestroyResponse is a structure to represent a volume-destroy Response ZAPI object -type VolumeDestroyResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeDestroyResponseResult `xml:"results"` -} - -// NewVolumeDestroyResponse is a factory method for creating new instances of VolumeDestroyResponse objects -func NewVolumeDestroyResponse() *VolumeDestroyResponse { - return &VolumeDestroyResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeDestroyResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeDestroyResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeDestroyResponseResult is a structure to represent a volume-destroy Response Result ZAPI object -type VolumeDestroyResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewVolumeDestroyRequest is a factory method for creating new instances of VolumeDestroyRequest objects -func NewVolumeDestroyRequest() *VolumeDestroyRequest { - return &VolumeDestroyRequest{} -} - -// NewVolumeDestroyResponseResult is a factory method for creating new instances of VolumeDestroyResponseResult objects -func NewVolumeDestroyResponseResult() *VolumeDestroyResponseResult { - return &VolumeDestroyResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeDestroyRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeDestroyResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeDestroyRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeDestroyResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeDestroyRequest) 
ExecuteUsing(zr *ZapiRunner) (*VolumeDestroyResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeDestroyRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeDestroyResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeDestroyRequest", NewVolumeDestroyResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeDestroyResponse), err -} - -// Name is a 'getter' method -func (o *VolumeDestroyRequest) Name() string { - r := *o.NamePtr - return r -} - -// SetName is a fluent style 'setter' method that can be chained -func (o *VolumeDestroyRequest) SetName(newValue string) *VolumeDestroyRequest { - o.NamePtr = &newValue - return o -} - -// UnmountAndOffline is a 'getter' method -func (o *VolumeDestroyRequest) UnmountAndOffline() bool { - r := *o.UnmountAndOfflinePtr - return r -} - -// SetUnmountAndOffline is a fluent style 'setter' method that can be chained -func (o *VolumeDestroyRequest) SetUnmountAndOffline(newValue bool) *VolumeDestroyRequest { - o.UnmountAndOfflinePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-get-iter.go deleted file mode 100644 index e829c12e3..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeGetIterRequest is a structure to represent a volume-get-iter Request ZAPI object -type VolumeGetIterRequest struct { - XMLName xml.Name `xml:"volume-get-iter"` - DesiredAttributesPtr *VolumeGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *VolumeGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// VolumeGetIterResponse is a structure to represent a volume-get-iter Response ZAPI object -type VolumeGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeGetIterResponseResult `xml:"results"` -} - -// NewVolumeGetIterResponse is a factory method for creating new instances of VolumeGetIterResponse objects -func NewVolumeGetIterResponse() *VolumeGetIterResponse { - return &VolumeGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeGetIterResponseResult is a structure to represent a volume-get-iter Response Result ZAPI object -type VolumeGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *VolumeGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewVolumeGetIterRequest is a factory 
method for creating new instances of VolumeGetIterRequest objects -func NewVolumeGetIterRequest() *VolumeGetIterRequest { - return &VolumeGetIterRequest{} -} - -// NewVolumeGetIterResponseResult is a factory method for creating new instances of VolumeGetIterResponseResult objects -func NewVolumeGetIterResponseResult() *VolumeGetIterResponseResult { - return &VolumeGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeGetIterRequest", NewVolumeGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *VolumeGetIterRequest) executeWithIteration(zr *ZapiRunner) (*VolumeGetIterResponse, error) { - combined := NewVolumeGetIterResponse() - combined.Result.SetAttributesList(VolumeGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(VolumeGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := 
combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// VolumeGetIterRequestDesiredAttributes is a wrapper -type VolumeGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - VolumeAttributesPtr *VolumeAttributesType `xml:"volume-attributes"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeAttributes is a 'getter' method -func (o *VolumeGetIterRequestDesiredAttributes) VolumeAttributes() VolumeAttributesType { - r := *o.VolumeAttributesPtr - return r -} - -// SetVolumeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterRequestDesiredAttributes) SetVolumeAttributes(newValue VolumeAttributesType) *VolumeGetIterRequestDesiredAttributes { - o.VolumeAttributesPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *VolumeGetIterRequest) DesiredAttributes() VolumeGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterRequest) SetDesiredAttributes(newValue VolumeGetIterRequestDesiredAttributes) *VolumeGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *VolumeGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterRequest) SetMaxRecords(newValue int) *VolumeGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// VolumeGetIterRequestQuery is a wrapper -type VolumeGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - VolumeAttributesPtr *VolumeAttributesType `xml:"volume-attributes"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeAttributes is a 'getter' method -func (o *VolumeGetIterRequestQuery) VolumeAttributes() VolumeAttributesType { - r := *o.VolumeAttributesPtr - return r -} - -// SetVolumeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterRequestQuery) SetVolumeAttributes(newValue VolumeAttributesType) *VolumeGetIterRequestQuery { - o.VolumeAttributesPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *VolumeGetIterRequest) Query() VolumeGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterRequest) SetQuery(newValue VolumeGetIterRequestQuery) *VolumeGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *VolumeGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterRequest) SetTag(newValue string) *VolumeGetIterRequest { - o.TagPtr = &newValue - return o -} - -// VolumeGetIterResponseResultAttributesList is a wrapper -type VolumeGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - VolumeAttributesPtr []VolumeAttributesType `xml:"volume-attributes"` -} - -// String returns a string representation of this object's fields and implements the 
Stringer interface -func (o VolumeGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeAttributes is a 'getter' method -func (o *VolumeGetIterResponseResultAttributesList) VolumeAttributes() []VolumeAttributesType { - r := o.VolumeAttributesPtr - return r -} - -// SetVolumeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterResponseResultAttributesList) SetVolumeAttributes(newValue []VolumeAttributesType) *VolumeGetIterResponseResultAttributesList { - newSlice := make([]VolumeAttributesType, len(newValue)) - copy(newSlice, newValue) - o.VolumeAttributesPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *VolumeGetIterResponseResultAttributesList) values() []VolumeAttributesType { - r := o.VolumeAttributesPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterResponseResultAttributesList) setValues(newValue []VolumeAttributesType) *VolumeGetIterResponseResultAttributesList { - newSlice := make([]VolumeAttributesType, len(newValue)) - copy(newSlice, newValue) - o.VolumeAttributesPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *VolumeGetIterResponseResult) AttributesList() VolumeGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterResponseResult) SetAttributesList(newValue VolumeGetIterResponseResultAttributesList) *VolumeGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *VolumeGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterResponseResult) SetNextTag(newValue string) *VolumeGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *VolumeGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *VolumeGetIterResponseResult) SetNumRecords(newValue int) *VolumeGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-modify-iter-async.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-modify-iter-async.go deleted file mode 100644 index ce0153f7c..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-modify-iter-async.go +++ /dev/null @@ -1,395 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeModifyIterAsyncRequest is a structure to represent a volume-modify-iter-async Request ZAPI object -type VolumeModifyIterAsyncRequest struct { - XMLName xml.Name `xml:"volume-modify-iter-async"` - AttributesPtr *VolumeModifyIterAsyncRequestAttributes `xml:"attributes"` - ContinueOnFailurePtr *bool `xml:"continue-on-failure"` - MaxFailureCountPtr *int `xml:"max-failure-count"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *VolumeModifyIterAsyncRequestQuery `xml:"query"` - ReturnFailureListPtr *bool `xml:"return-failure-list"` - ReturnSuccessListPtr *bool `xml:"return-success-list"` - TagPtr *string `xml:"tag"` -} - -// VolumeModifyIterAsyncResponse is a structure to represent a volume-modify-iter-async Response 
ZAPI object -type VolumeModifyIterAsyncResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeModifyIterAsyncResponseResult `xml:"results"` -} - -// NewVolumeModifyIterAsyncResponse is a factory method for creating new instances of VolumeModifyIterAsyncResponse objects -func NewVolumeModifyIterAsyncResponse() *VolumeModifyIterAsyncResponse { - return &VolumeModifyIterAsyncResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterAsyncResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeModifyIterAsyncResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeModifyIterAsyncResponseResult is a structure to represent a volume-modify-iter-async Response Result ZAPI object -type VolumeModifyIterAsyncResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - FailureListPtr *VolumeModifyIterAsyncResponseResultFailureList `xml:"failure-list"` - NextTagPtr *string `xml:"next-tag"` - NumFailedPtr *int `xml:"num-failed"` - NumSucceededPtr *int `xml:"num-succeeded"` - SuccessListPtr *VolumeModifyIterAsyncResponseResultSuccessList `xml:"success-list"` -} - -// NewVolumeModifyIterAsyncRequest is a factory method for creating new instances of VolumeModifyIterAsyncRequest objects -func NewVolumeModifyIterAsyncRequest() *VolumeModifyIterAsyncRequest { - return &VolumeModifyIterAsyncRequest{} -} - -// NewVolumeModifyIterAsyncResponseResult is a factory method for creating new instances of VolumeModifyIterAsyncResponseResult objects -func NewVolumeModifyIterAsyncResponseResult() *VolumeModifyIterAsyncResponseResult { - return &VolumeModifyIterAsyncResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeModifyIterAsyncRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeModifyIterAsyncResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterAsyncRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterAsyncResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeModifyIterAsyncRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeModifyIterAsyncResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeModifyIterAsyncRequest) 
executeWithoutIteration(zr *ZapiRunner) (*VolumeModifyIterAsyncResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeModifyIterAsyncRequest", NewVolumeModifyIterAsyncResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeModifyIterAsyncResponse), err -} - -// VolumeModifyIterAsyncRequestAttributes is a wrapper -type VolumeModifyIterAsyncRequestAttributes struct { - XMLName xml.Name `xml:"attributes"` - VolumeAttributesPtr *VolumeAttributesType `xml:"volume-attributes"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterAsyncRequestAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeAttributes is a 'getter' method -func (o *VolumeModifyIterAsyncRequestAttributes) VolumeAttributes() VolumeAttributesType { - r := *o.VolumeAttributesPtr - return r -} - -// SetVolumeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequestAttributes) SetVolumeAttributes(newValue VolumeAttributesType) *VolumeModifyIterAsyncRequestAttributes { - o.VolumeAttributesPtr = &newValue - return o -} - -// Attributes is a 'getter' method -func (o *VolumeModifyIterAsyncRequest) Attributes() VolumeModifyIterAsyncRequestAttributes { - r := *o.AttributesPtr - return r -} - -// SetAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequest) SetAttributes(newValue VolumeModifyIterAsyncRequestAttributes) *VolumeModifyIterAsyncRequest { - o.AttributesPtr = &newValue - return o -} - -// ContinueOnFailure is a 'getter' method -func (o *VolumeModifyIterAsyncRequest) ContinueOnFailure() bool { - r := *o.ContinueOnFailurePtr - return r -} - -// SetContinueOnFailure is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequest) SetContinueOnFailure(newValue bool) *VolumeModifyIterAsyncRequest { - o.ContinueOnFailurePtr = &newValue - return o -} - -// MaxFailureCount is a 'getter' method -func (o *VolumeModifyIterAsyncRequest) MaxFailureCount() int { - r := *o.MaxFailureCountPtr - return r -} - -// SetMaxFailureCount is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequest) SetMaxFailureCount(newValue int) *VolumeModifyIterAsyncRequest { - o.MaxFailureCountPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *VolumeModifyIterAsyncRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequest) SetMaxRecords(newValue int) *VolumeModifyIterAsyncRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// VolumeModifyIterAsyncRequestQuery is a wrapper -type VolumeModifyIterAsyncRequestQuery struct { - XMLName xml.Name `xml:"query"` - VolumeAttributesPtr *VolumeAttributesType `xml:"volume-attributes"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterAsyncRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeAttributes is a 'getter' method -func (o *VolumeModifyIterAsyncRequestQuery) VolumeAttributes() VolumeAttributesType { - r := *o.VolumeAttributesPtr - return r -} - -// SetVolumeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequestQuery) SetVolumeAttributes(newValue VolumeAttributesType) *VolumeModifyIterAsyncRequestQuery { - 
o.VolumeAttributesPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *VolumeModifyIterAsyncRequest) Query() VolumeModifyIterAsyncRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequest) SetQuery(newValue VolumeModifyIterAsyncRequestQuery) *VolumeModifyIterAsyncRequest { - o.QueryPtr = &newValue - return o -} - -// ReturnFailureList is a 'getter' method -func (o *VolumeModifyIterAsyncRequest) ReturnFailureList() bool { - r := *o.ReturnFailureListPtr - return r -} - -// SetReturnFailureList is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequest) SetReturnFailureList(newValue bool) *VolumeModifyIterAsyncRequest { - o.ReturnFailureListPtr = &newValue - return o -} - -// ReturnSuccessList is a 'getter' method -func (o *VolumeModifyIterAsyncRequest) ReturnSuccessList() bool { - r := *o.ReturnSuccessListPtr - return r -} - -// SetReturnSuccessList is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequest) SetReturnSuccessList(newValue bool) *VolumeModifyIterAsyncRequest { - o.ReturnSuccessListPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *VolumeModifyIterAsyncRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncRequest) SetTag(newValue string) *VolumeModifyIterAsyncRequest { - o.TagPtr = &newValue - return o -} - -// VolumeModifyIterAsyncResponseResultFailureList is a wrapper -type VolumeModifyIterAsyncResponseResultFailureList struct { - XMLName xml.Name `xml:"failure-list"` - VolumeModifyIterAsyncInfoPtr []VolumeModifyIterAsyncInfoType `xml:"volume-modify-iter-async-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterAsyncResponseResultFailureList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeModifyIterAsyncInfo is a 'getter' method -func (o *VolumeModifyIterAsyncResponseResultFailureList) VolumeModifyIterAsyncInfo() []VolumeModifyIterAsyncInfoType { - r := o.VolumeModifyIterAsyncInfoPtr - return r -} - -// SetVolumeModifyIterAsyncInfo is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncResponseResultFailureList) SetVolumeModifyIterAsyncInfo(newValue []VolumeModifyIterAsyncInfoType) *VolumeModifyIterAsyncResponseResultFailureList { - newSlice := make([]VolumeModifyIterAsyncInfoType, len(newValue)) - copy(newSlice, newValue) - o.VolumeModifyIterAsyncInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *VolumeModifyIterAsyncResponseResultFailureList) values() []VolumeModifyIterAsyncInfoType { - r := o.VolumeModifyIterAsyncInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncResponseResultFailureList) setValues(newValue []VolumeModifyIterAsyncInfoType) *VolumeModifyIterAsyncResponseResultFailureList { - newSlice := make([]VolumeModifyIterAsyncInfoType, len(newValue)) - copy(newSlice, newValue) - o.VolumeModifyIterAsyncInfoPtr = newSlice - return o -} - -// FailureList is a 'getter' method -func (o *VolumeModifyIterAsyncResponseResult) FailureList() VolumeModifyIterAsyncResponseResultFailureList { - r := *o.FailureListPtr - return r -} - -// SetFailureList is a fluent style 'setter' method that can be chained -func (o 
*VolumeModifyIterAsyncResponseResult) SetFailureList(newValue VolumeModifyIterAsyncResponseResultFailureList) *VolumeModifyIterAsyncResponseResult { - o.FailureListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *VolumeModifyIterAsyncResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncResponseResult) SetNextTag(newValue string) *VolumeModifyIterAsyncResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumFailed is a 'getter' method -func (o *VolumeModifyIterAsyncResponseResult) NumFailed() int { - r := *o.NumFailedPtr - return r -} - -// SetNumFailed is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncResponseResult) SetNumFailed(newValue int) *VolumeModifyIterAsyncResponseResult { - o.NumFailedPtr = &newValue - return o -} - -// NumSucceeded is a 'getter' method -func (o *VolumeModifyIterAsyncResponseResult) NumSucceeded() int { - r := *o.NumSucceededPtr - return r -} - -// SetNumSucceeded is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncResponseResult) SetNumSucceeded(newValue int) *VolumeModifyIterAsyncResponseResult { - o.NumSucceededPtr = &newValue - return o -} - -// VolumeModifyIterAsyncResponseResultSuccessList is a wrapper -type VolumeModifyIterAsyncResponseResultSuccessList struct { - XMLName xml.Name `xml:"success-list"` - VolumeModifyIterAsyncInfoPtr []VolumeModifyIterAsyncInfoType `xml:"volume-modify-iter-async-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterAsyncResponseResultSuccessList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeModifyIterAsyncInfo is a 'getter' method -func (o *VolumeModifyIterAsyncResponseResultSuccessList) VolumeModifyIterAsyncInfo() []VolumeModifyIterAsyncInfoType { - r := o.VolumeModifyIterAsyncInfoPtr - return r -} - -// SetVolumeModifyIterAsyncInfo is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncResponseResultSuccessList) SetVolumeModifyIterAsyncInfo(newValue []VolumeModifyIterAsyncInfoType) *VolumeModifyIterAsyncResponseResultSuccessList { - newSlice := make([]VolumeModifyIterAsyncInfoType, len(newValue)) - copy(newSlice, newValue) - o.VolumeModifyIterAsyncInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *VolumeModifyIterAsyncResponseResultSuccessList) values() []VolumeModifyIterAsyncInfoType { - r := o.VolumeModifyIterAsyncInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncResponseResultSuccessList) setValues(newValue []VolumeModifyIterAsyncInfoType) *VolumeModifyIterAsyncResponseResultSuccessList { - newSlice := make([]VolumeModifyIterAsyncInfoType, len(newValue)) - copy(newSlice, newValue) - o.VolumeModifyIterAsyncInfoPtr = newSlice - return o -} - -// SuccessList is a 'getter' method -func (o *VolumeModifyIterAsyncResponseResult) SuccessList() VolumeModifyIterAsyncResponseResultSuccessList { - r := *o.SuccessListPtr - return r -} - -// SetSuccessList is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncResponseResult) SetSuccessList(newValue VolumeModifyIterAsyncResponseResultSuccessList) *VolumeModifyIterAsyncResponseResult { - o.SuccessListPtr = &newValue - return o -} diff --git 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-modify-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-modify-iter.go deleted file mode 100644 index fa9b65ddc..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-modify-iter.go +++ /dev/null @@ -1,395 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeModifyIterRequest is a structure to represent a volume-modify-iter Request ZAPI object -type VolumeModifyIterRequest struct { - XMLName xml.Name `xml:"volume-modify-iter"` - AttributesPtr *VolumeModifyIterRequestAttributes `xml:"attributes"` - ContinueOnFailurePtr *bool `xml:"continue-on-failure"` - MaxFailureCountPtr *int `xml:"max-failure-count"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *VolumeModifyIterRequestQuery `xml:"query"` - ReturnFailureListPtr *bool `xml:"return-failure-list"` - ReturnSuccessListPtr *bool `xml:"return-success-list"` - TagPtr *string `xml:"tag"` -} - -// VolumeModifyIterResponse is a structure to represent a volume-modify-iter Response ZAPI object -type VolumeModifyIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeModifyIterResponseResult `xml:"results"` -} - -// NewVolumeModifyIterResponse is a factory method for creating new instances of VolumeModifyIterResponse objects -func NewVolumeModifyIterResponse() *VolumeModifyIterResponse { - return &VolumeModifyIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeModifyIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeModifyIterResponseResult is a structure to represent a volume-modify-iter Response Result ZAPI object -type VolumeModifyIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - FailureListPtr *VolumeModifyIterResponseResultFailureList `xml:"failure-list"` - NextTagPtr *string `xml:"next-tag"` - NumFailedPtr *int `xml:"num-failed"` - NumSucceededPtr *int `xml:"num-succeeded"` - SuccessListPtr *VolumeModifyIterResponseResultSuccessList `xml:"success-list"` -} - -// NewVolumeModifyIterRequest is a factory method for creating new instances of VolumeModifyIterRequest objects -func NewVolumeModifyIterRequest() *VolumeModifyIterRequest { - return &VolumeModifyIterRequest{} -} - -// NewVolumeModifyIterResponseResult is a factory method for creating new instances of VolumeModifyIterResponseResult objects -func NewVolumeModifyIterResponseResult() *VolumeModifyIterResponseResult { - return &VolumeModifyIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeModifyIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeModifyIterResponseResult) ToXML() (string, error) { - 
output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeModifyIterRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeModifyIterResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeModifyIterRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeModifyIterResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeModifyIterRequest", NewVolumeModifyIterResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeModifyIterResponse), err -} - -// VolumeModifyIterRequestAttributes is a wrapper -type VolumeModifyIterRequestAttributes struct { - XMLName xml.Name `xml:"attributes"` - VolumeAttributesPtr *VolumeAttributesType `xml:"volume-attributes"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterRequestAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeAttributes is a 'getter' method -func (o *VolumeModifyIterRequestAttributes) VolumeAttributes() VolumeAttributesType { - r := *o.VolumeAttributesPtr - return r -} - -// SetVolumeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequestAttributes) SetVolumeAttributes(newValue VolumeAttributesType) *VolumeModifyIterRequestAttributes { - o.VolumeAttributesPtr = &newValue - return o -} - -// Attributes is a 'getter' method -func (o *VolumeModifyIterRequest) Attributes() VolumeModifyIterRequestAttributes { - r := *o.AttributesPtr - return r -} - -// SetAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequest) SetAttributes(newValue VolumeModifyIterRequestAttributes) *VolumeModifyIterRequest { - o.AttributesPtr = &newValue - return o -} - -// ContinueOnFailure is a 'getter' method -func (o *VolumeModifyIterRequest) ContinueOnFailure() bool { - r := *o.ContinueOnFailurePtr - return r -} - -// SetContinueOnFailure is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequest) SetContinueOnFailure(newValue bool) *VolumeModifyIterRequest { - o.ContinueOnFailurePtr = &newValue - return o -} - -// MaxFailureCount is a 'getter' method -func (o *VolumeModifyIterRequest) MaxFailureCount() int { - r := *o.MaxFailureCountPtr - return r -} - -// SetMaxFailureCount is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequest) SetMaxFailureCount(newValue int) *VolumeModifyIterRequest { - o.MaxFailureCountPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *VolumeModifyIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequest) SetMaxRecords(newValue int) *VolumeModifyIterRequest { - 
o.MaxRecordsPtr = &newValue - return o -} - -// VolumeModifyIterRequestQuery is a wrapper -type VolumeModifyIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - VolumeAttributesPtr *VolumeAttributesType `xml:"volume-attributes"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeAttributes is a 'getter' method -func (o *VolumeModifyIterRequestQuery) VolumeAttributes() VolumeAttributesType { - r := *o.VolumeAttributesPtr - return r -} - -// SetVolumeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequestQuery) SetVolumeAttributes(newValue VolumeAttributesType) *VolumeModifyIterRequestQuery { - o.VolumeAttributesPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *VolumeModifyIterRequest) Query() VolumeModifyIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequest) SetQuery(newValue VolumeModifyIterRequestQuery) *VolumeModifyIterRequest { - o.QueryPtr = &newValue - return o -} - -// ReturnFailureList is a 'getter' method -func (o *VolumeModifyIterRequest) ReturnFailureList() bool { - r := *o.ReturnFailureListPtr - return r -} - -// SetReturnFailureList is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequest) SetReturnFailureList(newValue bool) *VolumeModifyIterRequest { - o.ReturnFailureListPtr = &newValue - return o -} - -// ReturnSuccessList is a 'getter' method -func (o *VolumeModifyIterRequest) ReturnSuccessList() bool { - r := *o.ReturnSuccessListPtr - return r -} - -// SetReturnSuccessList is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequest) SetReturnSuccessList(newValue bool) *VolumeModifyIterRequest { - o.ReturnSuccessListPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *VolumeModifyIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterRequest) SetTag(newValue string) *VolumeModifyIterRequest { - o.TagPtr = &newValue - return o -} - -// VolumeModifyIterResponseResultFailureList is a wrapper -type VolumeModifyIterResponseResultFailureList struct { - XMLName xml.Name `xml:"failure-list"` - VolumeModifyIterInfoPtr []VolumeModifyIterInfoType `xml:"volume-modify-iter-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterResponseResultFailureList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeModifyIterInfo is a 'getter' method -func (o *VolumeModifyIterResponseResultFailureList) VolumeModifyIterInfo() []VolumeModifyIterInfoType { - r := o.VolumeModifyIterInfoPtr - return r -} - -// SetVolumeModifyIterInfo is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterResponseResultFailureList) SetVolumeModifyIterInfo(newValue []VolumeModifyIterInfoType) *VolumeModifyIterResponseResultFailureList { - newSlice := make([]VolumeModifyIterInfoType, len(newValue)) - copy(newSlice, newValue) - o.VolumeModifyIterInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *VolumeModifyIterResponseResultFailureList) values() []VolumeModifyIterInfoType { - r := o.VolumeModifyIterInfoPtr - return r -} - -// setValues is a fluent style 
'setter' method that can be chained -func (o *VolumeModifyIterResponseResultFailureList) setValues(newValue []VolumeModifyIterInfoType) *VolumeModifyIterResponseResultFailureList { - newSlice := make([]VolumeModifyIterInfoType, len(newValue)) - copy(newSlice, newValue) - o.VolumeModifyIterInfoPtr = newSlice - return o -} - -// FailureList is a 'getter' method -func (o *VolumeModifyIterResponseResult) FailureList() VolumeModifyIterResponseResultFailureList { - r := *o.FailureListPtr - return r -} - -// SetFailureList is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterResponseResult) SetFailureList(newValue VolumeModifyIterResponseResultFailureList) *VolumeModifyIterResponseResult { - o.FailureListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *VolumeModifyIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterResponseResult) SetNextTag(newValue string) *VolumeModifyIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumFailed is a 'getter' method -func (o *VolumeModifyIterResponseResult) NumFailed() int { - r := *o.NumFailedPtr - return r -} - -// SetNumFailed is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterResponseResult) SetNumFailed(newValue int) *VolumeModifyIterResponseResult { - o.NumFailedPtr = &newValue - return o -} - -// NumSucceeded is a 'getter' method -func (o *VolumeModifyIterResponseResult) NumSucceeded() int { - r := *o.NumSucceededPtr - return r -} - -// SetNumSucceeded is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterResponseResult) SetNumSucceeded(newValue int) *VolumeModifyIterResponseResult { - o.NumSucceededPtr = &newValue - return o -} - -// VolumeModifyIterResponseResultSuccessList is a wrapper -type VolumeModifyIterResponseResultSuccessList struct { - XMLName xml.Name `xml:"success-list"` - VolumeModifyIterInfoPtr []VolumeModifyIterInfoType `xml:"volume-modify-iter-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterResponseResultSuccessList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeModifyIterInfo is a 'getter' method -func (o *VolumeModifyIterResponseResultSuccessList) VolumeModifyIterInfo() []VolumeModifyIterInfoType { - r := o.VolumeModifyIterInfoPtr - return r -} - -// SetVolumeModifyIterInfo is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterResponseResultSuccessList) SetVolumeModifyIterInfo(newValue []VolumeModifyIterInfoType) *VolumeModifyIterResponseResultSuccessList { - newSlice := make([]VolumeModifyIterInfoType, len(newValue)) - copy(newSlice, newValue) - o.VolumeModifyIterInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *VolumeModifyIterResponseResultSuccessList) values() []VolumeModifyIterInfoType { - r := o.VolumeModifyIterInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterResponseResultSuccessList) setValues(newValue []VolumeModifyIterInfoType) *VolumeModifyIterResponseResultSuccessList { - newSlice := make([]VolumeModifyIterInfoType, len(newValue)) - copy(newSlice, newValue) - o.VolumeModifyIterInfoPtr = newSlice - return o -} - -// SuccessList is a 'getter' method -func (o *VolumeModifyIterResponseResult) SuccessList() VolumeModifyIterResponseResultSuccessList { - 
r := *o.SuccessListPtr - return r -} - -// SetSuccessList is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterResponseResult) SetSuccessList(newValue VolumeModifyIterResponseResultSuccessList) *VolumeModifyIterResponseResult { - o.SuccessListPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-mount.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-mount.go deleted file mode 100644 index 0e28125d9..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-mount.go +++ /dev/null @@ -1,154 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeMountRequest is a structure to represent a volume-mount Request ZAPI object -type VolumeMountRequest struct { - XMLName xml.Name `xml:"volume-mount"` - ActivateJunctionPtr *bool `xml:"activate-junction"` - ExportPolicyOverridePtr *bool `xml:"export-policy-override"` - JunctionPathPtr *string `xml:"junction-path"` - VolumeNamePtr *string `xml:"volume-name"` -} - -// VolumeMountResponse is a structure to represent a volume-mount Response ZAPI object -type VolumeMountResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeMountResponseResult `xml:"results"` -} - -// NewVolumeMountResponse is a factory method for creating new instances of VolumeMountResponse objects -func NewVolumeMountResponse() *VolumeMountResponse { - return &VolumeMountResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeMountResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeMountResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeMountResponseResult is a structure to represent a volume-mount Response Result ZAPI object -type VolumeMountResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewVolumeMountRequest is a factory method for creating new instances of VolumeMountRequest objects -func NewVolumeMountRequest() *VolumeMountRequest { - return &VolumeMountRequest{} -} - -// NewVolumeMountResponseResult is a factory method for creating new instances of VolumeMountResponseResult objects -func NewVolumeMountResponseResult() *VolumeMountResponseResult { - return &VolumeMountResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeMountRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeMountResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeMountRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string 
representation of this object's fields and implements the Stringer interface -func (o VolumeMountResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeMountRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeMountResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeMountRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeMountResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeMountRequest", NewVolumeMountResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeMountResponse), err -} - -// ActivateJunction is a 'getter' method -func (o *VolumeMountRequest) ActivateJunction() bool { - r := *o.ActivateJunctionPtr - return r -} - -// SetActivateJunction is a fluent style 'setter' method that can be chained -func (o *VolumeMountRequest) SetActivateJunction(newValue bool) *VolumeMountRequest { - o.ActivateJunctionPtr = &newValue - return o -} - -// ExportPolicyOverride is a 'getter' method -func (o *VolumeMountRequest) ExportPolicyOverride() bool { - r := *o.ExportPolicyOverridePtr - return r -} - -// SetExportPolicyOverride is a fluent style 'setter' method that can be chained -func (o *VolumeMountRequest) SetExportPolicyOverride(newValue bool) *VolumeMountRequest { - o.ExportPolicyOverridePtr = &newValue - return o -} - -// JunctionPath is a 'getter' method -func (o *VolumeMountRequest) JunctionPath() string { - r := *o.JunctionPathPtr - return r -} - -// SetJunctionPath is a fluent style 'setter' method that can be chained -func (o *VolumeMountRequest) SetJunctionPath(newValue string) *VolumeMountRequest { - o.JunctionPathPtr = &newValue - return o -} - -// VolumeName is a 'getter' method -func (o *VolumeMountRequest) VolumeName() string { - r := *o.VolumeNamePtr - return r -} - -// SetVolumeName is a fluent style 'setter' method that can be chained -func (o *VolumeMountRequest) SetVolumeName(newValue string) *VolumeMountRequest { - o.VolumeNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-offline.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-offline.go deleted file mode 100644 index 4818304dd..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-offline.go +++ /dev/null @@ -1,115 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeOfflineRequest is a structure to represent a volume-offline Request ZAPI object -type VolumeOfflineRequest struct { - XMLName xml.Name `xml:"volume-offline"` - NamePtr *string `xml:"name"` -} - -// VolumeOfflineResponse is a structure to represent a volume-offline Response ZAPI object -type VolumeOfflineResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeOfflineResponseResult `xml:"results"` -} - -// NewVolumeOfflineResponse is a factory method for creating new instances of VolumeOfflineResponse objects -func NewVolumeOfflineResponse() *VolumeOfflineResponse { - return &VolumeOfflineResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o 
VolumeOfflineResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeOfflineResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeOfflineResponseResult is a structure to represent a volume-offline Response Result ZAPI object -type VolumeOfflineResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewVolumeOfflineRequest is a factory method for creating new instances of VolumeOfflineRequest objects -func NewVolumeOfflineRequest() *VolumeOfflineRequest { - return &VolumeOfflineRequest{} -} - -// NewVolumeOfflineResponseResult is a factory method for creating new instances of VolumeOfflineResponseResult objects -func NewVolumeOfflineResponseResult() *VolumeOfflineResponseResult { - return &VolumeOfflineResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeOfflineRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeOfflineResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeOfflineRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeOfflineResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeOfflineRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeOfflineResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeOfflineRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeOfflineResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeOfflineRequest", NewVolumeOfflineResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeOfflineResponse), err -} - -// Name is a 'getter' method -func (o *VolumeOfflineRequest) Name() string { - r := *o.NamePtr - return r -} - -// SetName is a fluent style 'setter' method that can be chained -func (o *VolumeOfflineRequest) SetName(newValue string) *VolumeOfflineRequest { - o.NamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-rename.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-rename.go deleted file mode 100644 index 6b0ffca1d..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-rename.go +++ /dev/null @@ -1,128 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeRenameRequest is a structure to represent a volume-rename Request ZAPI object 
-type VolumeRenameRequest struct { - XMLName xml.Name `xml:"volume-rename"` - NewVolumeNamePtr *string `xml:"new-volume-name"` - VolumePtr *string `xml:"volume"` -} - -// VolumeRenameResponse is a structure to represent a volume-rename Response ZAPI object -type VolumeRenameResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeRenameResponseResult `xml:"results"` -} - -// NewVolumeRenameResponse is a factory method for creating new instances of VolumeRenameResponse objects -func NewVolumeRenameResponse() *VolumeRenameResponse { - return &VolumeRenameResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeRenameResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeRenameResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeRenameResponseResult is a structure to represent a volume-rename Response Result ZAPI object -type VolumeRenameResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewVolumeRenameRequest is a factory method for creating new instances of VolumeRenameRequest objects -func NewVolumeRenameRequest() *VolumeRenameRequest { - return &VolumeRenameRequest{} -} - -// NewVolumeRenameResponseResult is a factory method for creating new instances of VolumeRenameResponseResult objects -func NewVolumeRenameResponseResult() *VolumeRenameResponseResult { - return &VolumeRenameResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeRenameRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeRenameResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeRenameRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeRenameResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeRenameRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeRenameResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeRenameRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeRenameResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeRenameRequest", NewVolumeRenameResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeRenameResponse), err -} - -// NewVolumeName is a 'getter' method -func (o *VolumeRenameRequest) NewVolumeName() 
string { - r := *o.NewVolumeNamePtr - return r -} - -// SetNewVolumeName is a fluent style 'setter' method that can be chained -func (o *VolumeRenameRequest) SetNewVolumeName(newValue string) *VolumeRenameRequest { - o.NewVolumeNamePtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *VolumeRenameRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *VolumeRenameRequest) SetVolume(newValue string) *VolumeRenameRequest { - o.VolumePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-size-async.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-size-async.go deleted file mode 100644 index 7f4e72b54..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-size-async.go +++ /dev/null @@ -1,193 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeSizeAsyncRequest is a structure to represent a volume-size-async Request ZAPI object -type VolumeSizeAsyncRequest struct { - XMLName xml.Name `xml:"volume-size-async"` - NewSizePtr *string `xml:"new-size"` - VolumeNamePtr *string `xml:"volume-name"` -} - -// VolumeSizeAsyncResponse is a structure to represent a volume-size-async Response ZAPI object -type VolumeSizeAsyncResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeSizeAsyncResponseResult `xml:"results"` -} - -// NewVolumeSizeAsyncResponse is a factory method for creating new instances of VolumeSizeAsyncResponse objects -func NewVolumeSizeAsyncResponse() *VolumeSizeAsyncResponse { - return &VolumeSizeAsyncResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSizeAsyncResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSizeAsyncResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeSizeAsyncResponseResult is a structure to represent a volume-size-async Response Result ZAPI object -type VolumeSizeAsyncResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - ResultErrorCodePtr *int `xml:"result-error-code"` - ResultErrorMessagePtr *string `xml:"result-error-message"` - ResultJobidPtr *int `xml:"result-jobid"` - ResultStatusPtr *string `xml:"result-status"` - VolumeSizePtr *string `xml:"volume-size"` -} - -// NewVolumeSizeAsyncRequest is a factory method for creating new instances of VolumeSizeAsyncRequest objects -func NewVolumeSizeAsyncRequest() *VolumeSizeAsyncRequest { - return &VolumeSizeAsyncRequest{} -} - -// NewVolumeSizeAsyncResponseResult is a factory method for creating new instances of VolumeSizeAsyncResponseResult objects -func NewVolumeSizeAsyncResponseResult() *VolumeSizeAsyncResponseResult { - return &VolumeSizeAsyncResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSizeAsyncRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", 
err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSizeAsyncResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSizeAsyncRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSizeAsyncResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeSizeAsyncRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeSizeAsyncResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeSizeAsyncRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeSizeAsyncResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeSizeAsyncRequest", NewVolumeSizeAsyncResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeSizeAsyncResponse), err -} - -// NewSize is a 'getter' method -func (o *VolumeSizeAsyncRequest) NewSize() string { - r := *o.NewSizePtr - return r -} - -// SetNewSize is a fluent style 'setter' method that can be chained -func (o *VolumeSizeAsyncRequest) SetNewSize(newValue string) *VolumeSizeAsyncRequest { - o.NewSizePtr = &newValue - return o -} - -// VolumeName is a 'getter' method -func (o *VolumeSizeAsyncRequest) VolumeName() string { - r := *o.VolumeNamePtr - return r -} - -// SetVolumeName is a fluent style 'setter' method that can be chained -func (o *VolumeSizeAsyncRequest) SetVolumeName(newValue string) *VolumeSizeAsyncRequest { - o.VolumeNamePtr = &newValue - return o -} - -// ResultErrorCode is a 'getter' method -func (o *VolumeSizeAsyncResponseResult) ResultErrorCode() int { - r := *o.ResultErrorCodePtr - return r -} - -// SetResultErrorCode is a fluent style 'setter' method that can be chained -func (o *VolumeSizeAsyncResponseResult) SetResultErrorCode(newValue int) *VolumeSizeAsyncResponseResult { - o.ResultErrorCodePtr = &newValue - return o -} - -// ResultErrorMessage is a 'getter' method -func (o *VolumeSizeAsyncResponseResult) ResultErrorMessage() string { - r := *o.ResultErrorMessagePtr - return r -} - -// SetResultErrorMessage is a fluent style 'setter' method that can be chained -func (o *VolumeSizeAsyncResponseResult) SetResultErrorMessage(newValue string) *VolumeSizeAsyncResponseResult { - o.ResultErrorMessagePtr = &newValue - return o -} - -// ResultJobid is a 'getter' method -func (o *VolumeSizeAsyncResponseResult) ResultJobid() int { - r := *o.ResultJobidPtr - return r -} - -// SetResultJobid is a fluent style 'setter' method that can be chained -func (o *VolumeSizeAsyncResponseResult) SetResultJobid(newValue int) *VolumeSizeAsyncResponseResult { - o.ResultJobidPtr = &newValue - return o -} - -// ResultStatus is a 'getter' method -func (o *VolumeSizeAsyncResponseResult) ResultStatus() string { - r := *o.ResultStatusPtr - return r -} - -// SetResultStatus is a fluent style 'setter' method that can be chained -func (o *VolumeSizeAsyncResponseResult) SetResultStatus(newValue string) *VolumeSizeAsyncResponseResult 
{ - o.ResultStatusPtr = &newValue - return o -} - -// VolumeSize is a 'getter' method -func (o *VolumeSizeAsyncResponseResult) VolumeSize() string { - r := *o.VolumeSizePtr - return r -} - -// SetVolumeSize is a fluent style 'setter' method that can be chained -func (o *VolumeSizeAsyncResponseResult) SetVolumeSize(newValue string) *VolumeSizeAsyncResponseResult { - o.VolumeSizePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-size.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-size.go deleted file mode 100644 index 3cd5f1f9b..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-size.go +++ /dev/null @@ -1,180 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeSizeRequest is a structure to represent a volume-size Request ZAPI object -type VolumeSizeRequest struct { - XMLName xml.Name `xml:"volume-size"` - NewSizePtr *string `xml:"new-size"` - VolumePtr *string `xml:"volume"` -} - -// VolumeSizeResponse is a structure to represent a volume-size Response ZAPI object -type VolumeSizeResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeSizeResponseResult `xml:"results"` -} - -// NewVolumeSizeResponse is a factory method for creating new instances of VolumeSizeResponse objects -func NewVolumeSizeResponse() *VolumeSizeResponse { - return &VolumeSizeResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSizeResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSizeResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeSizeResponseResult is a structure to represent a volume-size Response Result ZAPI object -type VolumeSizeResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - IsFixedSizeFlexVolumePtr *bool `xml:"is-fixed-size-flex-volume"` - IsReadonlyFlexVolumePtr *bool `xml:"is-readonly-flex-volume"` - IsReplicaFlexVolumePtr *bool `xml:"is-replica-flex-volume"` - VolumeSizePtr *string `xml:"volume-size"` -} - -// NewVolumeSizeRequest is a factory method for creating new instances of VolumeSizeRequest objects -func NewVolumeSizeRequest() *VolumeSizeRequest { - return &VolumeSizeRequest{} -} - -// NewVolumeSizeResponseResult is a factory method for creating new instances of VolumeSizeResponseResult objects -func NewVolumeSizeResponseResult() *VolumeSizeResponseResult { - return &VolumeSizeResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSizeRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSizeResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation 
of this object's fields and implements the Stringer interface -func (o VolumeSizeRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSizeResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeSizeRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeSizeResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeSizeRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeSizeResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeSizeRequest", NewVolumeSizeResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeSizeResponse), err -} - -// NewSize is a 'getter' method -func (o *VolumeSizeRequest) NewSize() string { - r := *o.NewSizePtr - return r -} - -// SetNewSize is a fluent style 'setter' method that can be chained -func (o *VolumeSizeRequest) SetNewSize(newValue string) *VolumeSizeRequest { - o.NewSizePtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *VolumeSizeRequest) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *VolumeSizeRequest) SetVolume(newValue string) *VolumeSizeRequest { - o.VolumePtr = &newValue - return o -} - -// IsFixedSizeFlexVolume is a 'getter' method -func (o *VolumeSizeResponseResult) IsFixedSizeFlexVolume() bool { - r := *o.IsFixedSizeFlexVolumePtr - return r -} - -// SetIsFixedSizeFlexVolume is a fluent style 'setter' method that can be chained -func (o *VolumeSizeResponseResult) SetIsFixedSizeFlexVolume(newValue bool) *VolumeSizeResponseResult { - o.IsFixedSizeFlexVolumePtr = &newValue - return o -} - -// IsReadonlyFlexVolume is a 'getter' method -func (o *VolumeSizeResponseResult) IsReadonlyFlexVolume() bool { - r := *o.IsReadonlyFlexVolumePtr - return r -} - -// SetIsReadonlyFlexVolume is a fluent style 'setter' method that can be chained -func (o *VolumeSizeResponseResult) SetIsReadonlyFlexVolume(newValue bool) *VolumeSizeResponseResult { - o.IsReadonlyFlexVolumePtr = &newValue - return o -} - -// IsReplicaFlexVolume is a 'getter' method -func (o *VolumeSizeResponseResult) IsReplicaFlexVolume() bool { - r := *o.IsReplicaFlexVolumePtr - return r -} - -// SetIsReplicaFlexVolume is a fluent style 'setter' method that can be chained -func (o *VolumeSizeResponseResult) SetIsReplicaFlexVolume(newValue bool) *VolumeSizeResponseResult { - o.IsReplicaFlexVolumePtr = &newValue - return o -} - -// VolumeSize is a 'getter' method -func (o *VolumeSizeResponseResult) VolumeSize() string { - r := *o.VolumeSizePtr - return r -} - -// SetVolumeSize is a fluent style 'setter' method that can be chained -func (o *VolumeSizeResponseResult) SetVolumeSize(newValue string) *VolumeSizeResponseResult { - o.VolumeSizePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-unmount.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-unmount.go deleted file mode 100644 index af8b56183..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-volume-unmount.go +++ /dev/null @@ -1,128 +0,0 @@ -package azgo - 
-import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeUnmountRequest is a structure to represent a volume-unmount Request ZAPI object -type VolumeUnmountRequest struct { - XMLName xml.Name `xml:"volume-unmount"` - ForcePtr *bool `xml:"force"` - VolumeNamePtr *string `xml:"volume-name"` -} - -// VolumeUnmountResponse is a structure to represent a volume-unmount Response ZAPI object -type VolumeUnmountResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VolumeUnmountResponseResult `xml:"results"` -} - -// NewVolumeUnmountResponse is a factory method for creating new instances of VolumeUnmountResponse objects -func NewVolumeUnmountResponse() *VolumeUnmountResponse { - return &VolumeUnmountResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeUnmountResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VolumeUnmountResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VolumeUnmountResponseResult is a structure to represent a volume-unmount Response Result ZAPI object -type VolumeUnmountResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` -} - -// NewVolumeUnmountRequest is a factory method for creating new instances of VolumeUnmountRequest objects -func NewVolumeUnmountRequest() *VolumeUnmountRequest { - return &VolumeUnmountRequest{} -} - -// NewVolumeUnmountResponseResult is a factory method for creating new instances of VolumeUnmountResponseResult objects -func NewVolumeUnmountResponseResult() *VolumeUnmountResponseResult { - return &VolumeUnmountResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeUnmountRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VolumeUnmountResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeUnmountRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeUnmountResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeUnmountRequest) ExecuteUsing(zr *ZapiRunner) (*VolumeUnmountResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VolumeUnmountRequest) executeWithoutIteration(zr *ZapiRunner) (*VolumeUnmountResponse, error) { - result, err := zr.ExecuteUsing(o, "VolumeUnmountRequest", 
NewVolumeUnmountResponse()) - if result == nil { - return nil, err - } - return result.(*VolumeUnmountResponse), err -} - -// Force is a 'getter' method -func (o *VolumeUnmountRequest) Force() bool { - r := *o.ForcePtr - return r -} - -// SetForce is a fluent style 'setter' method that can be chained -func (o *VolumeUnmountRequest) SetForce(newValue bool) *VolumeUnmountRequest { - o.ForcePtr = &newValue - return o -} - -// VolumeName is a 'getter' method -func (o *VolumeUnmountRequest) VolumeName() string { - r := *o.VolumeNamePtr - return r -} - -// SetVolumeName is a fluent style 'setter' method that can be chained -func (o *VolumeUnmountRequest) SetVolumeName(newValue string) *VolumeUnmountRequest { - o.VolumeNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-vserver-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-vserver-get-iter.go deleted file mode 100644 index 1844f9abe..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-vserver-get-iter.go +++ /dev/null @@ -1,334 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VserverGetIterRequest is a structure to represent a vserver-get-iter Request ZAPI object -type VserverGetIterRequest struct { - XMLName xml.Name `xml:"vserver-get-iter"` - DesiredAttributesPtr *VserverGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *VserverGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` -} - -// VserverGetIterResponse is a structure to represent a vserver-get-iter Response ZAPI object -type VserverGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VserverGetIterResponseResult `xml:"results"` -} - -// NewVserverGetIterResponse is a factory method for creating new instances of VserverGetIterResponse objects -func NewVserverGetIterResponse() *VserverGetIterResponse { - return &VserverGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VserverGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VserverGetIterResponseResult is a structure to represent a vserver-get-iter Response Result ZAPI object -type VserverGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *VserverGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewVserverGetIterRequest is a factory method for creating new instances of VserverGetIterRequest objects -func NewVserverGetIterRequest() *VserverGetIterRequest { - return &VserverGetIterRequest{} -} - -// NewVserverGetIterResponseResult is a factory method for creating new instances of VserverGetIterResponseResult objects -func NewVserverGetIterResponseResult() *VserverGetIterResponseResult { - return &VserverGetIterResponseResult{} -} - -// ToXML 
converts this object into an xml string representation -func (o *VserverGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VserverGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VserverGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*VserverGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VserverGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*VserverGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "VserverGetIterRequest", NewVserverGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*VserverGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *VserverGetIterRequest) executeWithIteration(zr *ZapiRunner) (*VserverGetIterResponse, error) { - combined := NewVserverGetIterResponse() - combined.Result.SetAttributesList(VserverGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(VserverGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// VserverGetIterRequestDesiredAttributes is a wrapper -type VserverGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - VserverInfoPtr *VserverInfoType `xml:"vserver-info"` -} - -// String returns a string representation of 
this object's fields and implements the Stringer interface -func (o VserverGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VserverInfo is a 'getter' method -func (o *VserverGetIterRequestDesiredAttributes) VserverInfo() VserverInfoType { - r := *o.VserverInfoPtr - return r -} - -// SetVserverInfo is a fluent style 'setter' method that can be chained -func (o *VserverGetIterRequestDesiredAttributes) SetVserverInfo(newValue VserverInfoType) *VserverGetIterRequestDesiredAttributes { - o.VserverInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *VserverGetIterRequest) DesiredAttributes() VserverGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *VserverGetIterRequest) SetDesiredAttributes(newValue VserverGetIterRequestDesiredAttributes) *VserverGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *VserverGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *VserverGetIterRequest) SetMaxRecords(newValue int) *VserverGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// VserverGetIterRequestQuery is a wrapper -type VserverGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - VserverInfoPtr *VserverInfoType `xml:"vserver-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VserverInfo is a 'getter' method -func (o *VserverGetIterRequestQuery) VserverInfo() VserverInfoType { - r := *o.VserverInfoPtr - return r -} - -// SetVserverInfo is a fluent style 'setter' method that can be chained -func (o *VserverGetIterRequestQuery) SetVserverInfo(newValue VserverInfoType) *VserverGetIterRequestQuery { - o.VserverInfoPtr = &newValue - return o -} - -// Query is a 'getter' method -func (o *VserverGetIterRequest) Query() VserverGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *VserverGetIterRequest) SetQuery(newValue VserverGetIterRequestQuery) *VserverGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *VserverGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *VserverGetIterRequest) SetTag(newValue string) *VserverGetIterRequest { - o.TagPtr = &newValue - return o -} - -// VserverGetIterResponseResultAttributesList is a wrapper -type VserverGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - VserverInfoPtr []VserverInfoType `xml:"vserver-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VserverInfo is a 'getter' method -func (o *VserverGetIterResponseResultAttributesList) VserverInfo() []VserverInfoType { - r := o.VserverInfoPtr - return r -} - -// SetVserverInfo is a fluent style 'setter' method that can be chained -func (o *VserverGetIterResponseResultAttributesList) SetVserverInfo(newValue []VserverInfoType) 
*VserverGetIterResponseResultAttributesList { - newSlice := make([]VserverInfoType, len(newValue)) - copy(newSlice, newValue) - o.VserverInfoPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *VserverGetIterResponseResultAttributesList) values() []VserverInfoType { - r := o.VserverInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *VserverGetIterResponseResultAttributesList) setValues(newValue []VserverInfoType) *VserverGetIterResponseResultAttributesList { - newSlice := make([]VserverInfoType, len(newValue)) - copy(newSlice, newValue) - o.VserverInfoPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *VserverGetIterResponseResult) AttributesList() VserverGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *VserverGetIterResponseResult) SetAttributesList(newValue VserverGetIterResponseResultAttributesList) *VserverGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *VserverGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *VserverGetIterResponseResult) SetNextTag(newValue string) *VserverGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o *VserverGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *VserverGetIterResponseResult) SetNumRecords(newValue int) *VserverGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-vserver-get.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-vserver-get.go deleted file mode 100644 index 1e7350ba5..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-vserver-get.go +++ /dev/null @@ -1,186 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VserverGetRequest is a structure to represent a vserver-get Request ZAPI object -type VserverGetRequest struct { - XMLName xml.Name `xml:"vserver-get"` - DesiredAttributesPtr *VserverGetRequestDesiredAttributes `xml:"desired-attributes"` -} - -// VserverGetResponse is a structure to represent a vserver-get Response ZAPI object -type VserverGetResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VserverGetResponseResult `xml:"results"` -} - -// NewVserverGetResponse is a factory method for creating new instances of VserverGetResponse objects -func NewVserverGetResponse() *VserverGetResponse { - return &VserverGetResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VserverGetResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// VserverGetResponseResult is a structure to represent a vserver-get Response Result ZAPI object -type 
VserverGetResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesPtr *VserverGetResponseResultAttributes `xml:"attributes"` -} - -// NewVserverGetRequest is a factory method for creating new instances of VserverGetRequest objects -func NewVserverGetRequest() *VserverGetRequest { - return &VserverGetRequest{} -} - -// NewVserverGetResponseResult is a factory method for creating new instances of VserverGetResponseResult objects -func NewVserverGetResponseResult() *VserverGetResponseResult { - return &VserverGetResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VserverGetRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VserverGetResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VserverGetRequest) ExecuteUsing(zr *ZapiRunner) (*VserverGetResponse, error) { - return o.executeWithoutIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VserverGetRequest) executeWithoutIteration(zr *ZapiRunner) (*VserverGetResponse, error) { - result, err := zr.ExecuteUsing(o, "VserverGetRequest", NewVserverGetResponse()) - if result == nil { - return nil, err - } - return result.(*VserverGetResponse), err -} - -// VserverGetRequestDesiredAttributes is a wrapper -type VserverGetRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - VserverInfoPtr *VserverInfoType `xml:"vserver-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VserverInfo is a 'getter' method -func (o *VserverGetRequestDesiredAttributes) VserverInfo() VserverInfoType { - r := *o.VserverInfoPtr - return r -} - -// SetVserverInfo is a fluent style 'setter' method that can be chained -func (o *VserverGetRequestDesiredAttributes) SetVserverInfo(newValue VserverInfoType) *VserverGetRequestDesiredAttributes { - o.VserverInfoPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *VserverGetRequest) DesiredAttributes() VserverGetRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *VserverGetRequest) SetDesiredAttributes(newValue VserverGetRequestDesiredAttributes) *VserverGetRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// VserverGetResponseResultAttributes is a wrapper 
-type VserverGetResponseResultAttributes struct { - XMLName xml.Name `xml:"attributes"` - VserverInfoPtr *VserverInfoType `xml:"vserver-info"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverGetResponseResultAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VserverInfo is a 'getter' method -func (o *VserverGetResponseResultAttributes) VserverInfo() VserverInfoType { - r := *o.VserverInfoPtr - return r -} - -// SetVserverInfo is a fluent style 'setter' method that can be chained -func (o *VserverGetResponseResultAttributes) SetVserverInfo(newValue VserverInfoType) *VserverGetResponseResultAttributes { - o.VserverInfoPtr = &newValue - return o -} - -// values is a 'getter' method -func (o *VserverGetResponseResultAttributes) values() VserverInfoType { - r := *o.VserverInfoPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *VserverGetResponseResultAttributes) setValues(newValue VserverInfoType) *VserverGetResponseResultAttributes { - o.VserverInfoPtr = &newValue - return o -} - -// Attributes is a 'getter' method -func (o *VserverGetResponseResult) Attributes() VserverGetResponseResultAttributes { - r := *o.AttributesPtr - return r -} - -// SetAttributes is a fluent style 'setter' method that can be chained -func (o *VserverGetResponseResult) SetAttributes(newValue VserverGetResponseResultAttributes) *VserverGetResponseResult { - o.AttributesPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-vserver-show-aggr-get-iter.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-vserver-show-aggr-get-iter.go deleted file mode 100644 index 4d3dcc4d9..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/api-vserver-show-aggr-get-iter.go +++ /dev/null @@ -1,347 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VserverShowAggrGetIterRequest is a structure to represent a vserver-show-aggr-get-iter Request ZAPI object -type VserverShowAggrGetIterRequest struct { - XMLName xml.Name `xml:"vserver-show-aggr-get-iter"` - DesiredAttributesPtr *VserverShowAggrGetIterRequestDesiredAttributes `xml:"desired-attributes"` - MaxRecordsPtr *int `xml:"max-records"` - QueryPtr *VserverShowAggrGetIterRequestQuery `xml:"query"` - TagPtr *string `xml:"tag"` - VserverPtr *string `xml:"vserver"` -} - -// VserverShowAggrGetIterResponse is a structure to represent a vserver-show-aggr-get-iter Response ZAPI object -type VserverShowAggrGetIterResponse struct { - XMLName xml.Name `xml:"netapp"` - ResponseVersion string `xml:"version,attr"` - ResponseXmlns string `xml:"xmlns,attr"` - Result VserverShowAggrGetIterResponseResult `xml:"results"` -} - -// NewVserverShowAggrGetIterResponse is a factory method for creating new instances of VserverShowAggrGetIterResponse objects -func NewVserverShowAggrGetIterResponse() *VserverShowAggrGetIterResponse { - return &VserverShowAggrGetIterResponse{} -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverShowAggrGetIterResponse) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ToXML converts this object into an xml string representation -func (o *VserverShowAggrGetIterResponse) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) 
- } - return string(output), err -} - -// VserverShowAggrGetIterResponseResult is a structure to represent a vserver-show-aggr-get-iter Response Result ZAPI object -type VserverShowAggrGetIterResponseResult struct { - XMLName xml.Name `xml:"results"` - ResultStatusAttr string `xml:"status,attr"` - ResultReasonAttr string `xml:"reason,attr"` - ResultErrnoAttr string `xml:"errno,attr"` - AttributesListPtr *VserverShowAggrGetIterResponseResultAttributesList `xml:"attributes-list"` - NextTagPtr *string `xml:"next-tag"` - NumRecordsPtr *int `xml:"num-records"` -} - -// NewVserverShowAggrGetIterRequest is a factory method for creating new instances of VserverShowAggrGetIterRequest objects -func NewVserverShowAggrGetIterRequest() *VserverShowAggrGetIterRequest { - return &VserverShowAggrGetIterRequest{} -} - -// NewVserverShowAggrGetIterResponseResult is a factory method for creating new instances of VserverShowAggrGetIterResponseResult objects -func NewVserverShowAggrGetIterResponseResult() *VserverShowAggrGetIterResponseResult { - return &VserverShowAggrGetIterResponseResult{} -} - -// ToXML converts this object into an xml string representation -func (o *VserverShowAggrGetIterRequest) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// ToXML converts this object into an xml string representation -func (o *VserverShowAggrGetIterResponseResult) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverShowAggrGetIterRequest) String() string { - return ToString(reflect.ValueOf(o)) -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverShowAggrGetIterResponseResult) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VserverShowAggrGetIterRequest) ExecuteUsing(zr *ZapiRunner) (*VserverShowAggrGetIterResponse, error) { - return o.executeWithIteration(zr) -} - -// executeWithoutIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer - -func (o *VserverShowAggrGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*VserverShowAggrGetIterResponse, error) { - result, err := zr.ExecuteUsing(o, "VserverShowAggrGetIterRequest", NewVserverShowAggrGetIterResponse()) - if result == nil { - return nil, err - } - return result.(*VserverShowAggrGetIterResponse), err -} - -// executeWithIteration converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *VserverShowAggrGetIterRequest) executeWithIteration(zr *ZapiRunner) (*VserverShowAggrGetIterResponse, error) { - combined := NewVserverShowAggrGetIterResponse() - combined.Result.SetAttributesList(VserverShowAggrGetIterResponseResultAttributesList{}) - var nextTagPtr *string - done := false - for done != true { - n, err := o.executeWithoutIteration(zr) - - if err != nil { - return nil, err - } - nextTagPtr = n.Result.NextTagPtr - if nextTagPtr == nil { - done = true - } else { - o.SetTag(*nextTagPtr) - } - - if n.Result.NumRecordsPtr == nil { - done = true - } else { - recordsRead := n.Result.NumRecords() - if 
recordsRead == 0 { - done = true - } - } - - if n.Result.AttributesListPtr != nil { - if combined.Result.AttributesListPtr == nil { - combined.Result.SetAttributesList(VserverShowAggrGetIterResponseResultAttributesList{}) - } - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - - resultAttributesList := n.Result.AttributesList() - resultAttributes := resultAttributesList.values() - - combined.Result.AttributesListPtr.setValues(append(combinedAttributes, resultAttributes...)) - } - - if done == true { - - combined.Result.ResultErrnoAttr = n.Result.ResultErrnoAttr - combined.Result.ResultReasonAttr = n.Result.ResultReasonAttr - combined.Result.ResultStatusAttr = n.Result.ResultStatusAttr - - combinedAttributesList := combined.Result.AttributesList() - combinedAttributes := combinedAttributesList.values() - combined.Result.SetNumRecords(len(combinedAttributes)) - - } - } - return combined, nil -} - -// VserverShowAggrGetIterRequestDesiredAttributes is a wrapper -type VserverShowAggrGetIterRequestDesiredAttributes struct { - XMLName xml.Name `xml:"desired-attributes"` - ShowAggregatesPtr *ShowAggregatesType `xml:"show-aggregates"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverShowAggrGetIterRequestDesiredAttributes) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ShowAggregates is a 'getter' method -func (o *VserverShowAggrGetIterRequestDesiredAttributes) ShowAggregates() ShowAggregatesType { - r := *o.ShowAggregatesPtr - return r -} - -// SetShowAggregates is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterRequestDesiredAttributes) SetShowAggregates(newValue ShowAggregatesType) *VserverShowAggrGetIterRequestDesiredAttributes { - o.ShowAggregatesPtr = &newValue - return o -} - -// DesiredAttributes is a 'getter' method -func (o *VserverShowAggrGetIterRequest) DesiredAttributes() VserverShowAggrGetIterRequestDesiredAttributes { - r := *o.DesiredAttributesPtr - return r -} - -// SetDesiredAttributes is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterRequest) SetDesiredAttributes(newValue VserverShowAggrGetIterRequestDesiredAttributes) *VserverShowAggrGetIterRequest { - o.DesiredAttributesPtr = &newValue - return o -} - -// MaxRecords is a 'getter' method -func (o *VserverShowAggrGetIterRequest) MaxRecords() int { - r := *o.MaxRecordsPtr - return r -} - -// SetMaxRecords is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterRequest) SetMaxRecords(newValue int) *VserverShowAggrGetIterRequest { - o.MaxRecordsPtr = &newValue - return o -} - -// VserverShowAggrGetIterRequestQuery is a wrapper -type VserverShowAggrGetIterRequestQuery struct { - XMLName xml.Name `xml:"query"` - ShowAggregatesPtr *ShowAggregatesType `xml:"show-aggregates"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverShowAggrGetIterRequestQuery) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ShowAggregates is a 'getter' method -func (o *VserverShowAggrGetIterRequestQuery) ShowAggregates() ShowAggregatesType { - r := *o.ShowAggregatesPtr - return r -} - -// SetShowAggregates is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterRequestQuery) SetShowAggregates(newValue ShowAggregatesType) *VserverShowAggrGetIterRequestQuery { - o.ShowAggregatesPtr = 
&newValue - return o -} - -// Query is a 'getter' method -func (o *VserverShowAggrGetIterRequest) Query() VserverShowAggrGetIterRequestQuery { - r := *o.QueryPtr - return r -} - -// SetQuery is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterRequest) SetQuery(newValue VserverShowAggrGetIterRequestQuery) *VserverShowAggrGetIterRequest { - o.QueryPtr = &newValue - return o -} - -// Tag is a 'getter' method -func (o *VserverShowAggrGetIterRequest) Tag() string { - r := *o.TagPtr - return r -} - -// SetTag is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterRequest) SetTag(newValue string) *VserverShowAggrGetIterRequest { - o.TagPtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *VserverShowAggrGetIterRequest) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterRequest) SetVserver(newValue string) *VserverShowAggrGetIterRequest { - o.VserverPtr = &newValue - return o -} - -// VserverShowAggrGetIterResponseResultAttributesList is a wrapper -type VserverShowAggrGetIterResponseResultAttributesList struct { - XMLName xml.Name `xml:"attributes-list"` - ShowAggregatesPtr []ShowAggregatesType `xml:"show-aggregates"` -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverShowAggrGetIterResponseResultAttributesList) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ShowAggregates is a 'getter' method -func (o *VserverShowAggrGetIterResponseResultAttributesList) ShowAggregates() []ShowAggregatesType { - r := o.ShowAggregatesPtr - return r -} - -// SetShowAggregates is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterResponseResultAttributesList) SetShowAggregates(newValue []ShowAggregatesType) *VserverShowAggrGetIterResponseResultAttributesList { - newSlice := make([]ShowAggregatesType, len(newValue)) - copy(newSlice, newValue) - o.ShowAggregatesPtr = newSlice - return o -} - -// values is a 'getter' method -func (o *VserverShowAggrGetIterResponseResultAttributesList) values() []ShowAggregatesType { - r := o.ShowAggregatesPtr - return r -} - -// setValues is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterResponseResultAttributesList) setValues(newValue []ShowAggregatesType) *VserverShowAggrGetIterResponseResultAttributesList { - newSlice := make([]ShowAggregatesType, len(newValue)) - copy(newSlice, newValue) - o.ShowAggregatesPtr = newSlice - return o -} - -// AttributesList is a 'getter' method -func (o *VserverShowAggrGetIterResponseResult) AttributesList() VserverShowAggrGetIterResponseResultAttributesList { - r := *o.AttributesListPtr - return r -} - -// SetAttributesList is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterResponseResult) SetAttributesList(newValue VserverShowAggrGetIterResponseResultAttributesList) *VserverShowAggrGetIterResponseResult { - o.AttributesListPtr = &newValue - return o -} - -// NextTag is a 'getter' method -func (o *VserverShowAggrGetIterResponseResult) NextTag() string { - r := *o.NextTagPtr - return r -} - -// SetNextTag is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterResponseResult) SetNextTag(newValue string) *VserverShowAggrGetIterResponseResult { - o.NextTagPtr = &newValue - return o -} - -// NumRecords is a 'getter' method -func (o 
*VserverShowAggrGetIterResponseResult) NumRecords() int { - r := *o.NumRecordsPtr - return r -} - -// SetNumRecords is a fluent style 'setter' method that can be chained -func (o *VserverShowAggrGetIterResponseResult) SetNumRecords(newValue int) *VserverShowAggrGetIterResponseResult { - o.NumRecordsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/common.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/common.go deleted file mode 100644 index d950e6320..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/common.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package azgo - -import ( - "bytes" - "crypto/tls" - "encoding/xml" - "errors" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "time" - - tridentconfig "github.com/netapp/trident/config" - log "github.com/sirupsen/logrus" -) - -type ZAPIRequest interface { - ToXML() (string, error) -} - -type ZAPIResponseIterable interface { - NextTag() string -} - -type ZapiRunner struct { - ManagementLIF string - SVM string - Username string - Password string - Secure bool - OntapiVersion string - DebugTraceFlags map[string]bool // Example: {"api":false, "method":true} -} - -// SendZapi sends the provided ZAPIRequest to the Ontap system -func (o *ZapiRunner) SendZapi(r ZAPIRequest) (*http.Response, error) { - - if o.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "SendZapi", "Type": "ZapiRunner"} - log.WithFields(fields).Debug(">>>> SendZapi") - defer log.WithFields(fields).Debug("<<<< SendZapi") - } - - zapiCommand, err := r.ToXML() - if err != nil { - return nil, err - } - - var s = "" - if o.SVM == "" { - s = fmt.Sprintf(` - - %s - `, zapiCommand) - } else { - s = fmt.Sprintf(` - - %s - `, "vfiler=\""+o.SVM+"\"", zapiCommand) - } - if o.DebugTraceFlags["api"] { - log.Debugf("sending to '%s' xml: \n%s", o.ManagementLIF, s) - } - - url := "http://" + o.ManagementLIF + "/servlets/netapp.servlets.admin.XMLrequest_filer" - if o.Secure { - url = "https://" + o.ManagementLIF + "/servlets/netapp.servlets.admin.XMLrequest_filer" - } - if o.DebugTraceFlags["api"] { - log.Debugf("URL:> %s", url) - } - - b := []byte(s) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(b)) - req.Header.Set("Content-Type", "application/xml") - req.SetBasicAuth(o.Username, o.Password) - - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - - client := &http.Client{ - Transport: tr, - Timeout: time.Duration(tridentconfig.StorageAPITimeoutSeconds * time.Second), - } - response, err := client.Do(req) - if err != nil { - return nil, err - } else if response.StatusCode == 401 { - return nil, errors.New("response code 401 (Unauthorized): incorrect or missing credentials") - } - - if o.DebugTraceFlags["api"] { - log.Debugf("response Status: %s", response.Status) - log.Debugf("response Headers: %s", response.Header) - } - - return response, err -} - -// ExecuteUsing converts this object to a ZAPI XML representation and uses the supplied ZapiRunner to send to a filer -func (o *ZapiRunner) ExecuteUsing(z ZAPIRequest, requestType string, v interface{}) (interface{}, error) { - return o.ExecuteWithoutIteration(z, requestType, v) -} - -// ExecuteWithoutIteration does not attempt to perform any nextTag style iteration -func (o *ZapiRunner) ExecuteWithoutIteration(z ZAPIRequest, requestType string, v interface{}) (interface{}, error) { - - if o.DebugTraceFlags["method"] { - fields := 
log.Fields{"Method": "ExecuteUsing", "Type": requestType} - log.WithFields(fields).Debug(">>>> ExecuteUsing") - defer log.WithFields(fields).Debug("<<<< ExecuteUsing") - } - - resp, err := o.SendZapi(z) - if err != nil { - log.Errorf("API invocation failed. %v", err.Error()) - return nil, err - } - defer resp.Body.Close() - body, readErr := ioutil.ReadAll(resp.Body) - if readErr != nil { - log.Errorf("Error reading response body. %v", readErr.Error()) - return nil, readErr - } - if o.DebugTraceFlags["api"] { - log.Debugf("response Body:\n%s", string(body)) - } - - //unmarshalErr := xml.Unmarshal(body, &v) - unmarshalErr := xml.Unmarshal(body, v) - if unmarshalErr != nil { - log.WithField("body", string(body)).Warnf("Error unmarshaling response body. %v", unmarshalErr.Error()) - } - if o.DebugTraceFlags["api"] { - log.Debugf("%s result:\n%v", requestType, v) - } - - return v, nil -} - -// ToString implements a String() function via reflection -func ToString(val reflect.Value) string { - if reflect.TypeOf(val).Kind() == reflect.Ptr { - val = reflect.Indirect(val) - } - - var buffer bytes.Buffer - if reflect.ValueOf(val).Kind() == reflect.Struct { - for i := 0; i < val.Type().NumField(); i++ { - fieldName := val.Type().Field(i).Name - fieldType := val.Type().Field(i) - fieldTag := fieldType.Tag - fieldValue := val.Field(i) - - switch val.Field(i).Kind() { - case reflect.Ptr: - fieldValue = reflect.Indirect(val.Field(i)) - default: - fieldValue = val.Field(i) - } - - if fieldTag != "" { - xmlTag := fieldTag.Get("xml") - if xmlTag != "" { - fieldName = xmlTag - } - } - - if fieldValue.IsValid() { - buffer.WriteString(fmt.Sprintf("%s: %v\n", fieldName, fieldValue)) - } else { - buffer.WriteString(fmt.Sprintf("%s: %v\n", fieldName, "nil")) - } - } - } - - return buffer.String() -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/errors.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/errors.go deleted file mode 100644 index 4979f8090..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/errors.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. 
- -package azgo - -const EONTAPI_EEXIST = "17" -const EONTAPI_EVOLOPNOTSUPP = "160" -const EVDISK_ERROR_NO_SUCH_INITGROUP = "9003" -const EVDISK_ERROR_INITGROUP_EXISTS = "9004" -const EVDISK_ERROR_NODE_NOT_IN_INITGROUP = "9007" -const EVDISK_ERROR_NO_SUCH_ATTRIBUTE = "9011" -const EVDISK_ERROR_VDISK_EXISTS = "9012" -const EVDISK_ERROR_INITGROUP_HAS_NODE = "9008" -const EVDISK_ERROR_VDISK_NOT_ENABLED = "9014" -const EVDISK_ERROR_VDISK_NOT_DISABLED = "9015" -const EVDISK_ERROR_INITGROUP_HAS_VDISK = "9023" -const EVDISK_ERROR_INITGROUP_HAS_LUN = "9024" -const EVDISK_ERROR_INITGROUP_MAPS_EXIST = "9029" -const EVDISK_ERROR_NO_SUCH_VOLUME = "9036" -const EVDISK_ERROR_SIZE_TOO_SMALL = "9041" -const EAPIERROR = "13001" -const EAPIPRIVILEGE = "13003" -const EAPINOTFOUND = "13005" -const ESNAPSHOTBUSY = "13024" -const EVOLUMEDOESNOTEXIST = "13040" -const EINTERNALERROR = "13114" -const EINVALIDINPUTERROR = "13115" -const EDUPLICATEENTRY = "13130" -const EAGGRDOESNOTEXIST = "14420" -const EOBJECTNOTFOUND = "15661" diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-UUID.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-UUID.go deleted file mode 100644 index 8fcd236ac..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-UUID.go +++ /dev/null @@ -1,7 +0,0 @@ -package azgo - -// UuidType is a structure to represent a uuid ZAPI object -type UuidType = string - -// UUIDType is a structure to represent a uuid ZAPI object -type UUIDType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-access-protocol.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-access-protocol.go deleted file mode 100644 index aec894bcc..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-access-protocol.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// AccessProtocolType is a structure to represent a access-protocol ZAPI object -type AccessProtocolType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-64bit-upgrade-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-64bit-upgrade-attributes.go deleted file mode 100644 index 0ef5ac0b1..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-64bit-upgrade-attributes.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// Aggr64bitUpgradeAttributesType is a structure to represent a aggr-64bit-upgrade-attributes ZAPI object -type Aggr64bitUpgradeAttributesType struct { - XMLName xml.Name `xml:"aggr-64bit-upgrade-attributes"` - AggrCheckAttributesPtr *AggrCheckAttributesType `xml:"aggr-check-attributes"` - AggrStartAttributesPtr *AggrStartAttributesType `xml:"aggr-start-attributes"` - AggrStatusAttributesPtr *AggrStatusAttributesType `xml:"aggr-status-attributes"` -} - -// NewAggr64bitUpgradeAttributesType is a factory method for creating new instances of Aggr64bitUpgradeAttributesType objects -func NewAggr64bitUpgradeAttributesType() *Aggr64bitUpgradeAttributesType { - return &Aggr64bitUpgradeAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *Aggr64bitUpgradeAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string 
representation of this object's fields and implements the Stringer interface -func (o Aggr64bitUpgradeAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AggrCheckAttributes is a 'getter' method -func (o *Aggr64bitUpgradeAttributesType) AggrCheckAttributes() AggrCheckAttributesType { - r := *o.AggrCheckAttributesPtr - return r -} - -// SetAggrCheckAttributes is a fluent style 'setter' method that can be chained -func (o *Aggr64bitUpgradeAttributesType) SetAggrCheckAttributes(newValue AggrCheckAttributesType) *Aggr64bitUpgradeAttributesType { - o.AggrCheckAttributesPtr = &newValue - return o -} - -// AggrStartAttributes is a 'getter' method -func (o *Aggr64bitUpgradeAttributesType) AggrStartAttributes() AggrStartAttributesType { - r := *o.AggrStartAttributesPtr - return r -} - -// SetAggrStartAttributes is a fluent style 'setter' method that can be chained -func (o *Aggr64bitUpgradeAttributesType) SetAggrStartAttributes(newValue AggrStartAttributesType) *Aggr64bitUpgradeAttributesType { - o.AggrStartAttributesPtr = &newValue - return o -} - -// AggrStatusAttributes is a 'getter' method -func (o *Aggr64bitUpgradeAttributesType) AggrStatusAttributes() AggrStatusAttributesType { - r := *o.AggrStatusAttributesPtr - return r -} - -// SetAggrStatusAttributes is a fluent style 'setter' method that can be chained -func (o *Aggr64bitUpgradeAttributesType) SetAggrStatusAttributes(newValue AggrStatusAttributesType) *Aggr64bitUpgradeAttributesType { - o.AggrStatusAttributesPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-attributes.go deleted file mode 100644 index 90f97a977..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-attributes.go +++ /dev/null @@ -1,378 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrAttributesType is a structure to represent a aggr-attributes ZAPI object -type AggrAttributesType struct { - XMLName xml.Name `xml:"aggr-attributes"` - Aggr64bitUpgradeAttributesPtr *Aggr64bitUpgradeAttributesType `xml:"aggr-64bit-upgrade-attributes"` - AggrFsAttributesPtr *AggrFsAttributesType `xml:"aggr-fs-attributes"` - AggrInodeAttributesPtr *AggrInodeAttributesType `xml:"aggr-inode-attributes"` - AggrOwnershipAttributesPtr *AggrOwnershipAttributesType `xml:"aggr-ownership-attributes"` - AggrPerformanceAttributesPtr *AggrPerformanceAttributesType `xml:"aggr-performance-attributes"` - AggrRaidAttributesPtr *AggrRaidAttributesType `xml:"aggr-raid-attributes"` - AggrSnaplockAttributesPtr *AggrSnaplockAttributesType `xml:"aggr-snaplock-attributes"` - AggrSnapmirrorAttributesPtr *AggrSnapmirrorAttributesType `xml:"aggr-snapmirror-attributes"` - AggrSnapshotAttributesPtr *AggrSnapshotAttributesType `xml:"aggr-snapshot-attributes"` - AggrSpaceAttributesPtr *AggrSpaceAttributesType `xml:"aggr-space-attributes"` - AggrStripingAttributesPtr *AggrStripingAttributesType `xml:"aggr-striping-attributes"` - AggrVolumeCountAttributesPtr *AggrVolumeCountAttributesType `xml:"aggr-volume-count-attributes"` - AggrWaflironAttributesPtr *AggrWaflironAttributesType `xml:"aggr-wafliron-attributes"` - AggregateNamePtr *string `xml:"aggregate-name"` - AggregateUuidPtr *string `xml:"aggregate-uuid"` - AutobalanceAvailableThresholdPercentPtr *int `xml:"autobalance-available-threshold-percent"` - AutobalanceStatePtr 
*AutobalanceAggregateStateType `xml:"autobalance-state"` - AutobalanceStateChangeCounterPtr *int `xml:"autobalance-state-change-counter"` - AutobalanceUnbalancedThresholdPercentPtr *int `xml:"autobalance-unbalanced-threshold-percent"` - IsAutobalanceEligiblePtr *bool `xml:"is-autobalance-eligible"` - IsCftPrecommitPtr *bool `xml:"is-cft-precommit"` - IsObjectStoreAttachEligiblePtr *bool `xml:"is-object-store-attach-eligible"` - IsTransitionOutOfSpacePtr *bool `xml:"is-transition-out-of-space"` - NodesPtr *AggrAttributesTypeNodes `xml:"nodes"` - // work in progress - StripingTypePtr *string `xml:"striping-type"` -} - -// NewAggrAttributesType is a factory method for creating new instances of AggrAttributesType objects -func NewAggrAttributesType() *AggrAttributesType { - return &AggrAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Aggr64bitUpgradeAttributes is a 'getter' method -func (o *AggrAttributesType) Aggr64bitUpgradeAttributes() Aggr64bitUpgradeAttributesType { - r := *o.Aggr64bitUpgradeAttributesPtr - return r -} - -// SetAggr64bitUpgradeAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggr64bitUpgradeAttributes(newValue Aggr64bitUpgradeAttributesType) *AggrAttributesType { - o.Aggr64bitUpgradeAttributesPtr = &newValue - return o -} - -// AggrFsAttributes is a 'getter' method -func (o *AggrAttributesType) AggrFsAttributes() AggrFsAttributesType { - r := *o.AggrFsAttributesPtr - return r -} - -// SetAggrFsAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrFsAttributes(newValue AggrFsAttributesType) *AggrAttributesType { - o.AggrFsAttributesPtr = &newValue - return o -} - -// AggrInodeAttributes is a 'getter' method -func (o *AggrAttributesType) AggrInodeAttributes() AggrInodeAttributesType { - r := *o.AggrInodeAttributesPtr - return r -} - -// SetAggrInodeAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrInodeAttributes(newValue AggrInodeAttributesType) *AggrAttributesType { - o.AggrInodeAttributesPtr = &newValue - return o -} - -// AggrOwnershipAttributes is a 'getter' method -func (o *AggrAttributesType) AggrOwnershipAttributes() AggrOwnershipAttributesType { - r := *o.AggrOwnershipAttributesPtr - return r -} - -// SetAggrOwnershipAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrOwnershipAttributes(newValue AggrOwnershipAttributesType) *AggrAttributesType { - o.AggrOwnershipAttributesPtr = &newValue - return o -} - -// AggrPerformanceAttributes is a 'getter' method -func (o *AggrAttributesType) AggrPerformanceAttributes() AggrPerformanceAttributesType { - r := *o.AggrPerformanceAttributesPtr - return r -} - -// SetAggrPerformanceAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrPerformanceAttributes(newValue AggrPerformanceAttributesType) *AggrAttributesType { - o.AggrPerformanceAttributesPtr = &newValue - return o -} - -// AggrRaidAttributes is a 'getter' method -func (o *AggrAttributesType) 
AggrRaidAttributes() AggrRaidAttributesType { - r := *o.AggrRaidAttributesPtr - return r -} - -// SetAggrRaidAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrRaidAttributes(newValue AggrRaidAttributesType) *AggrAttributesType { - o.AggrRaidAttributesPtr = &newValue - return o -} - -// AggrSnaplockAttributes is a 'getter' method -func (o *AggrAttributesType) AggrSnaplockAttributes() AggrSnaplockAttributesType { - r := *o.AggrSnaplockAttributesPtr - return r -} - -// SetAggrSnaplockAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrSnaplockAttributes(newValue AggrSnaplockAttributesType) *AggrAttributesType { - o.AggrSnaplockAttributesPtr = &newValue - return o -} - -// AggrSnapmirrorAttributes is a 'getter' method -func (o *AggrAttributesType) AggrSnapmirrorAttributes() AggrSnapmirrorAttributesType { - r := *o.AggrSnapmirrorAttributesPtr - return r -} - -// SetAggrSnapmirrorAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrSnapmirrorAttributes(newValue AggrSnapmirrorAttributesType) *AggrAttributesType { - o.AggrSnapmirrorAttributesPtr = &newValue - return o -} - -// AggrSnapshotAttributes is a 'getter' method -func (o *AggrAttributesType) AggrSnapshotAttributes() AggrSnapshotAttributesType { - r := *o.AggrSnapshotAttributesPtr - return r -} - -// SetAggrSnapshotAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrSnapshotAttributes(newValue AggrSnapshotAttributesType) *AggrAttributesType { - o.AggrSnapshotAttributesPtr = &newValue - return o -} - -// AggrSpaceAttributes is a 'getter' method -func (o *AggrAttributesType) AggrSpaceAttributes() AggrSpaceAttributesType { - r := *o.AggrSpaceAttributesPtr - return r -} - -// SetAggrSpaceAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrSpaceAttributes(newValue AggrSpaceAttributesType) *AggrAttributesType { - o.AggrSpaceAttributesPtr = &newValue - return o -} - -// AggrStripingAttributes is a 'getter' method -func (o *AggrAttributesType) AggrStripingAttributes() AggrStripingAttributesType { - r := *o.AggrStripingAttributesPtr - return r -} - -// SetAggrStripingAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrStripingAttributes(newValue AggrStripingAttributesType) *AggrAttributesType { - o.AggrStripingAttributesPtr = &newValue - return o -} - -// AggrVolumeCountAttributes is a 'getter' method -func (o *AggrAttributesType) AggrVolumeCountAttributes() AggrVolumeCountAttributesType { - r := *o.AggrVolumeCountAttributesPtr - return r -} - -// SetAggrVolumeCountAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrVolumeCountAttributes(newValue AggrVolumeCountAttributesType) *AggrAttributesType { - o.AggrVolumeCountAttributesPtr = &newValue - return o -} - -// AggrWaflironAttributes is a 'getter' method -func (o *AggrAttributesType) AggrWaflironAttributes() AggrWaflironAttributesType { - r := *o.AggrWaflironAttributesPtr - return r -} - -// SetAggrWaflironAttributes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggrWaflironAttributes(newValue AggrWaflironAttributesType) *AggrAttributesType { - o.AggrWaflironAttributesPtr = &newValue - return o -} - -// AggregateName is a 'getter' method -func (o *AggrAttributesType) AggregateName() string { - r := 
*o.AggregateNamePtr - return r -} - -// SetAggregateName is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggregateName(newValue string) *AggrAttributesType { - o.AggregateNamePtr = &newValue - return o -} - -// AggregateUuid is a 'getter' method -func (o *AggrAttributesType) AggregateUuid() string { - r := *o.AggregateUuidPtr - return r -} - -// SetAggregateUuid is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAggregateUuid(newValue string) *AggrAttributesType { - o.AggregateUuidPtr = &newValue - return o -} - -// AutobalanceAvailableThresholdPercent is a 'getter' method -func (o *AggrAttributesType) AutobalanceAvailableThresholdPercent() int { - r := *o.AutobalanceAvailableThresholdPercentPtr - return r -} - -// SetAutobalanceAvailableThresholdPercent is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAutobalanceAvailableThresholdPercent(newValue int) *AggrAttributesType { - o.AutobalanceAvailableThresholdPercentPtr = &newValue - return o -} - -// AutobalanceState is a 'getter' method -func (o *AggrAttributesType) AutobalanceState() AutobalanceAggregateStateType { - r := *o.AutobalanceStatePtr - return r -} - -// SetAutobalanceState is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAutobalanceState(newValue AutobalanceAggregateStateType) *AggrAttributesType { - o.AutobalanceStatePtr = &newValue - return o -} - -// AutobalanceStateChangeCounter is a 'getter' method -func (o *AggrAttributesType) AutobalanceStateChangeCounter() int { - r := *o.AutobalanceStateChangeCounterPtr - return r -} - -// SetAutobalanceStateChangeCounter is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAutobalanceStateChangeCounter(newValue int) *AggrAttributesType { - o.AutobalanceStateChangeCounterPtr = &newValue - return o -} - -// AutobalanceUnbalancedThresholdPercent is a 'getter' method -func (o *AggrAttributesType) AutobalanceUnbalancedThresholdPercent() int { - r := *o.AutobalanceUnbalancedThresholdPercentPtr - return r -} - -// SetAutobalanceUnbalancedThresholdPercent is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetAutobalanceUnbalancedThresholdPercent(newValue int) *AggrAttributesType { - o.AutobalanceUnbalancedThresholdPercentPtr = &newValue - return o -} - -// IsAutobalanceEligible is a 'getter' method -func (o *AggrAttributesType) IsAutobalanceEligible() bool { - r := *o.IsAutobalanceEligiblePtr - return r -} - -// SetIsAutobalanceEligible is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetIsAutobalanceEligible(newValue bool) *AggrAttributesType { - o.IsAutobalanceEligiblePtr = &newValue - return o -} - -// IsCftPrecommit is a 'getter' method -func (o *AggrAttributesType) IsCftPrecommit() bool { - r := *o.IsCftPrecommitPtr - return r -} - -// SetIsCftPrecommit is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetIsCftPrecommit(newValue bool) *AggrAttributesType { - o.IsCftPrecommitPtr = &newValue - return o -} - -// IsObjectStoreAttachEligible is a 'getter' method -func (o *AggrAttributesType) IsObjectStoreAttachEligible() bool { - r := *o.IsObjectStoreAttachEligiblePtr - return r -} - -// SetIsObjectStoreAttachEligible is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetIsObjectStoreAttachEligible(newValue bool) *AggrAttributesType { - o.IsObjectStoreAttachEligiblePtr = 
&newValue - return o -} - -// IsTransitionOutOfSpace is a 'getter' method -func (o *AggrAttributesType) IsTransitionOutOfSpace() bool { - r := *o.IsTransitionOutOfSpacePtr - return r -} - -// SetIsTransitionOutOfSpace is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetIsTransitionOutOfSpace(newValue bool) *AggrAttributesType { - o.IsTransitionOutOfSpacePtr = &newValue - return o -} - -// AggrAttributesTypeNodes is a wrapper -type AggrAttributesTypeNodes struct { - XMLName xml.Name `xml:"nodes"` - NodeNamePtr []NodeNameType `xml:"node-name"` -} - -// NodeName is a 'getter' method -func (o *AggrAttributesTypeNodes) NodeName() []NodeNameType { - r := o.NodeNamePtr - return r -} - -// SetNodeName is a fluent style 'setter' method that can be chained -func (o *AggrAttributesTypeNodes) SetNodeName(newValue []NodeNameType) *AggrAttributesTypeNodes { - newSlice := make([]NodeNameType, len(newValue)) - copy(newSlice, newValue) - o.NodeNamePtr = newSlice - return o -} - -// Nodes is a 'getter' method -func (o *AggrAttributesType) Nodes() AggrAttributesTypeNodes { - r := *o.NodesPtr - return r -} - -// SetNodes is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetNodes(newValue AggrAttributesTypeNodes) *AggrAttributesType { - o.NodesPtr = &newValue - return o -} - -// StripingType is a 'getter' method -func (o *AggrAttributesType) StripingType() string { - r := *o.StripingTypePtr - return r -} - -// SetStripingType is a fluent style 'setter' method that can be chained -func (o *AggrAttributesType) SetStripingType(newValue string) *AggrAttributesType { - o.StripingTypePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-check-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-check-attributes.go deleted file mode 100644 index 0ae1f1c9c..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-check-attributes.go +++ /dev/null @@ -1,84 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrCheckAttributesType is a structure to represent a aggr-check-attributes ZAPI object -type AggrCheckAttributesType struct { - XMLName xml.Name `xml:"aggr-check-attributes"` - AddedSpacePtr *int `xml:"added-space"` - CheckLastErrnoPtr *int `xml:"check-last-errno"` - CookiePtr *int `xml:"cookie"` - IsSpaceEstimateCompletePtr *bool `xml:"is-space-estimate-complete"` -} - -// NewAggrCheckAttributesType is a factory method for creating new instances of AggrCheckAttributesType objects -func NewAggrCheckAttributesType() *AggrCheckAttributesType { - return &AggrCheckAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrCheckAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrCheckAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AddedSpace is a 'getter' method -func (o *AggrCheckAttributesType) AddedSpace() int { - r := *o.AddedSpacePtr - return r -} - -// SetAddedSpace is a fluent style 'setter' method that can be chained -func (o *AggrCheckAttributesType) SetAddedSpace(newValue int) *AggrCheckAttributesType { - o.AddedSpacePtr = &newValue - return o 
-} - -// CheckLastErrno is a 'getter' method -func (o *AggrCheckAttributesType) CheckLastErrno() int { - r := *o.CheckLastErrnoPtr - return r -} - -// SetCheckLastErrno is a fluent style 'setter' method that can be chained -func (o *AggrCheckAttributesType) SetCheckLastErrno(newValue int) *AggrCheckAttributesType { - o.CheckLastErrnoPtr = &newValue - return o -} - -// Cookie is a 'getter' method -func (o *AggrCheckAttributesType) Cookie() int { - r := *o.CookiePtr - return r -} - -// SetCookie is a fluent style 'setter' method that can be chained -func (o *AggrCheckAttributesType) SetCookie(newValue int) *AggrCheckAttributesType { - o.CookiePtr = &newValue - return o -} - -// IsSpaceEstimateComplete is a 'getter' method -func (o *AggrCheckAttributesType) IsSpaceEstimateComplete() bool { - r := *o.IsSpaceEstimateCompletePtr - return r -} - -// SetIsSpaceEstimateComplete is a fluent style 'setter' method that can be chained -func (o *AggrCheckAttributesType) SetIsSpaceEstimateComplete(newValue bool) *AggrCheckAttributesType { - o.IsSpaceEstimateCompletePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-fs-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-fs-attributes.go deleted file mode 100644 index 2332ddd3f..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-fs-attributes.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrFsAttributesType is a structure to represent a aggr-fs-attributes ZAPI object -type AggrFsAttributesType struct { - XMLName xml.Name `xml:"aggr-fs-attributes"` - BlockTypePtr *string `xml:"block-type"` - FsidPtr *int `xml:"fsid"` - TypePtr *string `xml:"type"` -} - -// NewAggrFsAttributesType is a factory method for creating new instances of AggrFsAttributesType objects -func NewAggrFsAttributesType() *AggrFsAttributesType { - return &AggrFsAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrFsAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrFsAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// BlockType is a 'getter' method -func (o *AggrFsAttributesType) BlockType() string { - r := *o.BlockTypePtr - return r -} - -// SetBlockType is a fluent style 'setter' method that can be chained -func (o *AggrFsAttributesType) SetBlockType(newValue string) *AggrFsAttributesType { - o.BlockTypePtr = &newValue - return o -} - -// Fsid is a 'getter' method -func (o *AggrFsAttributesType) Fsid() int { - r := *o.FsidPtr - return r -} - -// SetFsid is a fluent style 'setter' method that can be chained -func (o *AggrFsAttributesType) SetFsid(newValue int) *AggrFsAttributesType { - o.FsidPtr = &newValue - return o -} - -// Type is a 'getter' method -func (o *AggrFsAttributesType) Type() string { - r := *o.TypePtr - return r -} - -// SetType is a fluent style 'setter' method that can be chained -func (o *AggrFsAttributesType) SetType(newValue string) *AggrFsAttributesType { - o.TypePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-inode-attributes.go 
b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-inode-attributes.go deleted file mode 100644 index c3eda167f..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-inode-attributes.go +++ /dev/null @@ -1,162 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrInodeAttributesType is a structure to represent a aggr-inode-attributes ZAPI object -type AggrInodeAttributesType struct { - XMLName xml.Name `xml:"aggr-inode-attributes"` - FilesPrivateUsedPtr *int `xml:"files-private-used"` - FilesTotalPtr *int `xml:"files-total"` - FilesUsedPtr *int `xml:"files-used"` - InodefilePrivateCapacityPtr *int `xml:"inodefile-private-capacity"` - InodefilePublicCapacityPtr *int `xml:"inodefile-public-capacity"` - InofileVersionPtr *int `xml:"inofile-version"` - MaxfilesAvailablePtr *int `xml:"maxfiles-available"` - MaxfilesPossiblePtr *int `xml:"maxfiles-possible"` - MaxfilesUsedPtr *int `xml:"maxfiles-used"` - PercentInodeUsedCapacityPtr *int `xml:"percent-inode-used-capacity"` -} - -// NewAggrInodeAttributesType is a factory method for creating new instances of AggrInodeAttributesType objects -func NewAggrInodeAttributesType() *AggrInodeAttributesType { - return &AggrInodeAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrInodeAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrInodeAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// FilesPrivateUsed is a 'getter' method -func (o *AggrInodeAttributesType) FilesPrivateUsed() int { - r := *o.FilesPrivateUsedPtr - return r -} - -// SetFilesPrivateUsed is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) SetFilesPrivateUsed(newValue int) *AggrInodeAttributesType { - o.FilesPrivateUsedPtr = &newValue - return o -} - -// FilesTotal is a 'getter' method -func (o *AggrInodeAttributesType) FilesTotal() int { - r := *o.FilesTotalPtr - return r -} - -// SetFilesTotal is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) SetFilesTotal(newValue int) *AggrInodeAttributesType { - o.FilesTotalPtr = &newValue - return o -} - -// FilesUsed is a 'getter' method -func (o *AggrInodeAttributesType) FilesUsed() int { - r := *o.FilesUsedPtr - return r -} - -// SetFilesUsed is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) SetFilesUsed(newValue int) *AggrInodeAttributesType { - o.FilesUsedPtr = &newValue - return o -} - -// InodefilePrivateCapacity is a 'getter' method -func (o *AggrInodeAttributesType) InodefilePrivateCapacity() int { - r := *o.InodefilePrivateCapacityPtr - return r -} - -// SetInodefilePrivateCapacity is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) SetInodefilePrivateCapacity(newValue int) *AggrInodeAttributesType { - o.InodefilePrivateCapacityPtr = &newValue - return o -} - -// InodefilePublicCapacity is a 'getter' method -func (o *AggrInodeAttributesType) InodefilePublicCapacity() int { - r := *o.InodefilePublicCapacityPtr - return r -} - -// SetInodefilePublicCapacity is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) 
SetInodefilePublicCapacity(newValue int) *AggrInodeAttributesType { - o.InodefilePublicCapacityPtr = &newValue - return o -} - -// InofileVersion is a 'getter' method -func (o *AggrInodeAttributesType) InofileVersion() int { - r := *o.InofileVersionPtr - return r -} - -// SetInofileVersion is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) SetInofileVersion(newValue int) *AggrInodeAttributesType { - o.InofileVersionPtr = &newValue - return o -} - -// MaxfilesAvailable is a 'getter' method -func (o *AggrInodeAttributesType) MaxfilesAvailable() int { - r := *o.MaxfilesAvailablePtr - return r -} - -// SetMaxfilesAvailable is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) SetMaxfilesAvailable(newValue int) *AggrInodeAttributesType { - o.MaxfilesAvailablePtr = &newValue - return o -} - -// MaxfilesPossible is a 'getter' method -func (o *AggrInodeAttributesType) MaxfilesPossible() int { - r := *o.MaxfilesPossiblePtr - return r -} - -// SetMaxfilesPossible is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) SetMaxfilesPossible(newValue int) *AggrInodeAttributesType { - o.MaxfilesPossiblePtr = &newValue - return o -} - -// MaxfilesUsed is a 'getter' method -func (o *AggrInodeAttributesType) MaxfilesUsed() int { - r := *o.MaxfilesUsedPtr - return r -} - -// SetMaxfilesUsed is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) SetMaxfilesUsed(newValue int) *AggrInodeAttributesType { - o.MaxfilesUsedPtr = &newValue - return o -} - -// PercentInodeUsedCapacity is a 'getter' method -func (o *AggrInodeAttributesType) PercentInodeUsedCapacity() int { - r := *o.PercentInodeUsedCapacityPtr - return r -} - -// SetPercentInodeUsedCapacity is a fluent style 'setter' method that can be chained -func (o *AggrInodeAttributesType) SetPercentInodeUsedCapacity(newValue int) *AggrInodeAttributesType { - o.PercentInodeUsedCapacityPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-name.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-name.go deleted file mode 100644 index 318d46077..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-name.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// AggrNameType is a structure to represent a aggr-name ZAPI object -type AggrNameType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-ownership-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-ownership-attributes.go deleted file mode 100644 index 029901a9b..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-ownership-attributes.go +++ /dev/null @@ -1,123 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrOwnershipAttributesType is a structure to represent a aggr-ownership-attributes ZAPI object -type AggrOwnershipAttributesType struct { - XMLName xml.Name `xml:"aggr-ownership-attributes"` - ClusterPtr *string `xml:"cluster"` - DrHomeIdPtr *int `xml:"dr-home-id"` - DrHomeNamePtr *string `xml:"dr-home-name"` - HomeIdPtr *int `xml:"home-id"` - HomeNamePtr *string `xml:"home-name"` - OwnerIdPtr *int `xml:"owner-id"` - OwnerNamePtr *string `xml:"owner-name"` -} - -// NewAggrOwnershipAttributesType is a factory method for creating new instances of AggrOwnershipAttributesType 
objects -func NewAggrOwnershipAttributesType() *AggrOwnershipAttributesType { - return &AggrOwnershipAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrOwnershipAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrOwnershipAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Cluster is a 'getter' method -func (o *AggrOwnershipAttributesType) Cluster() string { - r := *o.ClusterPtr - return r -} - -// SetCluster is a fluent style 'setter' method that can be chained -func (o *AggrOwnershipAttributesType) SetCluster(newValue string) *AggrOwnershipAttributesType { - o.ClusterPtr = &newValue - return o -} - -// DrHomeId is a 'getter' method -func (o *AggrOwnershipAttributesType) DrHomeId() int { - r := *o.DrHomeIdPtr - return r -} - -// SetDrHomeId is a fluent style 'setter' method that can be chained -func (o *AggrOwnershipAttributesType) SetDrHomeId(newValue int) *AggrOwnershipAttributesType { - o.DrHomeIdPtr = &newValue - return o -} - -// DrHomeName is a 'getter' method -func (o *AggrOwnershipAttributesType) DrHomeName() string { - r := *o.DrHomeNamePtr - return r -} - -// SetDrHomeName is a fluent style 'setter' method that can be chained -func (o *AggrOwnershipAttributesType) SetDrHomeName(newValue string) *AggrOwnershipAttributesType { - o.DrHomeNamePtr = &newValue - return o -} - -// HomeId is a 'getter' method -func (o *AggrOwnershipAttributesType) HomeId() int { - r := *o.HomeIdPtr - return r -} - -// SetHomeId is a fluent style 'setter' method that can be chained -func (o *AggrOwnershipAttributesType) SetHomeId(newValue int) *AggrOwnershipAttributesType { - o.HomeIdPtr = &newValue - return o -} - -// HomeName is a 'getter' method -func (o *AggrOwnershipAttributesType) HomeName() string { - r := *o.HomeNamePtr - return r -} - -// SetHomeName is a fluent style 'setter' method that can be chained -func (o *AggrOwnershipAttributesType) SetHomeName(newValue string) *AggrOwnershipAttributesType { - o.HomeNamePtr = &newValue - return o -} - -// OwnerId is a 'getter' method -func (o *AggrOwnershipAttributesType) OwnerId() int { - r := *o.OwnerIdPtr - return r -} - -// SetOwnerId is a fluent style 'setter' method that can be chained -func (o *AggrOwnershipAttributesType) SetOwnerId(newValue int) *AggrOwnershipAttributesType { - o.OwnerIdPtr = &newValue - return o -} - -// OwnerName is a 'getter' method -func (o *AggrOwnershipAttributesType) OwnerName() string { - r := *o.OwnerNamePtr - return r -} - -// SetOwnerName is a fluent style 'setter' method that can be chained -func (o *AggrOwnershipAttributesType) SetOwnerName(newValue string) *AggrOwnershipAttributesType { - o.OwnerNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-performance-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-performance-attributes.go deleted file mode 100644 index e95a76202..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-performance-attributes.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrPerformanceAttributesType is a structure to represent a 
aggr-performance-attributes ZAPI object -type AggrPerformanceAttributesType struct { - XMLName xml.Name `xml:"aggr-performance-attributes"` - FreeSpaceReallocPtr *string `xml:"free-space-realloc"` - MaxWriteAllocBlocksPtr *int `xml:"max-write-alloc-blocks"` - SingleInstanceDataLoggingPtr *string `xml:"single-instance-data-logging"` -} - -// NewAggrPerformanceAttributesType is a factory method for creating new instances of AggrPerformanceAttributesType objects -func NewAggrPerformanceAttributesType() *AggrPerformanceAttributesType { - return &AggrPerformanceAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrPerformanceAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrPerformanceAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// FreeSpaceRealloc is a 'getter' method -func (o *AggrPerformanceAttributesType) FreeSpaceRealloc() string { - r := *o.FreeSpaceReallocPtr - return r -} - -// SetFreeSpaceRealloc is a fluent style 'setter' method that can be chained -func (o *AggrPerformanceAttributesType) SetFreeSpaceRealloc(newValue string) *AggrPerformanceAttributesType { - o.FreeSpaceReallocPtr = &newValue - return o -} - -// MaxWriteAllocBlocks is a 'getter' method -func (o *AggrPerformanceAttributesType) MaxWriteAllocBlocks() int { - r := *o.MaxWriteAllocBlocksPtr - return r -} - -// SetMaxWriteAllocBlocks is a fluent style 'setter' method that can be chained -func (o *AggrPerformanceAttributesType) SetMaxWriteAllocBlocks(newValue int) *AggrPerformanceAttributesType { - o.MaxWriteAllocBlocksPtr = &newValue - return o -} - -// SingleInstanceDataLogging is a 'getter' method -func (o *AggrPerformanceAttributesType) SingleInstanceDataLogging() string { - r := *o.SingleInstanceDataLoggingPtr - return r -} - -// SetSingleInstanceDataLogging is a fluent style 'setter' method that can be chained -func (o *AggrPerformanceAttributesType) SetSingleInstanceDataLogging(newValue string) *AggrPerformanceAttributesType { - o.SingleInstanceDataLoggingPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-raid-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-raid-attributes.go deleted file mode 100644 index 1a0088ef2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-raid-attributes.go +++ /dev/null @@ -1,404 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrRaidAttributesType is a structure to represent a aggr-raid-attributes ZAPI object -type AggrRaidAttributesType struct { - XMLName xml.Name `xml:"aggr-raid-attributes"` - AggregateTypePtr *string `xml:"aggregate-type"` - CacheRaidGroupSizePtr *int `xml:"cache-raid-group-size"` - ChecksumStatusPtr *string `xml:"checksum-status"` - ChecksumStylePtr *string `xml:"checksum-style"` - DiskCountPtr *int `xml:"disk-count"` - EncryptionKeyIdPtr *string `xml:"encryption-key-id"` - HaPolicyPtr *string `xml:"ha-policy"` - HasLocalRootPtr *bool `xml:"has-local-root"` - HasPartnerRootPtr *bool `xml:"has-partner-root"` - IsChecksumEnabledPtr *bool `xml:"is-checksum-enabled"` - IsCompositePtr *bool `xml:"is-composite"` - IsEncryptedPtr *bool 
`xml:"is-encrypted"` - IsHybridPtr *bool `xml:"is-hybrid"` - IsHybridEnabledPtr *bool `xml:"is-hybrid-enabled"` - IsInconsistentPtr *bool `xml:"is-inconsistent"` - IsMirroredPtr *bool `xml:"is-mirrored"` - IsRootAggregatePtr *bool `xml:"is-root-aggregate"` - MirrorStatusPtr *string `xml:"mirror-status"` - MountStatePtr *string `xml:"mount-state"` - PlexCountPtr *int `xml:"plex-count"` - PlexesPtr *AggrRaidAttributesTypePlexes `xml:"plexes"` - // work in progress - RaidLostWriteStatePtr *string `xml:"raid-lost-write-state"` - RaidSizePtr *int `xml:"raid-size"` - RaidStatusPtr *string `xml:"raid-status"` - RaidTypePtr *string `xml:"raid-type"` - StatePtr *string `xml:"state"` - UsesSharedDisksPtr *bool `xml:"uses-shared-disks"` -} - -// NewAggrRaidAttributesType is a factory method for creating new instances of AggrRaidAttributesType objects -func NewAggrRaidAttributesType() *AggrRaidAttributesType { - return &AggrRaidAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrRaidAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrRaidAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AggregateType is a 'getter' method -func (o *AggrRaidAttributesType) AggregateType() string { - r := *o.AggregateTypePtr - return r -} - -// SetAggregateType is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetAggregateType(newValue string) *AggrRaidAttributesType { - o.AggregateTypePtr = &newValue - return o -} - -// CacheRaidGroupSize is a 'getter' method -func (o *AggrRaidAttributesType) CacheRaidGroupSize() int { - r := *o.CacheRaidGroupSizePtr - return r -} - -// SetCacheRaidGroupSize is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetCacheRaidGroupSize(newValue int) *AggrRaidAttributesType { - o.CacheRaidGroupSizePtr = &newValue - return o -} - -// ChecksumStatus is a 'getter' method -func (o *AggrRaidAttributesType) ChecksumStatus() string { - r := *o.ChecksumStatusPtr - return r -} - -// SetChecksumStatus is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetChecksumStatus(newValue string) *AggrRaidAttributesType { - o.ChecksumStatusPtr = &newValue - return o -} - -// ChecksumStyle is a 'getter' method -func (o *AggrRaidAttributesType) ChecksumStyle() string { - r := *o.ChecksumStylePtr - return r -} - -// SetChecksumStyle is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetChecksumStyle(newValue string) *AggrRaidAttributesType { - o.ChecksumStylePtr = &newValue - return o -} - -// DiskCount is a 'getter' method -func (o *AggrRaidAttributesType) DiskCount() int { - r := *o.DiskCountPtr - return r -} - -// SetDiskCount is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetDiskCount(newValue int) *AggrRaidAttributesType { - o.DiskCountPtr = &newValue - return o -} - -// EncryptionKeyId is a 'getter' method -func (o *AggrRaidAttributesType) EncryptionKeyId() string { - r := *o.EncryptionKeyIdPtr - return r -} - -// SetEncryptionKeyId is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetEncryptionKeyId(newValue string) *AggrRaidAttributesType { - o.EncryptionKeyIdPtr = 
&newValue - return o -} - -// HaPolicy is a 'getter' method -func (o *AggrRaidAttributesType) HaPolicy() string { - r := *o.HaPolicyPtr - return r -} - -// SetHaPolicy is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetHaPolicy(newValue string) *AggrRaidAttributesType { - o.HaPolicyPtr = &newValue - return o -} - -// HasLocalRoot is a 'getter' method -func (o *AggrRaidAttributesType) HasLocalRoot() bool { - r := *o.HasLocalRootPtr - return r -} - -// SetHasLocalRoot is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetHasLocalRoot(newValue bool) *AggrRaidAttributesType { - o.HasLocalRootPtr = &newValue - return o -} - -// HasPartnerRoot is a 'getter' method -func (o *AggrRaidAttributesType) HasPartnerRoot() bool { - r := *o.HasPartnerRootPtr - return r -} - -// SetHasPartnerRoot is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetHasPartnerRoot(newValue bool) *AggrRaidAttributesType { - o.HasPartnerRootPtr = &newValue - return o -} - -// IsChecksumEnabled is a 'getter' method -func (o *AggrRaidAttributesType) IsChecksumEnabled() bool { - r := *o.IsChecksumEnabledPtr - return r -} - -// SetIsChecksumEnabled is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetIsChecksumEnabled(newValue bool) *AggrRaidAttributesType { - o.IsChecksumEnabledPtr = &newValue - return o -} - -// IsComposite is a 'getter' method -func (o *AggrRaidAttributesType) IsComposite() bool { - r := *o.IsCompositePtr - return r -} - -// SetIsComposite is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetIsComposite(newValue bool) *AggrRaidAttributesType { - o.IsCompositePtr = &newValue - return o -} - -// IsEncrypted is a 'getter' method -func (o *AggrRaidAttributesType) IsEncrypted() bool { - r := *o.IsEncryptedPtr - return r -} - -// SetIsEncrypted is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetIsEncrypted(newValue bool) *AggrRaidAttributesType { - o.IsEncryptedPtr = &newValue - return o -} - -// IsHybrid is a 'getter' method -func (o *AggrRaidAttributesType) IsHybrid() bool { - r := *o.IsHybridPtr - return r -} - -// SetIsHybrid is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetIsHybrid(newValue bool) *AggrRaidAttributesType { - o.IsHybridPtr = &newValue - return o -} - -// IsHybridEnabled is a 'getter' method -func (o *AggrRaidAttributesType) IsHybridEnabled() bool { - r := *o.IsHybridEnabledPtr - return r -} - -// SetIsHybridEnabled is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetIsHybridEnabled(newValue bool) *AggrRaidAttributesType { - o.IsHybridEnabledPtr = &newValue - return o -} - -// IsInconsistent is a 'getter' method -func (o *AggrRaidAttributesType) IsInconsistent() bool { - r := *o.IsInconsistentPtr - return r -} - -// SetIsInconsistent is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetIsInconsistent(newValue bool) *AggrRaidAttributesType { - o.IsInconsistentPtr = &newValue - return o -} - -// IsMirrored is a 'getter' method -func (o *AggrRaidAttributesType) IsMirrored() bool { - r := *o.IsMirroredPtr - return r -} - -// SetIsMirrored is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetIsMirrored(newValue bool) *AggrRaidAttributesType { - o.IsMirroredPtr = &newValue - return o -} - -// IsRootAggregate is a 'getter' 
method -func (o *AggrRaidAttributesType) IsRootAggregate() bool { - r := *o.IsRootAggregatePtr - return r -} - -// SetIsRootAggregate is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetIsRootAggregate(newValue bool) *AggrRaidAttributesType { - o.IsRootAggregatePtr = &newValue - return o -} - -// MirrorStatus is a 'getter' method -func (o *AggrRaidAttributesType) MirrorStatus() string { - r := *o.MirrorStatusPtr - return r -} - -// SetMirrorStatus is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetMirrorStatus(newValue string) *AggrRaidAttributesType { - o.MirrorStatusPtr = &newValue - return o -} - -// MountState is a 'getter' method -func (o *AggrRaidAttributesType) MountState() string { - r := *o.MountStatePtr - return r -} - -// SetMountState is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetMountState(newValue string) *AggrRaidAttributesType { - o.MountStatePtr = &newValue - return o -} - -// PlexCount is a 'getter' method -func (o *AggrRaidAttributesType) PlexCount() int { - r := *o.PlexCountPtr - return r -} - -// SetPlexCount is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetPlexCount(newValue int) *AggrRaidAttributesType { - o.PlexCountPtr = &newValue - return o -} - -// AggrRaidAttributesTypePlexes is a wrapper -type AggrRaidAttributesTypePlexes struct { - XMLName xml.Name `xml:"plexes"` - PlexAttributesPtr []PlexAttributesType `xml:"plex-attributes"` -} - -// PlexAttributes is a 'getter' method -func (o *AggrRaidAttributesTypePlexes) PlexAttributes() []PlexAttributesType { - r := o.PlexAttributesPtr - return r -} - -// SetPlexAttributes is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesTypePlexes) SetPlexAttributes(newValue []PlexAttributesType) *AggrRaidAttributesTypePlexes { - newSlice := make([]PlexAttributesType, len(newValue)) - copy(newSlice, newValue) - o.PlexAttributesPtr = newSlice - return o -} - -// Plexes is a 'getter' method -func (o *AggrRaidAttributesType) Plexes() AggrRaidAttributesTypePlexes { - r := *o.PlexesPtr - return r -} - -// SetPlexes is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetPlexes(newValue AggrRaidAttributesTypePlexes) *AggrRaidAttributesType { - o.PlexesPtr = &newValue - return o -} - -// RaidLostWriteState is a 'getter' method -func (o *AggrRaidAttributesType) RaidLostWriteState() string { - r := *o.RaidLostWriteStatePtr - return r -} - -// SetRaidLostWriteState is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetRaidLostWriteState(newValue string) *AggrRaidAttributesType { - o.RaidLostWriteStatePtr = &newValue - return o -} - -// RaidSize is a 'getter' method -func (o *AggrRaidAttributesType) RaidSize() int { - r := *o.RaidSizePtr - return r -} - -// SetRaidSize is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetRaidSize(newValue int) *AggrRaidAttributesType { - o.RaidSizePtr = &newValue - return o -} - -// RaidStatus is a 'getter' method -func (o *AggrRaidAttributesType) RaidStatus() string { - r := *o.RaidStatusPtr - return r -} - -// SetRaidStatus is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetRaidStatus(newValue string) *AggrRaidAttributesType { - o.RaidStatusPtr = &newValue - return o -} - -// RaidType is a 'getter' method -func (o *AggrRaidAttributesType) RaidType() string { - r := 
*o.RaidTypePtr - return r -} - -// SetRaidType is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetRaidType(newValue string) *AggrRaidAttributesType { - o.RaidTypePtr = &newValue - return o -} - -// State is a 'getter' method -func (o *AggrRaidAttributesType) State() string { - r := *o.StatePtr - return r -} - -// SetState is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetState(newValue string) *AggrRaidAttributesType { - o.StatePtr = &newValue - return o -} - -// UsesSharedDisks is a 'getter' method -func (o *AggrRaidAttributesType) UsesSharedDisks() bool { - r := *o.UsesSharedDisksPtr - return r -} - -// SetUsesSharedDisks is a fluent style 'setter' method that can be chained -func (o *AggrRaidAttributesType) SetUsesSharedDisks(newValue bool) *AggrRaidAttributesType { - o.UsesSharedDisksPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-snaplock-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-snaplock-attributes.go deleted file mode 100644 index 605f4ef9b..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-snaplock-attributes.go +++ /dev/null @@ -1,58 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrSnaplockAttributesType is a structure to represent a aggr-snaplock-attributes ZAPI object -type AggrSnaplockAttributesType struct { - XMLName xml.Name `xml:"aggr-snaplock-attributes"` - IsSnaplockPtr *bool `xml:"is-snaplock"` - SnaplockTypePtr *string `xml:"snaplock-type"` -} - -// NewAggrSnaplockAttributesType is a factory method for creating new instances of AggrSnaplockAttributesType objects -func NewAggrSnaplockAttributesType() *AggrSnaplockAttributesType { - return &AggrSnaplockAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrSnaplockAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSnaplockAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IsSnaplock is a 'getter' method -func (o *AggrSnaplockAttributesType) IsSnaplock() bool { - r := *o.IsSnaplockPtr - return r -} - -// SetIsSnaplock is a fluent style 'setter' method that can be chained -func (o *AggrSnaplockAttributesType) SetIsSnaplock(newValue bool) *AggrSnaplockAttributesType { - o.IsSnaplockPtr = &newValue - return o -} - -// SnaplockType is a 'getter' method -func (o *AggrSnaplockAttributesType) SnaplockType() string { - r := *o.SnaplockTypePtr - return r -} - -// SetSnaplockType is a fluent style 'setter' method that can be chained -func (o *AggrSnaplockAttributesType) SetSnaplockType(newValue string) *AggrSnaplockAttributesType { - o.SnaplockTypePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-snapmirror-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-snapmirror-attributes.go deleted file mode 100644 index 07c50022e..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-snapmirror-attributes.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" 
- - log "github.com/sirupsen/logrus" -) - -// AggrSnapmirrorAttributesType is a structure to represent a aggr-snapmirror-attributes ZAPI object -type AggrSnapmirrorAttributesType struct { - XMLName xml.Name `xml:"aggr-snapmirror-attributes"` - DpSnapmirrorDestinationsPtr *int `xml:"dp-snapmirror-destinations"` - LsSnapmirrorDestinationsPtr *int `xml:"ls-snapmirror-destinations"` - MvSnapmirrorDestinationsPtr *int `xml:"mv-snapmirror-destinations"` -} - -// NewAggrSnapmirrorAttributesType is a factory method for creating new instances of AggrSnapmirrorAttributesType objects -func NewAggrSnapmirrorAttributesType() *AggrSnapmirrorAttributesType { - return &AggrSnapmirrorAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrSnapmirrorAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSnapmirrorAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// DpSnapmirrorDestinations is a 'getter' method -func (o *AggrSnapmirrorAttributesType) DpSnapmirrorDestinations() int { - r := *o.DpSnapmirrorDestinationsPtr - return r -} - -// SetDpSnapmirrorDestinations is a fluent style 'setter' method that can be chained -func (o *AggrSnapmirrorAttributesType) SetDpSnapmirrorDestinations(newValue int) *AggrSnapmirrorAttributesType { - o.DpSnapmirrorDestinationsPtr = &newValue - return o -} - -// LsSnapmirrorDestinations is a 'getter' method -func (o *AggrSnapmirrorAttributesType) LsSnapmirrorDestinations() int { - r := *o.LsSnapmirrorDestinationsPtr - return r -} - -// SetLsSnapmirrorDestinations is a fluent style 'setter' method that can be chained -func (o *AggrSnapmirrorAttributesType) SetLsSnapmirrorDestinations(newValue int) *AggrSnapmirrorAttributesType { - o.LsSnapmirrorDestinationsPtr = &newValue - return o -} - -// MvSnapmirrorDestinations is a 'getter' method -func (o *AggrSnapmirrorAttributesType) MvSnapmirrorDestinations() int { - r := *o.MvSnapmirrorDestinationsPtr - return r -} - -// SetMvSnapmirrorDestinations is a fluent style 'setter' method that can be chained -func (o *AggrSnapmirrorAttributesType) SetMvSnapmirrorDestinations(newValue int) *AggrSnapmirrorAttributesType { - o.MvSnapmirrorDestinationsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-snapshot-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-snapshot-attributes.go deleted file mode 100644 index 72a5c72ea..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-snapshot-attributes.go +++ /dev/null @@ -1,214 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrSnapshotAttributesType is a structure to represent a aggr-snapshot-attributes ZAPI object -type AggrSnapshotAttributesType struct { - XMLName xml.Name `xml:"aggr-snapshot-attributes"` - FilesTotalPtr *int `xml:"files-total"` - FilesUsedPtr *int `xml:"files-used"` - InofileVersionPtr *int `xml:"inofile-version"` - IsSnapshotAutoCreateEnabledPtr *bool `xml:"is-snapshot-auto-create-enabled"` - IsSnapshotAutoDeleteEnabledPtr *bool `xml:"is-snapshot-auto-delete-enabled"` - MaxfilesAvailablePtr *int `xml:"maxfiles-available"` - MaxfilesPossiblePtr *int 
`xml:"maxfiles-possible"` - MaxfilesUsedPtr *int `xml:"maxfiles-used"` - PercentInodeUsedCapacityPtr *int `xml:"percent-inode-used-capacity"` - PercentUsedCapacityPtr *int `xml:"percent-used-capacity"` - SizeAvailablePtr *int `xml:"size-available"` - SizeTotalPtr *int `xml:"size-total"` - SizeUsedPtr *int `xml:"size-used"` - SnapshotReservePercentPtr *int `xml:"snapshot-reserve-percent"` -} - -// NewAggrSnapshotAttributesType is a factory method for creating new instances of AggrSnapshotAttributesType objects -func NewAggrSnapshotAttributesType() *AggrSnapshotAttributesType { - return &AggrSnapshotAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrSnapshotAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSnapshotAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// FilesTotal is a 'getter' method -func (o *AggrSnapshotAttributesType) FilesTotal() int { - r := *o.FilesTotalPtr - return r -} - -// SetFilesTotal is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetFilesTotal(newValue int) *AggrSnapshotAttributesType { - o.FilesTotalPtr = &newValue - return o -} - -// FilesUsed is a 'getter' method -func (o *AggrSnapshotAttributesType) FilesUsed() int { - r := *o.FilesUsedPtr - return r -} - -// SetFilesUsed is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetFilesUsed(newValue int) *AggrSnapshotAttributesType { - o.FilesUsedPtr = &newValue - return o -} - -// InofileVersion is a 'getter' method -func (o *AggrSnapshotAttributesType) InofileVersion() int { - r := *o.InofileVersionPtr - return r -} - -// SetInofileVersion is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetInofileVersion(newValue int) *AggrSnapshotAttributesType { - o.InofileVersionPtr = &newValue - return o -} - -// IsSnapshotAutoCreateEnabled is a 'getter' method -func (o *AggrSnapshotAttributesType) IsSnapshotAutoCreateEnabled() bool { - r := *o.IsSnapshotAutoCreateEnabledPtr - return r -} - -// SetIsSnapshotAutoCreateEnabled is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetIsSnapshotAutoCreateEnabled(newValue bool) *AggrSnapshotAttributesType { - o.IsSnapshotAutoCreateEnabledPtr = &newValue - return o -} - -// IsSnapshotAutoDeleteEnabled is a 'getter' method -func (o *AggrSnapshotAttributesType) IsSnapshotAutoDeleteEnabled() bool { - r := *o.IsSnapshotAutoDeleteEnabledPtr - return r -} - -// SetIsSnapshotAutoDeleteEnabled is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetIsSnapshotAutoDeleteEnabled(newValue bool) *AggrSnapshotAttributesType { - o.IsSnapshotAutoDeleteEnabledPtr = &newValue - return o -} - -// MaxfilesAvailable is a 'getter' method -func (o *AggrSnapshotAttributesType) MaxfilesAvailable() int { - r := *o.MaxfilesAvailablePtr - return r -} - -// SetMaxfilesAvailable is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetMaxfilesAvailable(newValue int) *AggrSnapshotAttributesType { - o.MaxfilesAvailablePtr = &newValue - return o -} - -// MaxfilesPossible is a 'getter' method -func (o *AggrSnapshotAttributesType) MaxfilesPossible() int { - r 
:= *o.MaxfilesPossiblePtr - return r -} - -// SetMaxfilesPossible is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetMaxfilesPossible(newValue int) *AggrSnapshotAttributesType { - o.MaxfilesPossiblePtr = &newValue - return o -} - -// MaxfilesUsed is a 'getter' method -func (o *AggrSnapshotAttributesType) MaxfilesUsed() int { - r := *o.MaxfilesUsedPtr - return r -} - -// SetMaxfilesUsed is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetMaxfilesUsed(newValue int) *AggrSnapshotAttributesType { - o.MaxfilesUsedPtr = &newValue - return o -} - -// PercentInodeUsedCapacity is a 'getter' method -func (o *AggrSnapshotAttributesType) PercentInodeUsedCapacity() int { - r := *o.PercentInodeUsedCapacityPtr - return r -} - -// SetPercentInodeUsedCapacity is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetPercentInodeUsedCapacity(newValue int) *AggrSnapshotAttributesType { - o.PercentInodeUsedCapacityPtr = &newValue - return o -} - -// PercentUsedCapacity is a 'getter' method -func (o *AggrSnapshotAttributesType) PercentUsedCapacity() int { - r := *o.PercentUsedCapacityPtr - return r -} - -// SetPercentUsedCapacity is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetPercentUsedCapacity(newValue int) *AggrSnapshotAttributesType { - o.PercentUsedCapacityPtr = &newValue - return o -} - -// SizeAvailable is a 'getter' method -func (o *AggrSnapshotAttributesType) SizeAvailable() int { - r := *o.SizeAvailablePtr - return r -} - -// SetSizeAvailable is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetSizeAvailable(newValue int) *AggrSnapshotAttributesType { - o.SizeAvailablePtr = &newValue - return o -} - -// SizeTotal is a 'getter' method -func (o *AggrSnapshotAttributesType) SizeTotal() int { - r := *o.SizeTotalPtr - return r -} - -// SetSizeTotal is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetSizeTotal(newValue int) *AggrSnapshotAttributesType { - o.SizeTotalPtr = &newValue - return o -} - -// SizeUsed is a 'getter' method -func (o *AggrSnapshotAttributesType) SizeUsed() int { - r := *o.SizeUsedPtr - return r -} - -// SetSizeUsed is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetSizeUsed(newValue int) *AggrSnapshotAttributesType { - o.SizeUsedPtr = &newValue - return o -} - -// SnapshotReservePercent is a 'getter' method -func (o *AggrSnapshotAttributesType) SnapshotReservePercent() int { - r := *o.SnapshotReservePercentPtr - return r -} - -// SetSnapshotReservePercent is a fluent style 'setter' method that can be chained -func (o *AggrSnapshotAttributesType) SetSnapshotReservePercent(newValue int) *AggrSnapshotAttributesType { - o.SnapshotReservePercentPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-space-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-space-attributes.go deleted file mode 100644 index a3d1ff073..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-space-attributes.go +++ /dev/null @@ -1,292 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrSpaceAttributesType is a structure to represent a aggr-space-attributes ZAPI object -type AggrSpaceAttributesType struct { - XMLName 
xml.Name `xml:"aggr-space-attributes"` - AggregateMetadataPtr *int `xml:"aggregate-metadata"` - CapacityTierUsedPtr *int `xml:"capacity-tier-used"` - DataCompactedCountPtr *int `xml:"data-compacted-count"` - DataCompactionSpaceSavedPtr *int `xml:"data-compaction-space-saved"` - DataCompactionSpaceSavedPercentPtr *int `xml:"data-compaction-space-saved-percent"` - HybridCacheSizeTotalPtr *int `xml:"hybrid-cache-size-total"` - PercentUsedCapacityPtr *int `xml:"percent-used-capacity"` - PerformanceTierInactiveUserDataPtr *int `xml:"performance-tier-inactive-user-data"` - PerformanceTierInactiveUserDataPercentPtr *int `xml:"performance-tier-inactive-user-data-percent"` - PhysicalUsedPtr *int `xml:"physical-used"` - PhysicalUsedPercentPtr *int `xml:"physical-used-percent"` - SisSharedCountPtr *int `xml:"sis-shared-count"` - SisSpaceSavedPtr *int `xml:"sis-space-saved"` - SisSpaceSavedPercentPtr *int `xml:"sis-space-saved-percent"` - SizeAvailablePtr *int `xml:"size-available"` - SizeTotalPtr *int `xml:"size-total"` - SizeUsedPtr *int `xml:"size-used"` - TotalReservedSpacePtr *int `xml:"total-reserved-space"` - UsedIncludingSnapshotReservePtr *int `xml:"used-including-snapshot-reserve"` - VolumeFootprintsPtr *int `xml:"volume-footprints"` -} - -// NewAggrSpaceAttributesType is a factory method for creating new instances of AggrSpaceAttributesType objects -func NewAggrSpaceAttributesType() *AggrSpaceAttributesType { - return &AggrSpaceAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrSpaceAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrSpaceAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AggregateMetadata is a 'getter' method -func (o *AggrSpaceAttributesType) AggregateMetadata() int { - r := *o.AggregateMetadataPtr - return r -} - -// SetAggregateMetadata is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetAggregateMetadata(newValue int) *AggrSpaceAttributesType { - o.AggregateMetadataPtr = &newValue - return o -} - -// CapacityTierUsed is a 'getter' method -func (o *AggrSpaceAttributesType) CapacityTierUsed() int { - r := *o.CapacityTierUsedPtr - return r -} - -// SetCapacityTierUsed is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetCapacityTierUsed(newValue int) *AggrSpaceAttributesType { - o.CapacityTierUsedPtr = &newValue - return o -} - -// DataCompactedCount is a 'getter' method -func (o *AggrSpaceAttributesType) DataCompactedCount() int { - r := *o.DataCompactedCountPtr - return r -} - -// SetDataCompactedCount is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetDataCompactedCount(newValue int) *AggrSpaceAttributesType { - o.DataCompactedCountPtr = &newValue - return o -} - -// DataCompactionSpaceSaved is a 'getter' method -func (o *AggrSpaceAttributesType) DataCompactionSpaceSaved() int { - r := *o.DataCompactionSpaceSavedPtr - return r -} - -// SetDataCompactionSpaceSaved is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetDataCompactionSpaceSaved(newValue int) *AggrSpaceAttributesType { - o.DataCompactionSpaceSavedPtr = &newValue - return o -} - -// DataCompactionSpaceSavedPercent is a 'getter' method 
-func (o *AggrSpaceAttributesType) DataCompactionSpaceSavedPercent() int { - r := *o.DataCompactionSpaceSavedPercentPtr - return r -} - -// SetDataCompactionSpaceSavedPercent is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetDataCompactionSpaceSavedPercent(newValue int) *AggrSpaceAttributesType { - o.DataCompactionSpaceSavedPercentPtr = &newValue - return o -} - -// HybridCacheSizeTotal is a 'getter' method -func (o *AggrSpaceAttributesType) HybridCacheSizeTotal() int { - r := *o.HybridCacheSizeTotalPtr - return r -} - -// SetHybridCacheSizeTotal is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetHybridCacheSizeTotal(newValue int) *AggrSpaceAttributesType { - o.HybridCacheSizeTotalPtr = &newValue - return o -} - -// PercentUsedCapacity is a 'getter' method -func (o *AggrSpaceAttributesType) PercentUsedCapacity() int { - r := *o.PercentUsedCapacityPtr - return r -} - -// SetPercentUsedCapacity is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetPercentUsedCapacity(newValue int) *AggrSpaceAttributesType { - o.PercentUsedCapacityPtr = &newValue - return o -} - -// PerformanceTierInactiveUserData is a 'getter' method -func (o *AggrSpaceAttributesType) PerformanceTierInactiveUserData() int { - r := *o.PerformanceTierInactiveUserDataPtr - return r -} - -// SetPerformanceTierInactiveUserData is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetPerformanceTierInactiveUserData(newValue int) *AggrSpaceAttributesType { - o.PerformanceTierInactiveUserDataPtr = &newValue - return o -} - -// PerformanceTierInactiveUserDataPercent is a 'getter' method -func (o *AggrSpaceAttributesType) PerformanceTierInactiveUserDataPercent() int { - r := *o.PerformanceTierInactiveUserDataPercentPtr - return r -} - -// SetPerformanceTierInactiveUserDataPercent is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetPerformanceTierInactiveUserDataPercent(newValue int) *AggrSpaceAttributesType { - o.PerformanceTierInactiveUserDataPercentPtr = &newValue - return o -} - -// PhysicalUsed is a 'getter' method -func (o *AggrSpaceAttributesType) PhysicalUsed() int { - r := *o.PhysicalUsedPtr - return r -} - -// SetPhysicalUsed is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetPhysicalUsed(newValue int) *AggrSpaceAttributesType { - o.PhysicalUsedPtr = &newValue - return o -} - -// PhysicalUsedPercent is a 'getter' method -func (o *AggrSpaceAttributesType) PhysicalUsedPercent() int { - r := *o.PhysicalUsedPercentPtr - return r -} - -// SetPhysicalUsedPercent is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetPhysicalUsedPercent(newValue int) *AggrSpaceAttributesType { - o.PhysicalUsedPercentPtr = &newValue - return o -} - -// SisSharedCount is a 'getter' method -func (o *AggrSpaceAttributesType) SisSharedCount() int { - r := *o.SisSharedCountPtr - return r -} - -// SetSisSharedCount is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetSisSharedCount(newValue int) *AggrSpaceAttributesType { - o.SisSharedCountPtr = &newValue - return o -} - -// SisSpaceSaved is a 'getter' method -func (o *AggrSpaceAttributesType) SisSpaceSaved() int { - r := *o.SisSpaceSavedPtr - return r -} - -// SetSisSpaceSaved is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetSisSpaceSaved(newValue int) 
*AggrSpaceAttributesType { - o.SisSpaceSavedPtr = &newValue - return o -} - -// SisSpaceSavedPercent is a 'getter' method -func (o *AggrSpaceAttributesType) SisSpaceSavedPercent() int { - r := *o.SisSpaceSavedPercentPtr - return r -} - -// SetSisSpaceSavedPercent is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetSisSpaceSavedPercent(newValue int) *AggrSpaceAttributesType { - o.SisSpaceSavedPercentPtr = &newValue - return o -} - -// SizeAvailable is a 'getter' method -func (o *AggrSpaceAttributesType) SizeAvailable() int { - r := *o.SizeAvailablePtr - return r -} - -// SetSizeAvailable is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetSizeAvailable(newValue int) *AggrSpaceAttributesType { - o.SizeAvailablePtr = &newValue - return o -} - -// SizeTotal is a 'getter' method -func (o *AggrSpaceAttributesType) SizeTotal() int { - r := *o.SizeTotalPtr - return r -} - -// SetSizeTotal is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetSizeTotal(newValue int) *AggrSpaceAttributesType { - o.SizeTotalPtr = &newValue - return o -} - -// SizeUsed is a 'getter' method -func (o *AggrSpaceAttributesType) SizeUsed() int { - r := *o.SizeUsedPtr - return r -} - -// SetSizeUsed is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetSizeUsed(newValue int) *AggrSpaceAttributesType { - o.SizeUsedPtr = &newValue - return o -} - -// TotalReservedSpace is a 'getter' method -func (o *AggrSpaceAttributesType) TotalReservedSpace() int { - r := *o.TotalReservedSpacePtr - return r -} - -// SetTotalReservedSpace is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetTotalReservedSpace(newValue int) *AggrSpaceAttributesType { - o.TotalReservedSpacePtr = &newValue - return o -} - -// UsedIncludingSnapshotReserve is a 'getter' method -func (o *AggrSpaceAttributesType) UsedIncludingSnapshotReserve() int { - r := *o.UsedIncludingSnapshotReservePtr - return r -} - -// SetUsedIncludingSnapshotReserve is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetUsedIncludingSnapshotReserve(newValue int) *AggrSpaceAttributesType { - o.UsedIncludingSnapshotReservePtr = &newValue - return o -} - -// VolumeFootprints is a 'getter' method -func (o *AggrSpaceAttributesType) VolumeFootprints() int { - r := *o.VolumeFootprintsPtr - return r -} - -// SetVolumeFootprints is a fluent style 'setter' method that can be chained -func (o *AggrSpaceAttributesType) SetVolumeFootprints(newValue int) *AggrSpaceAttributesType { - o.VolumeFootprintsPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-start-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-start-attributes.go deleted file mode 100644 index c0e696af5..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-start-attributes.go +++ /dev/null @@ -1,58 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrStartAttributesType is a structure to represent a aggr-start-attributes ZAPI object -type AggrStartAttributesType struct { - XMLName xml.Name `xml:"aggr-start-attributes"` - MinSpaceForUpgradePtr *int `xml:"min-space-for-upgrade"` - StartLastErrnoPtr *int `xml:"start-last-errno"` -} - -// NewAggrStartAttributesType is a factory method for creating new instances of 
AggrStartAttributesType objects -func NewAggrStartAttributesType() *AggrStartAttributesType { - return &AggrStartAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrStartAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrStartAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// MinSpaceForUpgrade is a 'getter' method -func (o *AggrStartAttributesType) MinSpaceForUpgrade() int { - r := *o.MinSpaceForUpgradePtr - return r -} - -// SetMinSpaceForUpgrade is a fluent style 'setter' method that can be chained -func (o *AggrStartAttributesType) SetMinSpaceForUpgrade(newValue int) *AggrStartAttributesType { - o.MinSpaceForUpgradePtr = &newValue - return o -} - -// StartLastErrno is a 'getter' method -func (o *AggrStartAttributesType) StartLastErrno() int { - r := *o.StartLastErrnoPtr - return r -} - -// SetStartLastErrno is a fluent style 'setter' method that can be chained -func (o *AggrStartAttributesType) SetStartLastErrno(newValue int) *AggrStartAttributesType { - o.StartLastErrnoPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-status-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-status-attributes.go deleted file mode 100644 index f0e328e39..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-status-attributes.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrStatusAttributesType is a structure to represent a aggr-status-attributes ZAPI object -type AggrStatusAttributesType struct { - XMLName xml.Name `xml:"aggr-status-attributes"` - Is64BitUpgradeInProgressPtr *bool `xml:"is-64-bit-upgrade-in-progress"` -} - -// NewAggrStatusAttributesType is a factory method for creating new instances of AggrStatusAttributesType objects -func NewAggrStatusAttributesType() *AggrStatusAttributesType { - return &AggrStatusAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrStatusAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrStatusAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Is64BitUpgradeInProgress is a 'getter' method -func (o *AggrStatusAttributesType) Is64BitUpgradeInProgress() bool { - r := *o.Is64BitUpgradeInProgressPtr - return r -} - -// SetIs64BitUpgradeInProgress is a fluent style 'setter' method that can be chained -func (o *AggrStatusAttributesType) SetIs64BitUpgradeInProgress(newValue bool) *AggrStatusAttributesType { - o.Is64BitUpgradeInProgressPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-striping-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-striping-attributes.go deleted file mode 100644 index b301408cf..000000000 --- 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-striping-attributes.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrStripingAttributesType is a structure to represent a aggr-striping-attributes ZAPI object -type AggrStripingAttributesType struct { - XMLName xml.Name `xml:"aggr-striping-attributes"` - MemberCountPtr *int `xml:"member-count"` -} - -// NewAggrStripingAttributesType is a factory method for creating new instances of AggrStripingAttributesType objects -func NewAggrStripingAttributesType() *AggrStripingAttributesType { - return &AggrStripingAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrStripingAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrStripingAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// MemberCount is a 'getter' method -func (o *AggrStripingAttributesType) MemberCount() int { - r := *o.MemberCountPtr - return r -} - -// SetMemberCount is a fluent style 'setter' method that can be chained -func (o *AggrStripingAttributesType) SetMemberCount(newValue int) *AggrStripingAttributesType { - o.MemberCountPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-volume-count-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-volume-count-attributes.go deleted file mode 100644 index a01bb6fe4..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-volume-count-attributes.go +++ /dev/null @@ -1,97 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrVolumeCountAttributesType is a structure to represent a aggr-volume-count-attributes ZAPI object -type AggrVolumeCountAttributesType struct { - XMLName xml.Name `xml:"aggr-volume-count-attributes"` - FlexvolCountPtr *int `xml:"flexvol-count"` - FlexvolCountCollectivePtr *int `xml:"flexvol-count-collective"` - FlexvolCountNotOnlinePtr *int `xml:"flexvol-count-not-online"` - FlexvolCountQuiescedPtr *int `xml:"flexvol-count-quiesced"` - FlexvolCountStripedPtr *int `xml:"flexvol-count-striped"` -} - -// NewAggrVolumeCountAttributesType is a factory method for creating new instances of AggrVolumeCountAttributesType objects -func NewAggrVolumeCountAttributesType() *AggrVolumeCountAttributesType { - return &AggrVolumeCountAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrVolumeCountAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrVolumeCountAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// FlexvolCount is a 'getter' method -func (o *AggrVolumeCountAttributesType) FlexvolCount() int { - r := *o.FlexvolCountPtr - return r -} - -// SetFlexvolCount is a fluent style 'setter' method that can be chained -func (o *AggrVolumeCountAttributesType) SetFlexvolCount(newValue int) 
*AggrVolumeCountAttributesType { - o.FlexvolCountPtr = &newValue - return o -} - -// FlexvolCountCollective is a 'getter' method -func (o *AggrVolumeCountAttributesType) FlexvolCountCollective() int { - r := *o.FlexvolCountCollectivePtr - return r -} - -// SetFlexvolCountCollective is a fluent style 'setter' method that can be chained -func (o *AggrVolumeCountAttributesType) SetFlexvolCountCollective(newValue int) *AggrVolumeCountAttributesType { - o.FlexvolCountCollectivePtr = &newValue - return o -} - -// FlexvolCountNotOnline is a 'getter' method -func (o *AggrVolumeCountAttributesType) FlexvolCountNotOnline() int { - r := *o.FlexvolCountNotOnlinePtr - return r -} - -// SetFlexvolCountNotOnline is a fluent style 'setter' method that can be chained -func (o *AggrVolumeCountAttributesType) SetFlexvolCountNotOnline(newValue int) *AggrVolumeCountAttributesType { - o.FlexvolCountNotOnlinePtr = &newValue - return o -} - -// FlexvolCountQuiesced is a 'getter' method -func (o *AggrVolumeCountAttributesType) FlexvolCountQuiesced() int { - r := *o.FlexvolCountQuiescedPtr - return r -} - -// SetFlexvolCountQuiesced is a fluent style 'setter' method that can be chained -func (o *AggrVolumeCountAttributesType) SetFlexvolCountQuiesced(newValue int) *AggrVolumeCountAttributesType { - o.FlexvolCountQuiescedPtr = &newValue - return o -} - -// FlexvolCountStriped is a 'getter' method -func (o *AggrVolumeCountAttributesType) FlexvolCountStriped() int { - r := *o.FlexvolCountStripedPtr - return r -} - -// SetFlexvolCountStriped is a fluent style 'setter' method that can be chained -func (o *AggrVolumeCountAttributesType) SetFlexvolCountStriped(newValue int) *AggrVolumeCountAttributesType { - o.FlexvolCountStripedPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-wafliron-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-wafliron-attributes.go deleted file mode 100644 index 41fb7a1f6..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggr-wafliron-attributes.go +++ /dev/null @@ -1,97 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// AggrWaflironAttributesType is a structure to represent a aggr-wafliron-attributes ZAPI object -type AggrWaflironAttributesType struct { - XMLName xml.Name `xml:"aggr-wafliron-attributes"` - LastStartErrnoPtr *int `xml:"last-start-errno"` - LastStartErrorInfoPtr *string `xml:"last-start-error-info"` - ScanPercentagePtr *int `xml:"scan-percentage"` - StatePtr *string `xml:"state"` - SummaryScanPercentagePtr *int `xml:"summary-scan-percentage"` -} - -// NewAggrWaflironAttributesType is a factory method for creating new instances of AggrWaflironAttributesType objects -func NewAggrWaflironAttributesType() *AggrWaflironAttributesType { - return &AggrWaflironAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *AggrWaflironAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o AggrWaflironAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// LastStartErrno is a 'getter' method -func (o *AggrWaflironAttributesType) LastStartErrno() int { - r := *o.LastStartErrnoPtr - return r -} - -// 
SetLastStartErrno is a fluent style 'setter' method that can be chained -func (o *AggrWaflironAttributesType) SetLastStartErrno(newValue int) *AggrWaflironAttributesType { - o.LastStartErrnoPtr = &newValue - return o -} - -// LastStartErrorInfo is a 'getter' method -func (o *AggrWaflironAttributesType) LastStartErrorInfo() string { - r := *o.LastStartErrorInfoPtr - return r -} - -// SetLastStartErrorInfo is a fluent style 'setter' method that can be chained -func (o *AggrWaflironAttributesType) SetLastStartErrorInfo(newValue string) *AggrWaflironAttributesType { - o.LastStartErrorInfoPtr = &newValue - return o -} - -// ScanPercentage is a 'getter' method -func (o *AggrWaflironAttributesType) ScanPercentage() int { - r := *o.ScanPercentagePtr - return r -} - -// SetScanPercentage is a fluent style 'setter' method that can be chained -func (o *AggrWaflironAttributesType) SetScanPercentage(newValue int) *AggrWaflironAttributesType { - o.ScanPercentagePtr = &newValue - return o -} - -// State is a 'getter' method -func (o *AggrWaflironAttributesType) State() string { - r := *o.StatePtr - return r -} - -// SetState is a fluent style 'setter' method that can be chained -func (o *AggrWaflironAttributesType) SetState(newValue string) *AggrWaflironAttributesType { - o.StatePtr = &newValue - return o -} - -// SummaryScanPercentage is a 'getter' method -func (o *AggrWaflironAttributesType) SummaryScanPercentage() int { - r := *o.SummaryScanPercentagePtr - return r -} - -// SetSummaryScanPercentage is a fluent style 'setter' method that can be chained -func (o *AggrWaflironAttributesType) SetSummaryScanPercentage(newValue int) *AggrWaflironAttributesType { - o.SummaryScanPercentagePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggregatetype.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggregatetype.go deleted file mode 100644 index b5b7bceae..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-aggregatetype.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// AggregatetypeType is a structure to represent a aggregatetype ZAPI object -type AggregatetypeType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-antivirus-policy.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-antivirus-policy.go deleted file mode 100644 index 6ef6f5780..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-antivirus-policy.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// AntivirusPolicyType is a structure to represent a antivirus-policy ZAPI object -type AntivirusPolicyType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-autobalance-aggregate-state.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-autobalance-aggregate-state.go deleted file mode 100644 index f67571103..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-autobalance-aggregate-state.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// AutobalanceAggregateStateType is a structure to represent a autobalance-aggregate-state ZAPI object -type AutobalanceAggregateStateType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-block-range.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-block-range.go deleted file mode 100644 index 480539f1e..000000000 --- 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-block-range.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// BlockRangeType is a structure to represent a block-range ZAPI object -type BlockRangeType struct { - XMLName xml.Name `xml:"block-range"` - BlockCountPtr *int `xml:"block-count"` - DestinationBlockNumberPtr *int `xml:"destination-block-number"` - SourceBlockNumberPtr *int `xml:"source-block-number"` -} - -// NewBlockRangeType is a factory method for creating new instances of BlockRangeType objects -func NewBlockRangeType() *BlockRangeType { - return &BlockRangeType{} -} - -// ToXML converts this object into an xml string representation -func (o *BlockRangeType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o BlockRangeType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// BlockCount is a 'getter' method -func (o *BlockRangeType) BlockCount() int { - r := *o.BlockCountPtr - return r -} - -// SetBlockCount is a fluent style 'setter' method that can be chained -func (o *BlockRangeType) SetBlockCount(newValue int) *BlockRangeType { - o.BlockCountPtr = &newValue - return o -} - -// DestinationBlockNumber is a 'getter' method -func (o *BlockRangeType) DestinationBlockNumber() int { - r := *o.DestinationBlockNumberPtr - return r -} - -// SetDestinationBlockNumber is a fluent style 'setter' method that can be chained -func (o *BlockRangeType) SetDestinationBlockNumber(newValue int) *BlockRangeType { - o.DestinationBlockNumberPtr = &newValue - return o -} - -// SourceBlockNumber is a 'getter' method -func (o *BlockRangeType) SourceBlockNumber() int { - r := *o.SourceBlockNumberPtr - return r -} - -// SetSourceBlockNumber is a fluent style 'setter' method that can be chained -func (o *BlockRangeType) SetSourceBlockNumber(newValue int) *BlockRangeType { - o.SourceBlockNumberPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-cache-policy.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-cache-policy.go deleted file mode 100644 index de1b893b8..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-cache-policy.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// CachePolicyType is a structure to represent a cache-policy ZAPI object -type CachePolicyType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-data-protocol.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-data-protocol.go deleted file mode 100644 index 5386df023..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-data-protocol.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// DataProtocolType is a structure to represent a data-protocol ZAPI object -type DataProtocolType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-dns-zone.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-dns-zone.go deleted file mode 100644 index 822677e66..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-dns-zone.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// DnsZoneType is a structure to represent 
a dns-zone ZAPI object -type DnsZoneType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-export-policy-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-export-policy-info.go deleted file mode 100644 index cf3458a82..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-export-policy-info.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// ExportPolicyInfoType is a structure to represent a export-policy-info ZAPI object -type ExportPolicyInfoType struct { - XMLName xml.Name `xml:"export-policy-info"` - PolicyIdPtr *int `xml:"policy-id"` - PolicyNamePtr *ExportPolicyNameType `xml:"policy-name"` - VserverPtr *string `xml:"vserver"` -} - -// NewExportPolicyInfoType is a factory method for creating new instances of ExportPolicyInfoType objects -func NewExportPolicyInfoType() *ExportPolicyInfoType { - return &ExportPolicyInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *ExportPolicyInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportPolicyInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// PolicyId is a 'getter' method -func (o *ExportPolicyInfoType) PolicyId() int { - r := *o.PolicyIdPtr - return r -} - -// SetPolicyId is a fluent style 'setter' method that can be chained -func (o *ExportPolicyInfoType) SetPolicyId(newValue int) *ExportPolicyInfoType { - o.PolicyIdPtr = &newValue - return o -} - -// PolicyName is a 'getter' method -func (o *ExportPolicyInfoType) PolicyName() ExportPolicyNameType { - r := *o.PolicyNamePtr - return r -} - -// SetPolicyName is a fluent style 'setter' method that can be chained -func (o *ExportPolicyInfoType) SetPolicyName(newValue ExportPolicyNameType) *ExportPolicyInfoType { - o.PolicyNamePtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *ExportPolicyInfoType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *ExportPolicyInfoType) SetVserver(newValue string) *ExportPolicyInfoType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-export-policy-name.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-export-policy-name.go deleted file mode 100644 index 81ec9c1fe..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-export-policy-name.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// ExportPolicyNameType is a structure to represent a export-policy-name ZAPI object -type ExportPolicyNameType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-export-rule-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-export-rule-info.go deleted file mode 100644 index 60b7a0fdd..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-export-rule-info.go +++ /dev/null @@ -1,285 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// ExportRuleInfoType is a structure to represent a export-rule-info ZAPI object -type 
ExportRuleInfoType struct { - XMLName xml.Name `xml:"export-rule-info"` - AnonymousUserIdPtr *string `xml:"anonymous-user-id"` - ClientMatchPtr *string `xml:"client-match"` - ExportChownModePtr *ExportchownmodeType `xml:"export-chown-mode"` - ExportNtfsUnixSecurityOpsPtr *ExportntfsunixsecopsType `xml:"export-ntfs-unix-security-ops"` - IsAllowDevIsEnabledPtr *bool `xml:"is-allow-dev-is-enabled"` - IsAllowSetUidEnabledPtr *bool `xml:"is-allow-set-uid-enabled"` - PolicyNamePtr *ExportPolicyNameType `xml:"policy-name"` - ProtocolPtr *ExportRuleInfoTypeProtocol `xml:"protocol"` - // work in progress - RoRulePtr *ExportRuleInfoTypeRoRule `xml:"ro-rule"` - // work in progress - RuleIndexPtr *int `xml:"rule-index"` - RwRulePtr *ExportRuleInfoTypeRwRule `xml:"rw-rule"` - // work in progress - SuperUserSecurityPtr *ExportRuleInfoTypeSuperUserSecurity `xml:"super-user-security"` - // work in progress - VserverNamePtr *string `xml:"vserver-name"` -} - -// NewExportRuleInfoType is a factory method for creating new instances of ExportRuleInfoType objects -func NewExportRuleInfoType() *ExportRuleInfoType { - return &ExportRuleInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *ExportRuleInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ExportRuleInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AnonymousUserId is a 'getter' method -func (o *ExportRuleInfoType) AnonymousUserId() string { - r := *o.AnonymousUserIdPtr - return r -} - -// SetAnonymousUserId is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetAnonymousUserId(newValue string) *ExportRuleInfoType { - o.AnonymousUserIdPtr = &newValue - return o -} - -// ClientMatch is a 'getter' method -func (o *ExportRuleInfoType) ClientMatch() string { - r := *o.ClientMatchPtr - return r -} - -// SetClientMatch is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetClientMatch(newValue string) *ExportRuleInfoType { - o.ClientMatchPtr = &newValue - return o -} - -// ExportChownMode is a 'getter' method -func (o *ExportRuleInfoType) ExportChownMode() ExportchownmodeType { - r := *o.ExportChownModePtr - return r -} - -// SetExportChownMode is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetExportChownMode(newValue ExportchownmodeType) *ExportRuleInfoType { - o.ExportChownModePtr = &newValue - return o -} - -// ExportNtfsUnixSecurityOps is a 'getter' method -func (o *ExportRuleInfoType) ExportNtfsUnixSecurityOps() ExportntfsunixsecopsType { - r := *o.ExportNtfsUnixSecurityOpsPtr - return r -} - -// SetExportNtfsUnixSecurityOps is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetExportNtfsUnixSecurityOps(newValue ExportntfsunixsecopsType) *ExportRuleInfoType { - o.ExportNtfsUnixSecurityOpsPtr = &newValue - return o -} - -// IsAllowDevIsEnabled is a 'getter' method -func (o *ExportRuleInfoType) IsAllowDevIsEnabled() bool { - r := *o.IsAllowDevIsEnabledPtr - return r -} - -// SetIsAllowDevIsEnabled is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetIsAllowDevIsEnabled(newValue bool) *ExportRuleInfoType { - o.IsAllowDevIsEnabledPtr = &newValue - return o -} - -// IsAllowSetUidEnabled is a 'getter' 
method -func (o *ExportRuleInfoType) IsAllowSetUidEnabled() bool { - r := *o.IsAllowSetUidEnabledPtr - return r -} - -// SetIsAllowSetUidEnabled is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetIsAllowSetUidEnabled(newValue bool) *ExportRuleInfoType { - o.IsAllowSetUidEnabledPtr = &newValue - return o -} - -// PolicyName is a 'getter' method -func (o *ExportRuleInfoType) PolicyName() ExportPolicyNameType { - r := *o.PolicyNamePtr - return r -} - -// SetPolicyName is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetPolicyName(newValue ExportPolicyNameType) *ExportRuleInfoType { - o.PolicyNamePtr = &newValue - return o -} - -// ExportRuleInfoTypeProtocol is a wrapper -type ExportRuleInfoTypeProtocol struct { - XMLName xml.Name `xml:"protocol"` - AccessProtocolPtr []AccessProtocolType `xml:"access-protocol"` -} - -// AccessProtocol is a 'getter' method -func (o *ExportRuleInfoTypeProtocol) AccessProtocol() []AccessProtocolType { - r := o.AccessProtocolPtr - return r -} - -// SetAccessProtocol is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoTypeProtocol) SetAccessProtocol(newValue []AccessProtocolType) *ExportRuleInfoTypeProtocol { - newSlice := make([]AccessProtocolType, len(newValue)) - copy(newSlice, newValue) - o.AccessProtocolPtr = newSlice - return o -} - -// Protocol is a 'getter' method -func (o *ExportRuleInfoType) Protocol() ExportRuleInfoTypeProtocol { - r := *o.ProtocolPtr - return r -} - -// SetProtocol is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetProtocol(newValue ExportRuleInfoTypeProtocol) *ExportRuleInfoType { - o.ProtocolPtr = &newValue - return o -} - -// ExportRuleInfoTypeRoRule is a wrapper -type ExportRuleInfoTypeRoRule struct { - XMLName xml.Name `xml:"ro-rule"` - SecurityFlavorPtr []SecurityFlavorType `xml:"security-flavor"` -} - -// SecurityFlavor is a 'getter' method -func (o *ExportRuleInfoTypeRoRule) SecurityFlavor() []SecurityFlavorType { - r := o.SecurityFlavorPtr - return r -} - -// SetSecurityFlavor is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoTypeRoRule) SetSecurityFlavor(newValue []SecurityFlavorType) *ExportRuleInfoTypeRoRule { - newSlice := make([]SecurityFlavorType, len(newValue)) - copy(newSlice, newValue) - o.SecurityFlavorPtr = newSlice - return o -} - -// RoRule is a 'getter' method -func (o *ExportRuleInfoType) RoRule() ExportRuleInfoTypeRoRule { - r := *o.RoRulePtr - return r -} - -// SetRoRule is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetRoRule(newValue ExportRuleInfoTypeRoRule) *ExportRuleInfoType { - o.RoRulePtr = &newValue - return o -} - -// RuleIndex is a 'getter' method -func (o *ExportRuleInfoType) RuleIndex() int { - r := *o.RuleIndexPtr - return r -} - -// SetRuleIndex is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetRuleIndex(newValue int) *ExportRuleInfoType { - o.RuleIndexPtr = &newValue - return o -} - -// ExportRuleInfoTypeRwRule is a wrapper -type ExportRuleInfoTypeRwRule struct { - XMLName xml.Name `xml:"rw-rule"` - SecurityFlavorPtr []SecurityFlavorType `xml:"security-flavor"` -} - -// SecurityFlavor is a 'getter' method -func (o *ExportRuleInfoTypeRwRule) SecurityFlavor() []SecurityFlavorType { - r := o.SecurityFlavorPtr - return r -} - -// SetSecurityFlavor is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoTypeRwRule) SetSecurityFlavor(newValue 
[]SecurityFlavorType) *ExportRuleInfoTypeRwRule { - newSlice := make([]SecurityFlavorType, len(newValue)) - copy(newSlice, newValue) - o.SecurityFlavorPtr = newSlice - return o -} - -// RwRule is a 'getter' method -func (o *ExportRuleInfoType) RwRule() ExportRuleInfoTypeRwRule { - r := *o.RwRulePtr - return r -} - -// SetRwRule is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetRwRule(newValue ExportRuleInfoTypeRwRule) *ExportRuleInfoType { - o.RwRulePtr = &newValue - return o -} - -// ExportRuleInfoTypeSuperUserSecurity is a wrapper -type ExportRuleInfoTypeSuperUserSecurity struct { - XMLName xml.Name `xml:"super-user-security"` - SecurityFlavorPtr []SecurityFlavorType `xml:"security-flavor"` -} - -// SecurityFlavor is a 'getter' method -func (o *ExportRuleInfoTypeSuperUserSecurity) SecurityFlavor() []SecurityFlavorType { - r := o.SecurityFlavorPtr - return r -} - -// SetSecurityFlavor is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoTypeSuperUserSecurity) SetSecurityFlavor(newValue []SecurityFlavorType) *ExportRuleInfoTypeSuperUserSecurity { - newSlice := make([]SecurityFlavorType, len(newValue)) - copy(newSlice, newValue) - o.SecurityFlavorPtr = newSlice - return o -} - -// SuperUserSecurity is a 'getter' method -func (o *ExportRuleInfoType) SuperUserSecurity() ExportRuleInfoTypeSuperUserSecurity { - r := *o.SuperUserSecurityPtr - return r -} - -// SetSuperUserSecurity is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetSuperUserSecurity(newValue ExportRuleInfoTypeSuperUserSecurity) *ExportRuleInfoType { - o.SuperUserSecurityPtr = &newValue - return o -} - -// VserverName is a 'getter' method -func (o *ExportRuleInfoType) VserverName() string { - r := *o.VserverNamePtr - return r -} - -// SetVserverName is a fluent style 'setter' method that can be chained -func (o *ExportRuleInfoType) SetVserverName(newValue string) *ExportRuleInfoType { - o.VserverNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-exportchownmode.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-exportchownmode.go deleted file mode 100644 index 99a0ddf3e..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-exportchownmode.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// ExportchownmodeType is a structure to represent a exportchownmode ZAPI object -type ExportchownmodeType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-exportntfsunixsecops.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-exportntfsunixsecops.go deleted file mode 100644 index be563b65d..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-exportntfsunixsecops.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// ExportntfsunixsecopsType is a structure to represent a exportntfsunixsecops ZAPI object -type ExportntfsunixsecopsType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-failover-group.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-failover-group.go deleted file mode 100644 index 819ef811c..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-failover-group.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// FailoverGroupType is a structure to represent a failover-group ZAPI object -type FailoverGroupType = string diff --git 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-initiator-group-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-initiator-group-info.go deleted file mode 100644 index f4b86ae02..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-initiator-group-info.go +++ /dev/null @@ -1,248 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// InitiatorGroupInfoType is a structure to represent a initiator-group-info ZAPI object -type InitiatorGroupInfoType struct { - XMLName xml.Name `xml:"initiator-group-info"` - InitiatorGroupAluaEnabledPtr *bool `xml:"initiator-group-alua-enabled"` - InitiatorGroupDeleteOnUnmapPtr *bool `xml:"initiator-group-delete-on-unmap"` - InitiatorGroupNamePtr *string `xml:"initiator-group-name"` - InitiatorGroupOsTypePtr *InitiatorGroupOsTypeType `xml:"initiator-group-os-type"` - InitiatorGroupPortsetNamePtr *string `xml:"initiator-group-portset-name"` - InitiatorGroupReportScsiNameEnabledPtr *bool `xml:"initiator-group-report-scsi-name-enabled"` - InitiatorGroupThrottleBorrowPtr *bool `xml:"initiator-group-throttle-borrow"` - InitiatorGroupThrottleReservePtr *int `xml:"initiator-group-throttle-reserve"` - InitiatorGroupTypePtr *string `xml:"initiator-group-type"` - InitiatorGroupUsePartnerPtr *bool `xml:"initiator-group-use-partner"` - InitiatorGroupUuidPtr *string `xml:"initiator-group-uuid"` - InitiatorGroupVsaEnabledPtr *bool `xml:"initiator-group-vsa-enabled"` - InitiatorsPtr *InitiatorGroupInfoTypeInitiators `xml:"initiators"` - // work in progress - LunIdPtr *int `xml:"lun-id"` - VserverPtr *string `xml:"vserver"` -} - -// NewInitiatorGroupInfoType is a factory method for creating new instances of InitiatorGroupInfoType objects -func NewInitiatorGroupInfoType() *InitiatorGroupInfoType { - return &InitiatorGroupInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *InitiatorGroupInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o InitiatorGroupInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// InitiatorGroupAluaEnabled is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupAluaEnabled() bool { - r := *o.InitiatorGroupAluaEnabledPtr - return r -} - -// SetInitiatorGroupAluaEnabled is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupAluaEnabled(newValue bool) *InitiatorGroupInfoType { - o.InitiatorGroupAluaEnabledPtr = &newValue - return o -} - -// InitiatorGroupDeleteOnUnmap is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupDeleteOnUnmap() bool { - r := *o.InitiatorGroupDeleteOnUnmapPtr - return r -} - -// SetInitiatorGroupDeleteOnUnmap is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupDeleteOnUnmap(newValue bool) *InitiatorGroupInfoType { - o.InitiatorGroupDeleteOnUnmapPtr = &newValue - return o -} - -// InitiatorGroupName is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupName() string { - r := *o.InitiatorGroupNamePtr - return r -} - -// SetInitiatorGroupName is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupName(newValue string) 
*InitiatorGroupInfoType { - o.InitiatorGroupNamePtr = &newValue - return o -} - -// InitiatorGroupOsType is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupOsType() InitiatorGroupOsTypeType { - r := *o.InitiatorGroupOsTypePtr - return r -} - -// SetInitiatorGroupOsType is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupOsType(newValue InitiatorGroupOsTypeType) *InitiatorGroupInfoType { - o.InitiatorGroupOsTypePtr = &newValue - return o -} - -// InitiatorGroupPortsetName is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupPortsetName() string { - r := *o.InitiatorGroupPortsetNamePtr - return r -} - -// SetInitiatorGroupPortsetName is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupPortsetName(newValue string) *InitiatorGroupInfoType { - o.InitiatorGroupPortsetNamePtr = &newValue - return o -} - -// InitiatorGroupReportScsiNameEnabled is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupReportScsiNameEnabled() bool { - r := *o.InitiatorGroupReportScsiNameEnabledPtr - return r -} - -// SetInitiatorGroupReportScsiNameEnabled is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupReportScsiNameEnabled(newValue bool) *InitiatorGroupInfoType { - o.InitiatorGroupReportScsiNameEnabledPtr = &newValue - return o -} - -// InitiatorGroupThrottleBorrow is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupThrottleBorrow() bool { - r := *o.InitiatorGroupThrottleBorrowPtr - return r -} - -// SetInitiatorGroupThrottleBorrow is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupThrottleBorrow(newValue bool) *InitiatorGroupInfoType { - o.InitiatorGroupThrottleBorrowPtr = &newValue - return o -} - -// InitiatorGroupThrottleReserve is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupThrottleReserve() int { - r := *o.InitiatorGroupThrottleReservePtr - return r -} - -// SetInitiatorGroupThrottleReserve is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupThrottleReserve(newValue int) *InitiatorGroupInfoType { - o.InitiatorGroupThrottleReservePtr = &newValue - return o -} - -// InitiatorGroupType is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupType() string { - r := *o.InitiatorGroupTypePtr - return r -} - -// SetInitiatorGroupType is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupType(newValue string) *InitiatorGroupInfoType { - o.InitiatorGroupTypePtr = &newValue - return o -} - -// InitiatorGroupUsePartner is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupUsePartner() bool { - r := *o.InitiatorGroupUsePartnerPtr - return r -} - -// SetInitiatorGroupUsePartner is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupUsePartner(newValue bool) *InitiatorGroupInfoType { - o.InitiatorGroupUsePartnerPtr = &newValue - return o -} - -// InitiatorGroupUuid is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupUuid() string { - r := *o.InitiatorGroupUuidPtr - return r -} - -// SetInitiatorGroupUuid is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupUuid(newValue string) *InitiatorGroupInfoType { - o.InitiatorGroupUuidPtr = &newValue - return o -} - -// 
InitiatorGroupVsaEnabled is a 'getter' method -func (o *InitiatorGroupInfoType) InitiatorGroupVsaEnabled() bool { - r := *o.InitiatorGroupVsaEnabledPtr - return r -} - -// SetInitiatorGroupVsaEnabled is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiatorGroupVsaEnabled(newValue bool) *InitiatorGroupInfoType { - o.InitiatorGroupVsaEnabledPtr = &newValue - return o -} - -// InitiatorGroupInfoTypeInitiators is a wrapper -type InitiatorGroupInfoTypeInitiators struct { - XMLName xml.Name `xml:"initiators"` - InitiatorInfoPtr []InitiatorInfoType `xml:"initiator-info"` -} - -// InitiatorInfo is a 'getter' method -func (o *InitiatorGroupInfoTypeInitiators) InitiatorInfo() []InitiatorInfoType { - r := o.InitiatorInfoPtr - return r -} - -// SetInitiatorInfo is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoTypeInitiators) SetInitiatorInfo(newValue []InitiatorInfoType) *InitiatorGroupInfoTypeInitiators { - newSlice := make([]InitiatorInfoType, len(newValue)) - copy(newSlice, newValue) - o.InitiatorInfoPtr = newSlice - return o -} - -// Initiators is a 'getter' method -func (o *InitiatorGroupInfoType) Initiators() InitiatorGroupInfoTypeInitiators { - r := *o.InitiatorsPtr - return r -} - -// SetInitiators is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetInitiators(newValue InitiatorGroupInfoTypeInitiators) *InitiatorGroupInfoType { - o.InitiatorsPtr = &newValue - return o -} - -// LunId is a 'getter' method -func (o *InitiatorGroupInfoType) LunId() int { - r := *o.LunIdPtr - return r -} - -// SetLunId is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetLunId(newValue int) *InitiatorGroupInfoType { - o.LunIdPtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *InitiatorGroupInfoType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *InitiatorGroupInfoType) SetVserver(newValue string) *InitiatorGroupInfoType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-initiator-group-os-type.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-initiator-group-os-type.go deleted file mode 100644 index 473072e90..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-initiator-group-os-type.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// InitiatorGroupOsTypeType is a structure to represent a initiator-group-os-type ZAPI object -type InitiatorGroupOsTypeType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-initiator-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-initiator-info.go deleted file mode 100644 index e7947b6c4..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-initiator-info.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// InitiatorInfoType is a structure to represent a initiator-info ZAPI object -type InitiatorInfoType struct { - XMLName xml.Name `xml:"initiator-info"` - InitiatorNamePtr *string `xml:"initiator-name"` -} - -// NewInitiatorInfoType is a factory method for creating new instances of InitiatorInfoType objects -func NewInitiatorInfoType() *InitiatorInfoType { - return &InitiatorInfoType{} -} - -// ToXML converts 
this object into an xml string representation -func (o *InitiatorInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o InitiatorInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// InitiatorName is a 'getter' method -func (o *InitiatorInfoType) InitiatorName() string { - r := *o.InitiatorNamePtr - return r -} - -// SetInitiatorName is a fluent style 'setter' method that can be chained -func (o *InitiatorInfoType) SetInitiatorName(newValue string) *InitiatorInfoType { - o.InitiatorNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-ip-address.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-ip-address.go deleted file mode 100644 index 6ac67c72c..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-ip-address.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// IpAddressType is a structure to represent a ip-address ZAPI object -type IpAddressType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-iscsi-interface-list-entry-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-iscsi-interface-list-entry-info.go deleted file mode 100644 index 38a75808b..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-iscsi-interface-list-entry-info.go +++ /dev/null @@ -1,175 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IscsiInterfaceListEntryInfoType is a structure to represent a iscsi-interface-list-entry-info ZAPI object -type IscsiInterfaceListEntryInfoType struct { - XMLName xml.Name `xml:"iscsi-interface-list-entry-info"` - CurrentNodePtr *string `xml:"current-node"` - CurrentPortPtr *string `xml:"current-port"` - InterfaceNamePtr *string `xml:"interface-name"` - IpAddressPtr *string `xml:"ip-address"` - IpPortPtr *int `xml:"ip-port"` - IsInterfaceEnabledPtr *bool `xml:"is-interface-enabled"` - RelativePortIdPtr *int `xml:"relative-port-id"` - SendtargetsFqdnPtr *string `xml:"sendtargets-fqdn"` - TpgroupNamePtr *string `xml:"tpgroup-name"` - TpgroupTagPtr *int `xml:"tpgroup-tag"` - VserverPtr *string `xml:"vserver"` -} - -// NewIscsiInterfaceListEntryInfoType is a factory method for creating new instances of IscsiInterfaceListEntryInfoType objects -func NewIscsiInterfaceListEntryInfoType() *IscsiInterfaceListEntryInfoType { - return &IscsiInterfaceListEntryInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *IscsiInterfaceListEntryInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiInterfaceListEntryInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// CurrentNode is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) CurrentNode() string { - r := *o.CurrentNodePtr - return r -} - -// SetCurrentNode is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetCurrentNode(newValue string) *IscsiInterfaceListEntryInfoType { - 
o.CurrentNodePtr = &newValue - return o -} - -// CurrentPort is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) CurrentPort() string { - r := *o.CurrentPortPtr - return r -} - -// SetCurrentPort is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetCurrentPort(newValue string) *IscsiInterfaceListEntryInfoType { - o.CurrentPortPtr = &newValue - return o -} - -// InterfaceName is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) InterfaceName() string { - r := *o.InterfaceNamePtr - return r -} - -// SetInterfaceName is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetInterfaceName(newValue string) *IscsiInterfaceListEntryInfoType { - o.InterfaceNamePtr = &newValue - return o -} - -// IpAddress is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) IpAddress() string { - r := *o.IpAddressPtr - return r -} - -// SetIpAddress is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetIpAddress(newValue string) *IscsiInterfaceListEntryInfoType { - o.IpAddressPtr = &newValue - return o -} - -// IpPort is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) IpPort() int { - r := *o.IpPortPtr - return r -} - -// SetIpPort is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetIpPort(newValue int) *IscsiInterfaceListEntryInfoType { - o.IpPortPtr = &newValue - return o -} - -// IsInterfaceEnabled is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) IsInterfaceEnabled() bool { - r := *o.IsInterfaceEnabledPtr - return r -} - -// SetIsInterfaceEnabled is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetIsInterfaceEnabled(newValue bool) *IscsiInterfaceListEntryInfoType { - o.IsInterfaceEnabledPtr = &newValue - return o -} - -// RelativePortId is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) RelativePortId() int { - r := *o.RelativePortIdPtr - return r -} - -// SetRelativePortId is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetRelativePortId(newValue int) *IscsiInterfaceListEntryInfoType { - o.RelativePortIdPtr = &newValue - return o -} - -// SendtargetsFqdn is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) SendtargetsFqdn() string { - r := *o.SendtargetsFqdnPtr - return r -} - -// SetSendtargetsFqdn is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetSendtargetsFqdn(newValue string) *IscsiInterfaceListEntryInfoType { - o.SendtargetsFqdnPtr = &newValue - return o -} - -// TpgroupName is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) TpgroupName() string { - r := *o.TpgroupNamePtr - return r -} - -// SetTpgroupName is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetTpgroupName(newValue string) *IscsiInterfaceListEntryInfoType { - o.TpgroupNamePtr = &newValue - return o -} - -// TpgroupTag is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) TpgroupTag() int { - r := *o.TpgroupTagPtr - return r -} - -// SetTpgroupTag is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetTpgroupTag(newValue int) *IscsiInterfaceListEntryInfoType { - o.TpgroupTagPtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *IscsiInterfaceListEntryInfoType) Vserver() string { - r := *o.VserverPtr - 
return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *IscsiInterfaceListEntryInfoType) SetVserver(newValue string) *IscsiInterfaceListEntryInfoType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-iscsi-service-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-iscsi-service-info.go deleted file mode 100644 index b1f34a86d..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-iscsi-service-info.go +++ /dev/null @@ -1,162 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// IscsiServiceInfoType is a structure to represent a iscsi-service-info ZAPI object -type IscsiServiceInfoType struct { - XMLName xml.Name `xml:"iscsi-service-info"` - AliasNamePtr *string `xml:"alias-name"` - IsAvailablePtr *bool `xml:"is-available"` - LoginTimeoutPtr *int `xml:"login-timeout"` - MaxCmdsPerSessionPtr *int `xml:"max-cmds-per-session"` - MaxConnPerSessionPtr *int `xml:"max-conn-per-session"` - MaxErrorRecoveryLevelPtr *int `xml:"max-error-recovery-level"` - NodeNamePtr *string `xml:"node-name"` - RetainTimeoutPtr *int `xml:"retain-timeout"` - TcpWindowSizePtr *int `xml:"tcp-window-size"` - VserverPtr *string `xml:"vserver"` -} - -// NewIscsiServiceInfoType is a factory method for creating new instances of IscsiServiceInfoType objects -func NewIscsiServiceInfoType() *IscsiServiceInfoType { - return &IscsiServiceInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *IscsiServiceInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o IscsiServiceInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AliasName is a 'getter' method -func (o *IscsiServiceInfoType) AliasName() string { - r := *o.AliasNamePtr - return r -} - -// SetAliasName is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetAliasName(newValue string) *IscsiServiceInfoType { - o.AliasNamePtr = &newValue - return o -} - -// IsAvailable is a 'getter' method -func (o *IscsiServiceInfoType) IsAvailable() bool { - r := *o.IsAvailablePtr - return r -} - -// SetIsAvailable is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetIsAvailable(newValue bool) *IscsiServiceInfoType { - o.IsAvailablePtr = &newValue - return o -} - -// LoginTimeout is a 'getter' method -func (o *IscsiServiceInfoType) LoginTimeout() int { - r := *o.LoginTimeoutPtr - return r -} - -// SetLoginTimeout is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetLoginTimeout(newValue int) *IscsiServiceInfoType { - o.LoginTimeoutPtr = &newValue - return o -} - -// MaxCmdsPerSession is a 'getter' method -func (o *IscsiServiceInfoType) MaxCmdsPerSession() int { - r := *o.MaxCmdsPerSessionPtr - return r -} - -// SetMaxCmdsPerSession is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetMaxCmdsPerSession(newValue int) *IscsiServiceInfoType { - o.MaxCmdsPerSessionPtr = &newValue - return o -} - -// MaxConnPerSession is a 'getter' method -func (o *IscsiServiceInfoType) MaxConnPerSession() int { - r := *o.MaxConnPerSessionPtr - return r -} - -// 
SetMaxConnPerSession is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetMaxConnPerSession(newValue int) *IscsiServiceInfoType { - o.MaxConnPerSessionPtr = &newValue - return o -} - -// MaxErrorRecoveryLevel is a 'getter' method -func (o *IscsiServiceInfoType) MaxErrorRecoveryLevel() int { - r := *o.MaxErrorRecoveryLevelPtr - return r -} - -// SetMaxErrorRecoveryLevel is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetMaxErrorRecoveryLevel(newValue int) *IscsiServiceInfoType { - o.MaxErrorRecoveryLevelPtr = &newValue - return o -} - -// NodeName is a 'getter' method -func (o *IscsiServiceInfoType) NodeName() string { - r := *o.NodeNamePtr - return r -} - -// SetNodeName is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetNodeName(newValue string) *IscsiServiceInfoType { - o.NodeNamePtr = &newValue - return o -} - -// RetainTimeout is a 'getter' method -func (o *IscsiServiceInfoType) RetainTimeout() int { - r := *o.RetainTimeoutPtr - return r -} - -// SetRetainTimeout is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetRetainTimeout(newValue int) *IscsiServiceInfoType { - o.RetainTimeoutPtr = &newValue - return o -} - -// TcpWindowSize is a 'getter' method -func (o *IscsiServiceInfoType) TcpWindowSize() int { - r := *o.TcpWindowSizePtr - return r -} - -// SetTcpWindowSize is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetTcpWindowSize(newValue int) *IscsiServiceInfoType { - o.TcpWindowSizePtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *IscsiServiceInfoType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *IscsiServiceInfoType) SetVserver(newValue string) *IscsiServiceInfoType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-job-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-job-info.go deleted file mode 100644 index 8605daf7e..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-job-info.go +++ /dev/null @@ -1,292 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// JobInfoType is a structure to represent a job-info ZAPI object -type JobInfoType struct { - XMLName xml.Name `xml:"job-info"` - IsRestartedPtr *bool `xml:"is-restarted"` - JobCategoryPtr *string `xml:"job-category"` - JobCompletionPtr *string `xml:"job-completion"` - JobDescriptionPtr *string `xml:"job-description"` - JobDropdeadTimePtr *int `xml:"job-dropdead-time"` - JobEndTimePtr *int `xml:"job-end-time"` - JobIdPtr *int `xml:"job-id"` - JobNamePtr *string `xml:"job-name"` - JobNodePtr *NodeNameType `xml:"job-node"` - JobPriorityPtr *JobPriorityType `xml:"job-priority"` - JobProgressPtr *string `xml:"job-progress"` - JobQueueTimePtr *int `xml:"job-queue-time"` - JobSchedulePtr *string `xml:"job-schedule"` - JobStartTimePtr *int `xml:"job-start-time"` - JobStatePtr *JobStateType `xml:"job-state"` - JobStatusCodePtr *int `xml:"job-status-code"` - JobTypePtr *string `xml:"job-type"` - JobUsernamePtr *string `xml:"job-username"` - JobUuidPtr *UuidType `xml:"job-uuid"` - JobVserverPtr *VserverNameType `xml:"job-vserver"` -} - -// NewJobInfoType is a factory method for creating new instances of JobInfoType objects -func NewJobInfoType() 
*JobInfoType { - return &JobInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *JobInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o JobInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IsRestarted is a 'getter' method -func (o *JobInfoType) IsRestarted() bool { - r := *o.IsRestartedPtr - return r -} - -// SetIsRestarted is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetIsRestarted(newValue bool) *JobInfoType { - o.IsRestartedPtr = &newValue - return o -} - -// JobCategory is a 'getter' method -func (o *JobInfoType) JobCategory() string { - r := *o.JobCategoryPtr - return r -} - -// SetJobCategory is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobCategory(newValue string) *JobInfoType { - o.JobCategoryPtr = &newValue - return o -} - -// JobCompletion is a 'getter' method -func (o *JobInfoType) JobCompletion() string { - r := *o.JobCompletionPtr - return r -} - -// SetJobCompletion is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobCompletion(newValue string) *JobInfoType { - o.JobCompletionPtr = &newValue - return o -} - -// JobDescription is a 'getter' method -func (o *JobInfoType) JobDescription() string { - r := *o.JobDescriptionPtr - return r -} - -// SetJobDescription is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobDescription(newValue string) *JobInfoType { - o.JobDescriptionPtr = &newValue - return o -} - -// JobDropdeadTime is a 'getter' method -func (o *JobInfoType) JobDropdeadTime() int { - r := *o.JobDropdeadTimePtr - return r -} - -// SetJobDropdeadTime is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobDropdeadTime(newValue int) *JobInfoType { - o.JobDropdeadTimePtr = &newValue - return o -} - -// JobEndTime is a 'getter' method -func (o *JobInfoType) JobEndTime() int { - r := *o.JobEndTimePtr - return r -} - -// SetJobEndTime is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobEndTime(newValue int) *JobInfoType { - o.JobEndTimePtr = &newValue - return o -} - -// JobId is a 'getter' method -func (o *JobInfoType) JobId() int { - r := *o.JobIdPtr - return r -} - -// SetJobId is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobId(newValue int) *JobInfoType { - o.JobIdPtr = &newValue - return o -} - -// JobName is a 'getter' method -func (o *JobInfoType) JobName() string { - r := *o.JobNamePtr - return r -} - -// SetJobName is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobName(newValue string) *JobInfoType { - o.JobNamePtr = &newValue - return o -} - -// JobNode is a 'getter' method -func (o *JobInfoType) JobNode() NodeNameType { - r := *o.JobNodePtr - return r -} - -// SetJobNode is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobNode(newValue NodeNameType) *JobInfoType { - o.JobNodePtr = &newValue - return o -} - -// JobPriority is a 'getter' method -func (o *JobInfoType) JobPriority() JobPriorityType { - r := *o.JobPriorityPtr - return r -} - -// SetJobPriority is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobPriority(newValue JobPriorityType) *JobInfoType { - 
o.JobPriorityPtr = &newValue - return o -} - -// JobProgress is a 'getter' method -func (o *JobInfoType) JobProgress() string { - r := *o.JobProgressPtr - return r -} - -// SetJobProgress is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobProgress(newValue string) *JobInfoType { - o.JobProgressPtr = &newValue - return o -} - -// JobQueueTime is a 'getter' method -func (o *JobInfoType) JobQueueTime() int { - r := *o.JobQueueTimePtr - return r -} - -// SetJobQueueTime is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobQueueTime(newValue int) *JobInfoType { - o.JobQueueTimePtr = &newValue - return o -} - -// JobSchedule is a 'getter' method -func (o *JobInfoType) JobSchedule() string { - r := *o.JobSchedulePtr - return r -} - -// SetJobSchedule is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobSchedule(newValue string) *JobInfoType { - o.JobSchedulePtr = &newValue - return o -} - -// JobStartTime is a 'getter' method -func (o *JobInfoType) JobStartTime() int { - r := *o.JobStartTimePtr - return r -} - -// SetJobStartTime is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobStartTime(newValue int) *JobInfoType { - o.JobStartTimePtr = &newValue - return o -} - -// JobState is a 'getter' method -func (o *JobInfoType) JobState() JobStateType { - r := *o.JobStatePtr - return r -} - -// SetJobState is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobState(newValue JobStateType) *JobInfoType { - o.JobStatePtr = &newValue - return o -} - -// JobStatusCode is a 'getter' method -func (o *JobInfoType) JobStatusCode() int { - r := *o.JobStatusCodePtr - return r -} - -// SetJobStatusCode is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobStatusCode(newValue int) *JobInfoType { - o.JobStatusCodePtr = &newValue - return o -} - -// JobType is a 'getter' method -func (o *JobInfoType) JobType() string { - r := *o.JobTypePtr - return r -} - -// SetJobType is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobType(newValue string) *JobInfoType { - o.JobTypePtr = &newValue - return o -} - -// JobUsername is a 'getter' method -func (o *JobInfoType) JobUsername() string { - r := *o.JobUsernamePtr - return r -} - -// SetJobUsername is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobUsername(newValue string) *JobInfoType { - o.JobUsernamePtr = &newValue - return o -} - -// JobUuid is a 'getter' method -func (o *JobInfoType) JobUuid() UuidType { - r := *o.JobUuidPtr - return r -} - -// SetJobUuid is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobUuid(newValue UuidType) *JobInfoType { - o.JobUuidPtr = &newValue - return o -} - -// JobVserver is a 'getter' method -func (o *JobInfoType) JobVserver() VserverNameType { - r := *o.JobVserverPtr - return r -} - -// SetJobVserver is a fluent style 'setter' method that can be chained -func (o *JobInfoType) SetJobVserver(newValue VserverNameType) *JobInfoType { - o.JobVserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-job-priority.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-job-priority.go deleted file mode 100644 index 99512ab93..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-job-priority.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// JobPriorityType is a 
structure to represent a job-priority ZAPI object -type JobPriorityType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-job-state.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-job-state.go deleted file mode 100644 index 442cf1933..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-job-state.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// JobStateType is a structure to represent a job-state ZAPI object -type JobStateType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-junction-path.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-junction-path.go deleted file mode 100644 index 239ab5749..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-junction-path.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// JunctionPathType is a structure to represent a junction-path ZAPI object -type JunctionPathType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-language-code.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-language-code.go deleted file mode 100644 index 28406aba9..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-language-code.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// LanguageCodeType is a structure to represent a language-code ZAPI object -type LanguageCodeType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-lun-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-lun-info.go deleted file mode 100644 index 16571953c..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-lun-info.go +++ /dev/null @@ -1,552 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// LunInfoType is a structure to represent a lun-info ZAPI object -type LunInfoType struct { - XMLName xml.Name `xml:"lun-info"` - AlignmentPtr *string `xml:"alignment"` - ApplicationPtr *string `xml:"application"` - ApplicationUuidPtr *string `xml:"application-uuid"` - BackingSnapshotPtr *string `xml:"backing-snapshot"` - BlockSizePtr *int `xml:"block-size"` - CachingPolicyPtr *string `xml:"caching-policy"` - ClassPtr *string `xml:"class"` - CloneBackingSnapshotPtr *string `xml:"clone-backing-snapshot"` - CommentPtr *string `xml:"comment"` - CreationTimestampPtr *int `xml:"creation-timestamp"` - DeviceBinaryIdPtr *string `xml:"device-binary-id"` - DeviceIdPtr *int `xml:"device-id"` - DeviceTextIdPtr *string `xml:"device-text-id"` - IsClonePtr *bool `xml:"is-clone"` - IsCloneAutodeleteEnabledPtr *bool `xml:"is-clone-autodelete-enabled"` - IsInconsistentImportPtr *bool `xml:"is-inconsistent-import"` - IsRestoreInaccessiblePtr *bool `xml:"is-restore-inaccessible"` - IsSpaceAllocEnabledPtr *bool `xml:"is-space-alloc-enabled"` - IsSpaceReservationEnabledPtr *bool `xml:"is-space-reservation-enabled"` - MappedPtr *bool `xml:"mapped"` - MultiprotocolTypePtr *LunOsTypeType `xml:"multiprotocol-type"` - NodePtr *NodeNameType `xml:"node"` - OnlinePtr *bool `xml:"online"` - PathPtr *string `xml:"path"` - PrefixSizePtr *int `xml:"prefix-size"` - QosAdaptivePolicyGroupPtr *string `xml:"qos-adaptive-policy-group"` - QosPolicyGroupPtr *string `xml:"qos-policy-group"` - QtreePtr *string `xml:"qtree"` - ReadOnlyPtr *bool `xml:"read-only"` - Serial7ModePtr *string `xml:"serial-7-mode"` 
- SerialNumberPtr *string `xml:"serial-number"` - ShareStatePtr *string `xml:"share-state"` - SizePtr *int `xml:"size"` - SizeUsedPtr *int `xml:"size-used"` - StagingPtr *bool `xml:"staging"` - StatePtr *string `xml:"state"` - SuffixSizePtr *int `xml:"suffix-size"` - UuidPtr *string `xml:"uuid"` - VolumePtr *string `xml:"volume"` - VserverPtr *string `xml:"vserver"` -} - -// NewLunInfoType is a factory method for creating new instances of LunInfoType objects -func NewLunInfoType() *LunInfoType { - return &LunInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *LunInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o LunInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Alignment is a 'getter' method -func (o *LunInfoType) Alignment() string { - r := *o.AlignmentPtr - return r -} - -// SetAlignment is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetAlignment(newValue string) *LunInfoType { - o.AlignmentPtr = &newValue - return o -} - -// Application is a 'getter' method -func (o *LunInfoType) Application() string { - r := *o.ApplicationPtr - return r -} - -// SetApplication is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetApplication(newValue string) *LunInfoType { - o.ApplicationPtr = &newValue - return o -} - -// ApplicationUuid is a 'getter' method -func (o *LunInfoType) ApplicationUuid() string { - r := *o.ApplicationUuidPtr - return r -} - -// SetApplicationUuid is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetApplicationUuid(newValue string) *LunInfoType { - o.ApplicationUuidPtr = &newValue - return o -} - -// BackingSnapshot is a 'getter' method -func (o *LunInfoType) BackingSnapshot() string { - r := *o.BackingSnapshotPtr - return r -} - -// SetBackingSnapshot is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetBackingSnapshot(newValue string) *LunInfoType { - o.BackingSnapshotPtr = &newValue - return o -} - -// BlockSize is a 'getter' method -func (o *LunInfoType) BlockSize() int { - r := *o.BlockSizePtr - return r -} - -// SetBlockSize is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetBlockSize(newValue int) *LunInfoType { - o.BlockSizePtr = &newValue - return o -} - -// CachingPolicy is a 'getter' method -func (o *LunInfoType) CachingPolicy() string { - r := *o.CachingPolicyPtr - return r -} - -// SetCachingPolicy is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetCachingPolicy(newValue string) *LunInfoType { - o.CachingPolicyPtr = &newValue - return o -} - -// Class is a 'getter' method -func (o *LunInfoType) Class() string { - r := *o.ClassPtr - return r -} - -// SetClass is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetClass(newValue string) *LunInfoType { - o.ClassPtr = &newValue - return o -} - -// CloneBackingSnapshot is a 'getter' method -func (o *LunInfoType) CloneBackingSnapshot() string { - r := *o.CloneBackingSnapshotPtr - return r -} - -// SetCloneBackingSnapshot is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetCloneBackingSnapshot(newValue string) *LunInfoType { - o.CloneBackingSnapshotPtr = &newValue - return o -} - -// Comment is a 'getter' 
method -func (o *LunInfoType) Comment() string { - r := *o.CommentPtr - return r -} - -// SetComment is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetComment(newValue string) *LunInfoType { - o.CommentPtr = &newValue - return o -} - -// CreationTimestamp is a 'getter' method -func (o *LunInfoType) CreationTimestamp() int { - r := *o.CreationTimestampPtr - return r -} - -// SetCreationTimestamp is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetCreationTimestamp(newValue int) *LunInfoType { - o.CreationTimestampPtr = &newValue - return o -} - -// DeviceBinaryId is a 'getter' method -func (o *LunInfoType) DeviceBinaryId() string { - r := *o.DeviceBinaryIdPtr - return r -} - -// SetDeviceBinaryId is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetDeviceBinaryId(newValue string) *LunInfoType { - o.DeviceBinaryIdPtr = &newValue - return o -} - -// DeviceId is a 'getter' method -func (o *LunInfoType) DeviceId() int { - r := *o.DeviceIdPtr - return r -} - -// SetDeviceId is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetDeviceId(newValue int) *LunInfoType { - o.DeviceIdPtr = &newValue - return o -} - -// DeviceTextId is a 'getter' method -func (o *LunInfoType) DeviceTextId() string { - r := *o.DeviceTextIdPtr - return r -} - -// SetDeviceTextId is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetDeviceTextId(newValue string) *LunInfoType { - o.DeviceTextIdPtr = &newValue - return o -} - -// IsClone is a 'getter' method -func (o *LunInfoType) IsClone() bool { - r := *o.IsClonePtr - return r -} - -// SetIsClone is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetIsClone(newValue bool) *LunInfoType { - o.IsClonePtr = &newValue - return o -} - -// IsCloneAutodeleteEnabled is a 'getter' method -func (o *LunInfoType) IsCloneAutodeleteEnabled() bool { - r := *o.IsCloneAutodeleteEnabledPtr - return r -} - -// SetIsCloneAutodeleteEnabled is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetIsCloneAutodeleteEnabled(newValue bool) *LunInfoType { - o.IsCloneAutodeleteEnabledPtr = &newValue - return o -} - -// IsInconsistentImport is a 'getter' method -func (o *LunInfoType) IsInconsistentImport() bool { - r := *o.IsInconsistentImportPtr - return r -} - -// SetIsInconsistentImport is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetIsInconsistentImport(newValue bool) *LunInfoType { - o.IsInconsistentImportPtr = &newValue - return o -} - -// IsRestoreInaccessible is a 'getter' method -func (o *LunInfoType) IsRestoreInaccessible() bool { - r := *o.IsRestoreInaccessiblePtr - return r -} - -// SetIsRestoreInaccessible is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetIsRestoreInaccessible(newValue bool) *LunInfoType { - o.IsRestoreInaccessiblePtr = &newValue - return o -} - -// IsSpaceAllocEnabled is a 'getter' method -func (o *LunInfoType) IsSpaceAllocEnabled() bool { - r := *o.IsSpaceAllocEnabledPtr - return r -} - -// SetIsSpaceAllocEnabled is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetIsSpaceAllocEnabled(newValue bool) *LunInfoType { - o.IsSpaceAllocEnabledPtr = &newValue - return o -} - -// IsSpaceReservationEnabled is a 'getter' method -func (o *LunInfoType) IsSpaceReservationEnabled() bool { - r := *o.IsSpaceReservationEnabledPtr - return r -} - -// SetIsSpaceReservationEnabled is a fluent style 'setter' method that 
can be chained -func (o *LunInfoType) SetIsSpaceReservationEnabled(newValue bool) *LunInfoType { - o.IsSpaceReservationEnabledPtr = &newValue - return o -} - -// Mapped is a 'getter' method -func (o *LunInfoType) Mapped() bool { - r := *o.MappedPtr - return r -} - -// SetMapped is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetMapped(newValue bool) *LunInfoType { - o.MappedPtr = &newValue - return o -} - -// MultiprotocolType is a 'getter' method -func (o *LunInfoType) MultiprotocolType() LunOsTypeType { - r := *o.MultiprotocolTypePtr - return r -} - -// SetMultiprotocolType is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetMultiprotocolType(newValue LunOsTypeType) *LunInfoType { - o.MultiprotocolTypePtr = &newValue - return o -} - -// Node is a 'getter' method -func (o *LunInfoType) Node() NodeNameType { - r := *o.NodePtr - return r -} - -// SetNode is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetNode(newValue NodeNameType) *LunInfoType { - o.NodePtr = &newValue - return o -} - -// Online is a 'getter' method -func (o *LunInfoType) Online() bool { - r := *o.OnlinePtr - return r -} - -// SetOnline is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetOnline(newValue bool) *LunInfoType { - o.OnlinePtr = &newValue - return o -} - -// Path is a 'getter' method -func (o *LunInfoType) Path() string { - r := *o.PathPtr - return r -} - -// SetPath is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetPath(newValue string) *LunInfoType { - o.PathPtr = &newValue - return o -} - -// PrefixSize is a 'getter' method -func (o *LunInfoType) PrefixSize() int { - r := *o.PrefixSizePtr - return r -} - -// SetPrefixSize is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetPrefixSize(newValue int) *LunInfoType { - o.PrefixSizePtr = &newValue - return o -} - -// QosAdaptivePolicyGroup is a 'getter' method -func (o *LunInfoType) QosAdaptivePolicyGroup() string { - r := *o.QosAdaptivePolicyGroupPtr - return r -} - -// SetQosAdaptivePolicyGroup is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetQosAdaptivePolicyGroup(newValue string) *LunInfoType { - o.QosAdaptivePolicyGroupPtr = &newValue - return o -} - -// QosPolicyGroup is a 'getter' method -func (o *LunInfoType) QosPolicyGroup() string { - r := *o.QosPolicyGroupPtr - return r -} - -// SetQosPolicyGroup is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetQosPolicyGroup(newValue string) *LunInfoType { - o.QosPolicyGroupPtr = &newValue - return o -} - -// Qtree is a 'getter' method -func (o *LunInfoType) Qtree() string { - r := *o.QtreePtr - return r -} - -// SetQtree is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetQtree(newValue string) *LunInfoType { - o.QtreePtr = &newValue - return o -} - -// ReadOnly is a 'getter' method -func (o *LunInfoType) ReadOnly() bool { - r := *o.ReadOnlyPtr - return r -} - -// SetReadOnly is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetReadOnly(newValue bool) *LunInfoType { - o.ReadOnlyPtr = &newValue - return o -} - -// Serial7Mode is a 'getter' method -func (o *LunInfoType) Serial7Mode() string { - r := *o.Serial7ModePtr - return r -} - -// SetSerial7Mode is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetSerial7Mode(newValue string) *LunInfoType { - o.Serial7ModePtr = &newValue - return o -} - -// SerialNumber 
is a 'getter' method -func (o *LunInfoType) SerialNumber() string { - r := *o.SerialNumberPtr - return r -} - -// SetSerialNumber is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetSerialNumber(newValue string) *LunInfoType { - o.SerialNumberPtr = &newValue - return o -} - -// ShareState is a 'getter' method -func (o *LunInfoType) ShareState() string { - r := *o.ShareStatePtr - return r -} - -// SetShareState is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetShareState(newValue string) *LunInfoType { - o.ShareStatePtr = &newValue - return o -} - -// Size is a 'getter' method -func (o *LunInfoType) Size() int { - r := *o.SizePtr - return r -} - -// SetSize is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetSize(newValue int) *LunInfoType { - o.SizePtr = &newValue - return o -} - -// SizeUsed is a 'getter' method -func (o *LunInfoType) SizeUsed() int { - r := *o.SizeUsedPtr - return r -} - -// SetSizeUsed is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetSizeUsed(newValue int) *LunInfoType { - o.SizeUsedPtr = &newValue - return o -} - -// Staging is a 'getter' method -func (o *LunInfoType) Staging() bool { - r := *o.StagingPtr - return r -} - -// SetStaging is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetStaging(newValue bool) *LunInfoType { - o.StagingPtr = &newValue - return o -} - -// State is a 'getter' method -func (o *LunInfoType) State() string { - r := *o.StatePtr - return r -} - -// SetState is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetState(newValue string) *LunInfoType { - o.StatePtr = &newValue - return o -} - -// SuffixSize is a 'getter' method -func (o *LunInfoType) SuffixSize() int { - r := *o.SuffixSizePtr - return r -} - -// SetSuffixSize is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetSuffixSize(newValue int) *LunInfoType { - o.SuffixSizePtr = &newValue - return o -} - -// Uuid is a 'getter' method -func (o *LunInfoType) Uuid() string { - r := *o.UuidPtr - return r -} - -// SetUuid is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetUuid(newValue string) *LunInfoType { - o.UuidPtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *LunInfoType) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetVolume(newValue string) *LunInfoType { - o.VolumePtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *LunInfoType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *LunInfoType) SetVserver(newValue string) *LunInfoType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-lun-os-type.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-lun-os-type.go deleted file mode 100644 index c504f52a2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-lun-os-type.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// LunOsTypeType is a structure to represent a lun-os-type ZAPI object -type LunOsTypeType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-net-interface-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-net-interface-info.go deleted 
file mode 100644 index 15bc09e46..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-net-interface-info.go +++ /dev/null @@ -1,469 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// NetInterfaceInfoType is a structure to represent a net-interface-info ZAPI object -type NetInterfaceInfoType struct { - XMLName xml.Name `xml:"net-interface-info"` - AddressPtr *IpAddressType `xml:"address"` - AddressFamilyPtr *string `xml:"address-family"` - AdministrativeStatusPtr *string `xml:"administrative-status"` - CommentPtr *string `xml:"comment"` - CurrentNodePtr *string `xml:"current-node"` - CurrentPortPtr *string `xml:"current-port"` - DataProtocolsPtr *NetInterfaceInfoTypeDataProtocols `xml:"data-protocols"` - // work in progress - DnsDomainNamePtr *DnsZoneType `xml:"dns-domain-name"` - ExtendedStatusPtr *string `xml:"extended-status"` - FailoverGroupPtr *FailoverGroupType `xml:"failover-group"` - FailoverPolicyPtr *string `xml:"failover-policy"` - FirewallPolicyPtr *string `xml:"firewall-policy"` - ForceSubnetAssociationPtr *bool `xml:"force-subnet-association"` - HomeNodePtr *string `xml:"home-node"` - HomePortPtr *string `xml:"home-port"` - InterfaceNamePtr *string `xml:"interface-name"` - IpspacePtr *string `xml:"ipspace"` - IsAutoRevertPtr *bool `xml:"is-auto-revert"` - IsDnsUpdateEnabledPtr *bool `xml:"is-dns-update-enabled"` - IsHomePtr *bool `xml:"is-home"` - IsIpv4LinkLocalPtr *bool `xml:"is-ipv4-link-local"` - LifUuidPtr *UuidType `xml:"lif-uuid"` - ListenForDnsQueryPtr *bool `xml:"listen-for-dns-query"` - NetmaskPtr *IpAddressType `xml:"netmask"` - NetmaskLengthPtr *int `xml:"netmask-length"` - OperationalStatusPtr *string `xml:"operational-status"` - RolePtr *string `xml:"role"` - RoutingGroupNamePtr *RoutingGroupTypeType `xml:"routing-group-name"` - SubnetNamePtr *SubnetNameType `xml:"subnet-name"` - UseFailoverGroupPtr *string `xml:"use-failover-group"` - VserverPtr *string `xml:"vserver"` - WwpnPtr *string `xml:"wwpn"` -} - -// NewNetInterfaceInfoType is a factory method for creating new instances of NetInterfaceInfoType objects -func NewNetInterfaceInfoType() *NetInterfaceInfoType { - return &NetInterfaceInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *NetInterfaceInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NetInterfaceInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Address is a 'getter' method -func (o *NetInterfaceInfoType) Address() IpAddressType { - r := *o.AddressPtr - return r -} - -// SetAddress is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetAddress(newValue IpAddressType) *NetInterfaceInfoType { - o.AddressPtr = &newValue - return o -} - -// AddressFamily is a 'getter' method -func (o *NetInterfaceInfoType) AddressFamily() string { - r := *o.AddressFamilyPtr - return r -} - -// SetAddressFamily is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetAddressFamily(newValue string) *NetInterfaceInfoType { - o.AddressFamilyPtr = &newValue - return o -} - -// AdministrativeStatus is a 'getter' method -func (o *NetInterfaceInfoType) AdministrativeStatus() string { - r := *o.AdministrativeStatusPtr - return r -} 
- -// SetAdministrativeStatus is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetAdministrativeStatus(newValue string) *NetInterfaceInfoType { - o.AdministrativeStatusPtr = &newValue - return o -} - -// Comment is a 'getter' method -func (o *NetInterfaceInfoType) Comment() string { - r := *o.CommentPtr - return r -} - -// SetComment is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetComment(newValue string) *NetInterfaceInfoType { - o.CommentPtr = &newValue - return o -} - -// CurrentNode is a 'getter' method -func (o *NetInterfaceInfoType) CurrentNode() string { - r := *o.CurrentNodePtr - return r -} - -// SetCurrentNode is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetCurrentNode(newValue string) *NetInterfaceInfoType { - o.CurrentNodePtr = &newValue - return o -} - -// CurrentPort is a 'getter' method -func (o *NetInterfaceInfoType) CurrentPort() string { - r := *o.CurrentPortPtr - return r -} - -// SetCurrentPort is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetCurrentPort(newValue string) *NetInterfaceInfoType { - o.CurrentPortPtr = &newValue - return o -} - -// NetInterfaceInfoTypeDataProtocols is a wrapper -type NetInterfaceInfoTypeDataProtocols struct { - XMLName xml.Name `xml:"data-protocols"` - DataProtocolPtr []DataProtocolType `xml:"data-protocol"` -} - -// DataProtocol is a 'getter' method -func (o *NetInterfaceInfoTypeDataProtocols) DataProtocol() []DataProtocolType { - r := o.DataProtocolPtr - return r -} - -// SetDataProtocol is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoTypeDataProtocols) SetDataProtocol(newValue []DataProtocolType) *NetInterfaceInfoTypeDataProtocols { - newSlice := make([]DataProtocolType, len(newValue)) - copy(newSlice, newValue) - o.DataProtocolPtr = newSlice - return o -} - -// DataProtocols is a 'getter' method -func (o *NetInterfaceInfoType) DataProtocols() NetInterfaceInfoTypeDataProtocols { - r := *o.DataProtocolsPtr - return r -} - -// SetDataProtocols is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetDataProtocols(newValue NetInterfaceInfoTypeDataProtocols) *NetInterfaceInfoType { - o.DataProtocolsPtr = &newValue - return o -} - -// DnsDomainName is a 'getter' method -func (o *NetInterfaceInfoType) DnsDomainName() DnsZoneType { - r := *o.DnsDomainNamePtr - return r -} - -// SetDnsDomainName is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetDnsDomainName(newValue DnsZoneType) *NetInterfaceInfoType { - o.DnsDomainNamePtr = &newValue - return o -} - -// ExtendedStatus is a 'getter' method -func (o *NetInterfaceInfoType) ExtendedStatus() string { - r := *o.ExtendedStatusPtr - return r -} - -// SetExtendedStatus is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetExtendedStatus(newValue string) *NetInterfaceInfoType { - o.ExtendedStatusPtr = &newValue - return o -} - -// FailoverGroup is a 'getter' method -func (o *NetInterfaceInfoType) FailoverGroup() FailoverGroupType { - r := *o.FailoverGroupPtr - return r -} - -// SetFailoverGroup is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetFailoverGroup(newValue FailoverGroupType) *NetInterfaceInfoType { - o.FailoverGroupPtr = &newValue - return o -} - -// FailoverPolicy is a 'getter' method -func (o *NetInterfaceInfoType) FailoverPolicy() string { - r := 
*o.FailoverPolicyPtr - return r -} - -// SetFailoverPolicy is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetFailoverPolicy(newValue string) *NetInterfaceInfoType { - o.FailoverPolicyPtr = &newValue - return o -} - -// FirewallPolicy is a 'getter' method -func (o *NetInterfaceInfoType) FirewallPolicy() string { - r := *o.FirewallPolicyPtr - return r -} - -// SetFirewallPolicy is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetFirewallPolicy(newValue string) *NetInterfaceInfoType { - o.FirewallPolicyPtr = &newValue - return o -} - -// ForceSubnetAssociation is a 'getter' method -func (o *NetInterfaceInfoType) ForceSubnetAssociation() bool { - r := *o.ForceSubnetAssociationPtr - return r -} - -// SetForceSubnetAssociation is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetForceSubnetAssociation(newValue bool) *NetInterfaceInfoType { - o.ForceSubnetAssociationPtr = &newValue - return o -} - -// HomeNode is a 'getter' method -func (o *NetInterfaceInfoType) HomeNode() string { - r := *o.HomeNodePtr - return r -} - -// SetHomeNode is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetHomeNode(newValue string) *NetInterfaceInfoType { - o.HomeNodePtr = &newValue - return o -} - -// HomePort is a 'getter' method -func (o *NetInterfaceInfoType) HomePort() string { - r := *o.HomePortPtr - return r -} - -// SetHomePort is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetHomePort(newValue string) *NetInterfaceInfoType { - o.HomePortPtr = &newValue - return o -} - -// InterfaceName is a 'getter' method -func (o *NetInterfaceInfoType) InterfaceName() string { - r := *o.InterfaceNamePtr - return r -} - -// SetInterfaceName is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetInterfaceName(newValue string) *NetInterfaceInfoType { - o.InterfaceNamePtr = &newValue - return o -} - -// Ipspace is a 'getter' method -func (o *NetInterfaceInfoType) Ipspace() string { - r := *o.IpspacePtr - return r -} - -// SetIpspace is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetIpspace(newValue string) *NetInterfaceInfoType { - o.IpspacePtr = &newValue - return o -} - -// IsAutoRevert is a 'getter' method -func (o *NetInterfaceInfoType) IsAutoRevert() bool { - r := *o.IsAutoRevertPtr - return r -} - -// SetIsAutoRevert is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetIsAutoRevert(newValue bool) *NetInterfaceInfoType { - o.IsAutoRevertPtr = &newValue - return o -} - -// IsDnsUpdateEnabled is a 'getter' method -func (o *NetInterfaceInfoType) IsDnsUpdateEnabled() bool { - r := *o.IsDnsUpdateEnabledPtr - return r -} - -// SetIsDnsUpdateEnabled is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetIsDnsUpdateEnabled(newValue bool) *NetInterfaceInfoType { - o.IsDnsUpdateEnabledPtr = &newValue - return o -} - -// IsHome is a 'getter' method -func (o *NetInterfaceInfoType) IsHome() bool { - r := *o.IsHomePtr - return r -} - -// SetIsHome is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetIsHome(newValue bool) *NetInterfaceInfoType { - o.IsHomePtr = &newValue - return o -} - -// IsIpv4LinkLocal is a 'getter' method -func (o *NetInterfaceInfoType) IsIpv4LinkLocal() bool { - r := *o.IsIpv4LinkLocalPtr - return r -} - -// SetIsIpv4LinkLocal is a fluent style 'setter' 
method that can be chained -func (o *NetInterfaceInfoType) SetIsIpv4LinkLocal(newValue bool) *NetInterfaceInfoType { - o.IsIpv4LinkLocalPtr = &newValue - return o -} - -// LifUuid is a 'getter' method -func (o *NetInterfaceInfoType) LifUuid() UuidType { - r := *o.LifUuidPtr - return r -} - -// SetLifUuid is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetLifUuid(newValue UuidType) *NetInterfaceInfoType { - o.LifUuidPtr = &newValue - return o -} - -// ListenForDnsQuery is a 'getter' method -func (o *NetInterfaceInfoType) ListenForDnsQuery() bool { - r := *o.ListenForDnsQueryPtr - return r -} - -// SetListenForDnsQuery is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetListenForDnsQuery(newValue bool) *NetInterfaceInfoType { - o.ListenForDnsQueryPtr = &newValue - return o -} - -// Netmask is a 'getter' method -func (o *NetInterfaceInfoType) Netmask() IpAddressType { - r := *o.NetmaskPtr - return r -} - -// SetNetmask is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetNetmask(newValue IpAddressType) *NetInterfaceInfoType { - o.NetmaskPtr = &newValue - return o -} - -// NetmaskLength is a 'getter' method -func (o *NetInterfaceInfoType) NetmaskLength() int { - r := *o.NetmaskLengthPtr - return r -} - -// SetNetmaskLength is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetNetmaskLength(newValue int) *NetInterfaceInfoType { - o.NetmaskLengthPtr = &newValue - return o -} - -// OperationalStatus is a 'getter' method -func (o *NetInterfaceInfoType) OperationalStatus() string { - r := *o.OperationalStatusPtr - return r -} - -// SetOperationalStatus is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetOperationalStatus(newValue string) *NetInterfaceInfoType { - o.OperationalStatusPtr = &newValue - return o -} - -// Role is a 'getter' method -func (o *NetInterfaceInfoType) Role() string { - r := *o.RolePtr - return r -} - -// SetRole is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetRole(newValue string) *NetInterfaceInfoType { - o.RolePtr = &newValue - return o -} - -// RoutingGroupName is a 'getter' method -func (o *NetInterfaceInfoType) RoutingGroupName() RoutingGroupTypeType { - r := *o.RoutingGroupNamePtr - return r -} - -// SetRoutingGroupName is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetRoutingGroupName(newValue RoutingGroupTypeType) *NetInterfaceInfoType { - o.RoutingGroupNamePtr = &newValue - return o -} - -// SubnetName is a 'getter' method -func (o *NetInterfaceInfoType) SubnetName() SubnetNameType { - r := *o.SubnetNamePtr - return r -} - -// SetSubnetName is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetSubnetName(newValue SubnetNameType) *NetInterfaceInfoType { - o.SubnetNamePtr = &newValue - return o -} - -// UseFailoverGroup is a 'getter' method -func (o *NetInterfaceInfoType) UseFailoverGroup() string { - r := *o.UseFailoverGroupPtr - return r -} - -// SetUseFailoverGroup is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetUseFailoverGroup(newValue string) *NetInterfaceInfoType { - o.UseFailoverGroupPtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *NetInterfaceInfoType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o 
*NetInterfaceInfoType) SetVserver(newValue string) *NetInterfaceInfoType { - o.VserverPtr = &newValue - return o -} - -// Wwpn is a 'getter' method -func (o *NetInterfaceInfoType) Wwpn() string { - r := *o.WwpnPtr - return r -} - -// SetWwpn is a fluent style 'setter' method that can be chained -func (o *NetInterfaceInfoType) SetWwpn(newValue string) *NetInterfaceInfoType { - o.WwpnPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nis-domain.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nis-domain.go deleted file mode 100644 index b973f9dd2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nis-domain.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// NisDomainType is a structure to represent a nis-domain ZAPI object -type NisDomainType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nmswitch.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nmswitch.go deleted file mode 100644 index 953a1e875..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nmswitch.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// NmswitchType is a structure to represent a nmswitch ZAPI object -type NmswitchType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-details-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-details-info.go deleted file mode 100644 index 9dab957a5..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-details-info.go +++ /dev/null @@ -1,448 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// NodeDetailsInfoType is a structure to represent a node-details-info ZAPI object -type NodeDetailsInfoType struct { - XMLName xml.Name `xml:"node-details-info"` - CpuBusytimePtr *int `xml:"cpu-busytime"` - CpuFirmwareReleasePtr *string `xml:"cpu-firmware-release"` - EnvFailedFanCountPtr *int `xml:"env-failed-fan-count"` - EnvFailedFanMessagePtr *string `xml:"env-failed-fan-message"` - EnvFailedPowerSupplyCountPtr *int `xml:"env-failed-power-supply-count"` - EnvFailedPowerSupplyMessagePtr *string `xml:"env-failed-power-supply-message"` - EnvOverTemperaturePtr *bool `xml:"env-over-temperature"` - IsAllFlashOptimizedPtr *bool `xml:"is-all-flash-optimized"` - IsCloudOptimizedPtr *bool `xml:"is-cloud-optimized"` - IsDiffSvcsPtr *bool `xml:"is-diff-svcs"` - IsEpsilonNodePtr *bool `xml:"is-epsilon-node"` - IsNodeClusterEligiblePtr *bool `xml:"is-node-cluster-eligible"` - IsNodeHealthyPtr *bool `xml:"is-node-healthy"` - MaximumAggregateSizePtr *SizeType `xml:"maximum-aggregate-size"` - MaximumNumberOfVolumesPtr *int `xml:"maximum-number-of-volumes"` - MaximumVolumeSizePtr *SizeType `xml:"maximum-volume-size"` - NodePtr *NodeNameType `xml:"node"` - NodeAssetTagPtr *string `xml:"node-asset-tag"` - NodeLocationPtr *string `xml:"node-location"` - NodeModelPtr *string `xml:"node-model"` - NodeNvramIdPtr *int `xml:"node-nvram-id"` - NodeOwnerPtr *string `xml:"node-owner"` - NodeSerialNumberPtr *string `xml:"node-serial-number"` - NodeStorageConfigurationPtr *StorageConfigurationStateEnumType `xml:"node-storage-configuration"` - NodeSystemIdPtr *string `xml:"node-system-id"` - NodeUptimePtr *int `xml:"node-uptime"` - NodeUuidPtr *string `xml:"node-uuid"` - NodeVendorPtr *string `xml:"node-vendor"` - NvramBatteryStatusPtr 
*NvramBatteryStatusEnumType `xml:"nvram-battery-status"` - ProductVersionPtr *string `xml:"product-version"` - VmSystemDisksPtr *VmSystemDisksType `xml:"vm-system-disks"` - VmhostInfoPtr *VmhostInfoType `xml:"vmhost-info"` -} - -// NewNodeDetailsInfoType is a factory method for creating new instances of NodeDetailsInfoType objects -func NewNodeDetailsInfoType() *NodeDetailsInfoType { - return &NodeDetailsInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *NodeDetailsInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NodeDetailsInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// CpuBusytime is a 'getter' method -func (o *NodeDetailsInfoType) CpuBusytime() int { - r := *o.CpuBusytimePtr - return r -} - -// SetCpuBusytime is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetCpuBusytime(newValue int) *NodeDetailsInfoType { - o.CpuBusytimePtr = &newValue - return o -} - -// CpuFirmwareRelease is a 'getter' method -func (o *NodeDetailsInfoType) CpuFirmwareRelease() string { - r := *o.CpuFirmwareReleasePtr - return r -} - -// SetCpuFirmwareRelease is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetCpuFirmwareRelease(newValue string) *NodeDetailsInfoType { - o.CpuFirmwareReleasePtr = &newValue - return o -} - -// EnvFailedFanCount is a 'getter' method -func (o *NodeDetailsInfoType) EnvFailedFanCount() int { - r := *o.EnvFailedFanCountPtr - return r -} - -// SetEnvFailedFanCount is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetEnvFailedFanCount(newValue int) *NodeDetailsInfoType { - o.EnvFailedFanCountPtr = &newValue - return o -} - -// EnvFailedFanMessage is a 'getter' method -func (o *NodeDetailsInfoType) EnvFailedFanMessage() string { - r := *o.EnvFailedFanMessagePtr - return r -} - -// SetEnvFailedFanMessage is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetEnvFailedFanMessage(newValue string) *NodeDetailsInfoType { - o.EnvFailedFanMessagePtr = &newValue - return o -} - -// EnvFailedPowerSupplyCount is a 'getter' method -func (o *NodeDetailsInfoType) EnvFailedPowerSupplyCount() int { - r := *o.EnvFailedPowerSupplyCountPtr - return r -} - -// SetEnvFailedPowerSupplyCount is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetEnvFailedPowerSupplyCount(newValue int) *NodeDetailsInfoType { - o.EnvFailedPowerSupplyCountPtr = &newValue - return o -} - -// EnvFailedPowerSupplyMessage is a 'getter' method -func (o *NodeDetailsInfoType) EnvFailedPowerSupplyMessage() string { - r := *o.EnvFailedPowerSupplyMessagePtr - return r -} - -// SetEnvFailedPowerSupplyMessage is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetEnvFailedPowerSupplyMessage(newValue string) *NodeDetailsInfoType { - o.EnvFailedPowerSupplyMessagePtr = &newValue - return o -} - -// EnvOverTemperature is a 'getter' method -func (o *NodeDetailsInfoType) EnvOverTemperature() bool { - r := *o.EnvOverTemperaturePtr - return r -} - -// SetEnvOverTemperature is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetEnvOverTemperature(newValue bool) *NodeDetailsInfoType { - o.EnvOverTemperaturePtr = &newValue - 
return o -} - -// IsAllFlashOptimized is a 'getter' method -func (o *NodeDetailsInfoType) IsAllFlashOptimized() bool { - r := *o.IsAllFlashOptimizedPtr - return r -} - -// SetIsAllFlashOptimized is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetIsAllFlashOptimized(newValue bool) *NodeDetailsInfoType { - o.IsAllFlashOptimizedPtr = &newValue - return o -} - -// IsCloudOptimized is a 'getter' method -func (o *NodeDetailsInfoType) IsCloudOptimized() bool { - r := *o.IsCloudOptimizedPtr - return r -} - -// SetIsCloudOptimized is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetIsCloudOptimized(newValue bool) *NodeDetailsInfoType { - o.IsCloudOptimizedPtr = &newValue - return o -} - -// IsDiffSvcs is a 'getter' method -func (o *NodeDetailsInfoType) IsDiffSvcs() bool { - r := *o.IsDiffSvcsPtr - return r -} - -// SetIsDiffSvcs is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetIsDiffSvcs(newValue bool) *NodeDetailsInfoType { - o.IsDiffSvcsPtr = &newValue - return o -} - -// IsEpsilonNode is a 'getter' method -func (o *NodeDetailsInfoType) IsEpsilonNode() bool { - r := *o.IsEpsilonNodePtr - return r -} - -// SetIsEpsilonNode is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetIsEpsilonNode(newValue bool) *NodeDetailsInfoType { - o.IsEpsilonNodePtr = &newValue - return o -} - -// IsNodeClusterEligible is a 'getter' method -func (o *NodeDetailsInfoType) IsNodeClusterEligible() bool { - r := *o.IsNodeClusterEligiblePtr - return r -} - -// SetIsNodeClusterEligible is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetIsNodeClusterEligible(newValue bool) *NodeDetailsInfoType { - o.IsNodeClusterEligiblePtr = &newValue - return o -} - -// IsNodeHealthy is a 'getter' method -func (o *NodeDetailsInfoType) IsNodeHealthy() bool { - r := *o.IsNodeHealthyPtr - return r -} - -// SetIsNodeHealthy is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetIsNodeHealthy(newValue bool) *NodeDetailsInfoType { - o.IsNodeHealthyPtr = &newValue - return o -} - -// MaximumAggregateSize is a 'getter' method -func (o *NodeDetailsInfoType) MaximumAggregateSize() SizeType { - r := *o.MaximumAggregateSizePtr - return r -} - -// SetMaximumAggregateSize is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetMaximumAggregateSize(newValue SizeType) *NodeDetailsInfoType { - o.MaximumAggregateSizePtr = &newValue - return o -} - -// MaximumNumberOfVolumes is a 'getter' method -func (o *NodeDetailsInfoType) MaximumNumberOfVolumes() int { - r := *o.MaximumNumberOfVolumesPtr - return r -} - -// SetMaximumNumberOfVolumes is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetMaximumNumberOfVolumes(newValue int) *NodeDetailsInfoType { - o.MaximumNumberOfVolumesPtr = &newValue - return o -} - -// MaximumVolumeSize is a 'getter' method -func (o *NodeDetailsInfoType) MaximumVolumeSize() SizeType { - r := *o.MaximumVolumeSizePtr - return r -} - -// SetMaximumVolumeSize is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetMaximumVolumeSize(newValue SizeType) *NodeDetailsInfoType { - o.MaximumVolumeSizePtr = &newValue - return o -} - -// Node is a 'getter' method -func (o *NodeDetailsInfoType) Node() NodeNameType { - r := *o.NodePtr - return r -} - -// SetNode is a fluent style 'setter' method that can be chained -func (o 
*NodeDetailsInfoType) SetNode(newValue NodeNameType) *NodeDetailsInfoType { - o.NodePtr = &newValue - return o -} - -// NodeAssetTag is a 'getter' method -func (o *NodeDetailsInfoType) NodeAssetTag() string { - r := *o.NodeAssetTagPtr - return r -} - -// SetNodeAssetTag is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeAssetTag(newValue string) *NodeDetailsInfoType { - o.NodeAssetTagPtr = &newValue - return o -} - -// NodeLocation is a 'getter' method -func (o *NodeDetailsInfoType) NodeLocation() string { - r := *o.NodeLocationPtr - return r -} - -// SetNodeLocation is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeLocation(newValue string) *NodeDetailsInfoType { - o.NodeLocationPtr = &newValue - return o -} - -// NodeModel is a 'getter' method -func (o *NodeDetailsInfoType) NodeModel() string { - r := *o.NodeModelPtr - return r -} - -// SetNodeModel is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeModel(newValue string) *NodeDetailsInfoType { - o.NodeModelPtr = &newValue - return o -} - -// NodeNvramId is a 'getter' method -func (o *NodeDetailsInfoType) NodeNvramId() int { - r := *o.NodeNvramIdPtr - return r -} - -// SetNodeNvramId is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeNvramId(newValue int) *NodeDetailsInfoType { - o.NodeNvramIdPtr = &newValue - return o -} - -// NodeOwner is a 'getter' method -func (o *NodeDetailsInfoType) NodeOwner() string { - r := *o.NodeOwnerPtr - return r -} - -// SetNodeOwner is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeOwner(newValue string) *NodeDetailsInfoType { - o.NodeOwnerPtr = &newValue - return o -} - -// NodeSerialNumber is a 'getter' method -func (o *NodeDetailsInfoType) NodeSerialNumber() string { - r := *o.NodeSerialNumberPtr - return r -} - -// SetNodeSerialNumber is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeSerialNumber(newValue string) *NodeDetailsInfoType { - o.NodeSerialNumberPtr = &newValue - return o -} - -// NodeStorageConfiguration is a 'getter' method -func (o *NodeDetailsInfoType) NodeStorageConfiguration() StorageConfigurationStateEnumType { - r := *o.NodeStorageConfigurationPtr - return r -} - -// SetNodeStorageConfiguration is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeStorageConfiguration(newValue StorageConfigurationStateEnumType) *NodeDetailsInfoType { - o.NodeStorageConfigurationPtr = &newValue - return o -} - -// NodeSystemId is a 'getter' method -func (o *NodeDetailsInfoType) NodeSystemId() string { - r := *o.NodeSystemIdPtr - return r -} - -// SetNodeSystemId is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeSystemId(newValue string) *NodeDetailsInfoType { - o.NodeSystemIdPtr = &newValue - return o -} - -// NodeUptime is a 'getter' method -func (o *NodeDetailsInfoType) NodeUptime() int { - r := *o.NodeUptimePtr - return r -} - -// SetNodeUptime is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeUptime(newValue int) *NodeDetailsInfoType { - o.NodeUptimePtr = &newValue - return o -} - -// NodeUuid is a 'getter' method -func (o *NodeDetailsInfoType) NodeUuid() string { - r := *o.NodeUuidPtr - return r -} - -// SetNodeUuid is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeUuid(newValue string) 
*NodeDetailsInfoType { - o.NodeUuidPtr = &newValue - return o -} - -// NodeVendor is a 'getter' method -func (o *NodeDetailsInfoType) NodeVendor() string { - r := *o.NodeVendorPtr - return r -} - -// SetNodeVendor is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNodeVendor(newValue string) *NodeDetailsInfoType { - o.NodeVendorPtr = &newValue - return o -} - -// NvramBatteryStatus is a 'getter' method -func (o *NodeDetailsInfoType) NvramBatteryStatus() NvramBatteryStatusEnumType { - r := *o.NvramBatteryStatusPtr - return r -} - -// SetNvramBatteryStatus is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetNvramBatteryStatus(newValue NvramBatteryStatusEnumType) *NodeDetailsInfoType { - o.NvramBatteryStatusPtr = &newValue - return o -} - -// ProductVersion is a 'getter' method -func (o *NodeDetailsInfoType) ProductVersion() string { - r := *o.ProductVersionPtr - return r -} - -// SetProductVersion is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetProductVersion(newValue string) *NodeDetailsInfoType { - o.ProductVersionPtr = &newValue - return o -} - -// VmSystemDisks is a 'getter' method -func (o *NodeDetailsInfoType) VmSystemDisks() VmSystemDisksType { - r := *o.VmSystemDisksPtr - return r -} - -// SetVmSystemDisks is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetVmSystemDisks(newValue VmSystemDisksType) *NodeDetailsInfoType { - o.VmSystemDisksPtr = &newValue - return o -} - -// VmhostInfo is a 'getter' method -func (o *NodeDetailsInfoType) VmhostInfo() VmhostInfoType { - r := *o.VmhostInfoPtr - return r -} - -// SetVmhostInfo is a fluent style 'setter' method that can be chained -func (o *NodeDetailsInfoType) SetVmhostInfo(newValue VmhostInfoType) *NodeDetailsInfoType { - o.VmhostInfoPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-name.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-name.go deleted file mode 100644 index f9a184eca..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-name.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// NodeNameType is a structure to represent a node-name ZAPI object -type NodeNameType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-ontapi-detail-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-ontapi-detail-info.go deleted file mode 100644 index 8a4971a83..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-ontapi-detail-info.go +++ /dev/null @@ -1,84 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// NodeOntapiDetailInfoType is a structure to represent a node-ontapi-detail-info ZAPI object -type NodeOntapiDetailInfoType struct { - XMLName xml.Name `xml:"node-ontapi-detail-info"` - MajorVersionPtr *int `xml:"major-version"` - MinorVersionPtr *int `xml:"minor-version"` - NodeNamePtr *string `xml:"node-name"` - NodeUuidPtr *string `xml:"node-uuid"` -} - -// NewNodeOntapiDetailInfoType is a factory method for creating new instances of NodeOntapiDetailInfoType objects -func NewNodeOntapiDetailInfoType() *NodeOntapiDetailInfoType { - return &NodeOntapiDetailInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *NodeOntapiDetailInfoType) ToXML() (string, error) { - 
output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NodeOntapiDetailInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// MajorVersion is a 'getter' method -func (o *NodeOntapiDetailInfoType) MajorVersion() int { - r := *o.MajorVersionPtr - return r -} - -// SetMajorVersion is a fluent style 'setter' method that can be chained -func (o *NodeOntapiDetailInfoType) SetMajorVersion(newValue int) *NodeOntapiDetailInfoType { - o.MajorVersionPtr = &newValue - return o -} - -// MinorVersion is a 'getter' method -func (o *NodeOntapiDetailInfoType) MinorVersion() int { - r := *o.MinorVersionPtr - return r -} - -// SetMinorVersion is a fluent style 'setter' method that can be chained -func (o *NodeOntapiDetailInfoType) SetMinorVersion(newValue int) *NodeOntapiDetailInfoType { - o.MinorVersionPtr = &newValue - return o -} - -// NodeName is a 'getter' method -func (o *NodeOntapiDetailInfoType) NodeName() string { - r := *o.NodeNamePtr - return r -} - -// SetNodeName is a fluent style 'setter' method that can be chained -func (o *NodeOntapiDetailInfoType) SetNodeName(newValue string) *NodeOntapiDetailInfoType { - o.NodeNamePtr = &newValue - return o -} - -// NodeUuid is a 'getter' method -func (o *NodeOntapiDetailInfoType) NodeUuid() string { - r := *o.NodeUuidPtr - return r -} - -// SetNodeUuid is a fluent style 'setter' method that can be chained -func (o *NodeOntapiDetailInfoType) SetNodeUuid(newValue string) *NodeOntapiDetailInfoType { - o.NodeUuidPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-version-detail-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-version-detail-info.go deleted file mode 100644 index 0d79e15ad..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-node-version-detail-info.go +++ /dev/null @@ -1,84 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// NodeVersionDetailInfoType is a structure to represent a node-version-detail-info ZAPI object -type NodeVersionDetailInfoType struct { - XMLName xml.Name `xml:"node-version-detail-info"` - BuildTimestampPtr *int `xml:"build-timestamp"` - NodeNamePtr *string `xml:"node-name"` - NodeUuidPtr *string `xml:"node-uuid"` - VersionPtr *string `xml:"version"` -} - -// NewNodeVersionDetailInfoType is a factory method for creating new instances of NodeVersionDetailInfoType objects -func NewNodeVersionDetailInfoType() *NodeVersionDetailInfoType { - return &NodeVersionDetailInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *NodeVersionDetailInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o NodeVersionDetailInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// BuildTimestamp is a 'getter' method -func (o *NodeVersionDetailInfoType) BuildTimestamp() int { - r := *o.BuildTimestampPtr - return r -} - -// SetBuildTimestamp is a fluent style 'setter' method that can be chained -func (o *NodeVersionDetailInfoType) SetBuildTimestamp(newValue int) 
*NodeVersionDetailInfoType { - o.BuildTimestampPtr = &newValue - return o -} - -// NodeName is a 'getter' method -func (o *NodeVersionDetailInfoType) NodeName() string { - r := *o.NodeNamePtr - return r -} - -// SetNodeName is a fluent style 'setter' method that can be chained -func (o *NodeVersionDetailInfoType) SetNodeName(newValue string) *NodeVersionDetailInfoType { - o.NodeNamePtr = &newValue - return o -} - -// NodeUuid is a 'getter' method -func (o *NodeVersionDetailInfoType) NodeUuid() string { - r := *o.NodeUuidPtr - return r -} - -// SetNodeUuid is a fluent style 'setter' method that can be chained -func (o *NodeVersionDetailInfoType) SetNodeUuid(newValue string) *NodeVersionDetailInfoType { - o.NodeUuidPtr = &newValue - return o -} - -// Version is a 'getter' method -func (o *NodeVersionDetailInfoType) Version() string { - r := *o.VersionPtr - return r -} - -// SetVersion is a fluent style 'setter' method that can be chained -func (o *NodeVersionDetailInfoType) SetVersion(newValue string) *NodeVersionDetailInfoType { - o.VersionPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nsswitch.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nsswitch.go deleted file mode 100644 index 29e23d7b4..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nsswitch.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// NsswitchType is a structure to represent a nsswitch ZAPI object -type NsswitchType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nvram-battery-status-enum.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nvram-battery-status-enum.go deleted file mode 100644 index abbc22f37..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-nvram-battery-status-enum.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// NvramBatteryStatusEnumType is a structure to represent a nvram-battery-status-enum ZAPI object -type NvramBatteryStatusEnumType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-plex-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-plex-attributes.go deleted file mode 100644 index 7ce203d14..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-plex-attributes.go +++ /dev/null @@ -1,170 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// PlexAttributesType is a structure to represent a plex-attributes ZAPI object -type PlexAttributesType struct { - XMLName xml.Name `xml:"plex-attributes"` - IsOnlinePtr *bool `xml:"is-online"` - IsResyncingPtr *bool `xml:"is-resyncing"` - PlexNamePtr *string `xml:"plex-name"` - PlexResyncPctWithPrecisionPtr *string `xml:"plex-resync-pct-with-precision"` - PlexStatusPtr *string `xml:"plex-status"` - PoolPtr *int `xml:"pool"` - RaidgroupsPtr *PlexAttributesTypeRaidgroups `xml:"raidgroups"` - // work in progress - ResyncLevelPtr *int `xml:"resync-level"` - ResyncingPercentagePtr *int `xml:"resyncing-percentage"` -} - -// NewPlexAttributesType is a factory method for creating new instances of PlexAttributesType objects -func NewPlexAttributesType() *PlexAttributesType { - return &PlexAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *PlexAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if 
err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o PlexAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IsOnline is a 'getter' method -func (o *PlexAttributesType) IsOnline() bool { - r := *o.IsOnlinePtr - return r -} - -// SetIsOnline is a fluent style 'setter' method that can be chained -func (o *PlexAttributesType) SetIsOnline(newValue bool) *PlexAttributesType { - o.IsOnlinePtr = &newValue - return o -} - -// IsResyncing is a 'getter' method -func (o *PlexAttributesType) IsResyncing() bool { - r := *o.IsResyncingPtr - return r -} - -// SetIsResyncing is a fluent style 'setter' method that can be chained -func (o *PlexAttributesType) SetIsResyncing(newValue bool) *PlexAttributesType { - o.IsResyncingPtr = &newValue - return o -} - -// PlexName is a 'getter' method -func (o *PlexAttributesType) PlexName() string { - r := *o.PlexNamePtr - return r -} - -// SetPlexName is a fluent style 'setter' method that can be chained -func (o *PlexAttributesType) SetPlexName(newValue string) *PlexAttributesType { - o.PlexNamePtr = &newValue - return o -} - -// PlexResyncPctWithPrecision is a 'getter' method -func (o *PlexAttributesType) PlexResyncPctWithPrecision() string { - r := *o.PlexResyncPctWithPrecisionPtr - return r -} - -// SetPlexResyncPctWithPrecision is a fluent style 'setter' method that can be chained -func (o *PlexAttributesType) SetPlexResyncPctWithPrecision(newValue string) *PlexAttributesType { - o.PlexResyncPctWithPrecisionPtr = &newValue - return o -} - -// PlexStatus is a 'getter' method -func (o *PlexAttributesType) PlexStatus() string { - r := *o.PlexStatusPtr - return r -} - -// SetPlexStatus is a fluent style 'setter' method that can be chained -func (o *PlexAttributesType) SetPlexStatus(newValue string) *PlexAttributesType { - o.PlexStatusPtr = &newValue - return o -} - -// Pool is a 'getter' method -func (o *PlexAttributesType) Pool() int { - r := *o.PoolPtr - return r -} - -// SetPool is a fluent style 'setter' method that can be chained -func (o *PlexAttributesType) SetPool(newValue int) *PlexAttributesType { - o.PoolPtr = &newValue - return o -} - -// PlexAttributesTypeRaidgroups is a wrapper -type PlexAttributesTypeRaidgroups struct { - XMLName xml.Name `xml:"raidgroups"` - RaidgroupAttributesPtr []RaidgroupAttributesType `xml:"raidgroup-attributes"` -} - -// RaidgroupAttributes is a 'getter' method -func (o *PlexAttributesTypeRaidgroups) RaidgroupAttributes() []RaidgroupAttributesType { - r := o.RaidgroupAttributesPtr - return r -} - -// SetRaidgroupAttributes is a fluent style 'setter' method that can be chained -func (o *PlexAttributesTypeRaidgroups) SetRaidgroupAttributes(newValue []RaidgroupAttributesType) *PlexAttributesTypeRaidgroups { - newSlice := make([]RaidgroupAttributesType, len(newValue)) - copy(newSlice, newValue) - o.RaidgroupAttributesPtr = newSlice - return o -} - -// Raidgroups is a 'getter' method -func (o *PlexAttributesType) Raidgroups() PlexAttributesTypeRaidgroups { - r := *o.RaidgroupsPtr - return r -} - -// SetRaidgroups is a fluent style 'setter' method that can be chained -func (o *PlexAttributesType) SetRaidgroups(newValue PlexAttributesTypeRaidgroups) *PlexAttributesType { - o.RaidgroupsPtr = &newValue - return o -} - -// ResyncLevel is a 'getter' method -func (o *PlexAttributesType) ResyncLevel() int { - r := *o.ResyncLevelPtr - return r -} - -// SetResyncLevel is a 
fluent style 'setter' method that can be chained -func (o *PlexAttributesType) SetResyncLevel(newValue int) *PlexAttributesType { - o.ResyncLevelPtr = &newValue - return o -} - -// ResyncingPercentage is a 'getter' method -func (o *PlexAttributesType) ResyncingPercentage() int { - r := *o.ResyncingPercentagePtr - return r -} - -// SetResyncingPercentage is a fluent style 'setter' method that can be chained -func (o *PlexAttributesType) SetResyncingPercentage(newValue int) *PlexAttributesType { - o.ResyncingPercentagePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-protocol.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-protocol.go deleted file mode 100644 index 8bf7a9b32..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-protocol.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// ProtocolType is a structure to represent a protocol ZAPI object -type ProtocolType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-qtree-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-qtree-info.go deleted file mode 100644 index 26d1e35f0..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-qtree-info.go +++ /dev/null @@ -1,162 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QtreeInfoType is a structure to represent a qtree-info ZAPI object -type QtreeInfoType struct { - XMLName xml.Name `xml:"qtree-info"` - ExportPolicyPtr *string `xml:"export-policy"` - IdPtr *int `xml:"id"` - IsExportPolicyInheritedPtr *bool `xml:"is-export-policy-inherited"` - ModePtr *string `xml:"mode"` - OplocksPtr *string `xml:"oplocks"` - QtreePtr *string `xml:"qtree"` - SecurityStylePtr *string `xml:"security-style"` - StatusPtr *string `xml:"status"` - VolumePtr *string `xml:"volume"` - VserverPtr *string `xml:"vserver"` -} - -// NewQtreeInfoType is a factory method for creating new instances of QtreeInfoType objects -func NewQtreeInfoType() *QtreeInfoType { - return &QtreeInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *QtreeInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QtreeInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExportPolicy is a 'getter' method -func (o *QtreeInfoType) ExportPolicy() string { - r := *o.ExportPolicyPtr - return r -} - -// SetExportPolicy is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetExportPolicy(newValue string) *QtreeInfoType { - o.ExportPolicyPtr = &newValue - return o -} - -// Id is a 'getter' method -func (o *QtreeInfoType) Id() int { - r := *o.IdPtr - return r -} - -// SetId is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetId(newValue int) *QtreeInfoType { - o.IdPtr = &newValue - return o -} - -// IsExportPolicyInherited is a 'getter' method -func (o *QtreeInfoType) IsExportPolicyInherited() bool { - r := *o.IsExportPolicyInheritedPtr - return r -} - -// SetIsExportPolicyInherited is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetIsExportPolicyInherited(newValue bool) *QtreeInfoType { - 
o.IsExportPolicyInheritedPtr = &newValue - return o -} - -// Mode is a 'getter' method -func (o *QtreeInfoType) Mode() string { - r := *o.ModePtr - return r -} - -// SetMode is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetMode(newValue string) *QtreeInfoType { - o.ModePtr = &newValue - return o -} - -// Oplocks is a 'getter' method -func (o *QtreeInfoType) Oplocks() string { - r := *o.OplocksPtr - return r -} - -// SetOplocks is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetOplocks(newValue string) *QtreeInfoType { - o.OplocksPtr = &newValue - return o -} - -// Qtree is a 'getter' method -func (o *QtreeInfoType) Qtree() string { - r := *o.QtreePtr - return r -} - -// SetQtree is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetQtree(newValue string) *QtreeInfoType { - o.QtreePtr = &newValue - return o -} - -// SecurityStyle is a 'getter' method -func (o *QtreeInfoType) SecurityStyle() string { - r := *o.SecurityStylePtr - return r -} - -// SetSecurityStyle is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetSecurityStyle(newValue string) *QtreeInfoType { - o.SecurityStylePtr = &newValue - return o -} - -// Status is a 'getter' method -func (o *QtreeInfoType) Status() string { - r := *o.StatusPtr - return r -} - -// SetStatus is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetStatus(newValue string) *QtreeInfoType { - o.StatusPtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *QtreeInfoType) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetVolume(newValue string) *QtreeInfoType { - o.VolumePtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *QtreeInfoType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *QtreeInfoType) SetVserver(newValue string) *QtreeInfoType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-quota-entry.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-quota-entry.go deleted file mode 100644 index 38691a64d..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-quota-entry.go +++ /dev/null @@ -1,188 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// QuotaEntryType is a structure to represent a quota-entry ZAPI object -type QuotaEntryType struct { - XMLName xml.Name `xml:"quota-entry"` - DiskLimitPtr *string `xml:"disk-limit"` - FileLimitPtr *string `xml:"file-limit"` - PerformUserMappingPtr *bool `xml:"perform-user-mapping"` - PolicyPtr *string `xml:"policy"` - QtreePtr *string `xml:"qtree"` - QuotaTargetPtr *string `xml:"quota-target"` - QuotaTypePtr *string `xml:"quota-type"` - SoftDiskLimitPtr *string `xml:"soft-disk-limit"` - SoftFileLimitPtr *string `xml:"soft-file-limit"` - ThresholdPtr *string `xml:"threshold"` - VolumePtr *string `xml:"volume"` - VserverPtr *string `xml:"vserver"` -} - -// NewQuotaEntryType is a factory method for creating new instances of QuotaEntryType objects -func NewQuotaEntryType() *QuotaEntryType { - return &QuotaEntryType{} -} - -// ToXML converts this object into an xml string representation -func (o *QuotaEntryType) ToXML() (string, error) { - output, err := 
xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o QuotaEntryType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// DiskLimit is a 'getter' method -func (o *QuotaEntryType) DiskLimit() string { - r := *o.DiskLimitPtr - return r -} - -// SetDiskLimit is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetDiskLimit(newValue string) *QuotaEntryType { - o.DiskLimitPtr = &newValue - return o -} - -// FileLimit is a 'getter' method -func (o *QuotaEntryType) FileLimit() string { - r := *o.FileLimitPtr - return r -} - -// SetFileLimit is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetFileLimit(newValue string) *QuotaEntryType { - o.FileLimitPtr = &newValue - return o -} - -// PerformUserMapping is a 'getter' method -func (o *QuotaEntryType) PerformUserMapping() bool { - r := *o.PerformUserMappingPtr - return r -} - -// SetPerformUserMapping is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetPerformUserMapping(newValue bool) *QuotaEntryType { - o.PerformUserMappingPtr = &newValue - return o -} - -// Policy is a 'getter' method -func (o *QuotaEntryType) Policy() string { - r := *o.PolicyPtr - return r -} - -// SetPolicy is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetPolicy(newValue string) *QuotaEntryType { - o.PolicyPtr = &newValue - return o -} - -// Qtree is a 'getter' method -func (o *QuotaEntryType) Qtree() string { - r := *o.QtreePtr - return r -} - -// SetQtree is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetQtree(newValue string) *QuotaEntryType { - o.QtreePtr = &newValue - return o -} - -// QuotaTarget is a 'getter' method -func (o *QuotaEntryType) QuotaTarget() string { - r := *o.QuotaTargetPtr - return r -} - -// SetQuotaTarget is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetQuotaTarget(newValue string) *QuotaEntryType { - o.QuotaTargetPtr = &newValue - return o -} - -// QuotaType is a 'getter' method -func (o *QuotaEntryType) QuotaType() string { - r := *o.QuotaTypePtr - return r -} - -// SetQuotaType is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetQuotaType(newValue string) *QuotaEntryType { - o.QuotaTypePtr = &newValue - return o -} - -// SoftDiskLimit is a 'getter' method -func (o *QuotaEntryType) SoftDiskLimit() string { - r := *o.SoftDiskLimitPtr - return r -} - -// SetSoftDiskLimit is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetSoftDiskLimit(newValue string) *QuotaEntryType { - o.SoftDiskLimitPtr = &newValue - return o -} - -// SoftFileLimit is a 'getter' method -func (o *QuotaEntryType) SoftFileLimit() string { - r := *o.SoftFileLimitPtr - return r -} - -// SetSoftFileLimit is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetSoftFileLimit(newValue string) *QuotaEntryType { - o.SoftFileLimitPtr = &newValue - return o -} - -// Threshold is a 'getter' method -func (o *QuotaEntryType) Threshold() string { - r := *o.ThresholdPtr - return r -} - -// SetThreshold is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetThreshold(newValue string) *QuotaEntryType { - o.ThresholdPtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *QuotaEntryType) Volume() 
string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetVolume(newValue string) *QuotaEntryType { - o.VolumePtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *QuotaEntryType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *QuotaEntryType) SetVserver(newValue string) *QuotaEntryType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-raidgroup-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-raidgroup-attributes.go deleted file mode 100644 index 9259869be..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-raidgroup-attributes.go +++ /dev/null @@ -1,123 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// RaidgroupAttributesType is a structure to represent a raidgroup-attributes ZAPI object -type RaidgroupAttributesType struct { - XMLName xml.Name `xml:"raidgroup-attributes"` - ChecksumStylePtr *string `xml:"checksum-style"` - IsCacheTierPtr *bool `xml:"is-cache-tier"` - IsRecomputingParityPtr *bool `xml:"is-recomputing-parity"` - IsReconstructingPtr *bool `xml:"is-reconstructing"` - RaidgroupNamePtr *string `xml:"raidgroup-name"` - RecomputingParityPercentagePtr *int `xml:"recomputing-parity-percentage"` - ReconstructionPercentagePtr *int `xml:"reconstruction-percentage"` -} - -// NewRaidgroupAttributesType is a factory method for creating new instances of RaidgroupAttributesType objects -func NewRaidgroupAttributesType() *RaidgroupAttributesType { - return &RaidgroupAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *RaidgroupAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o RaidgroupAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ChecksumStyle is a 'getter' method -func (o *RaidgroupAttributesType) ChecksumStyle() string { - r := *o.ChecksumStylePtr - return r -} - -// SetChecksumStyle is a fluent style 'setter' method that can be chained -func (o *RaidgroupAttributesType) SetChecksumStyle(newValue string) *RaidgroupAttributesType { - o.ChecksumStylePtr = &newValue - return o -} - -// IsCacheTier is a 'getter' method -func (o *RaidgroupAttributesType) IsCacheTier() bool { - r := *o.IsCacheTierPtr - return r -} - -// SetIsCacheTier is a fluent style 'setter' method that can be chained -func (o *RaidgroupAttributesType) SetIsCacheTier(newValue bool) *RaidgroupAttributesType { - o.IsCacheTierPtr = &newValue - return o -} - -// IsRecomputingParity is a 'getter' method -func (o *RaidgroupAttributesType) IsRecomputingParity() bool { - r := *o.IsRecomputingParityPtr - return r -} - -// SetIsRecomputingParity is a fluent style 'setter' method that can be chained -func (o *RaidgroupAttributesType) SetIsRecomputingParity(newValue bool) *RaidgroupAttributesType { - o.IsRecomputingParityPtr = &newValue - return o -} - -// IsReconstructing is a 'getter' method -func (o *RaidgroupAttributesType) IsReconstructing() bool { - r := *o.IsReconstructingPtr - return r -} - -// SetIsReconstructing is a 
fluent style 'setter' method that can be chained -func (o *RaidgroupAttributesType) SetIsReconstructing(newValue bool) *RaidgroupAttributesType { - o.IsReconstructingPtr = &newValue - return o -} - -// RaidgroupName is a 'getter' method -func (o *RaidgroupAttributesType) RaidgroupName() string { - r := *o.RaidgroupNamePtr - return r -} - -// SetRaidgroupName is a fluent style 'setter' method that can be chained -func (o *RaidgroupAttributesType) SetRaidgroupName(newValue string) *RaidgroupAttributesType { - o.RaidgroupNamePtr = &newValue - return o -} - -// RecomputingParityPercentage is a 'getter' method -func (o *RaidgroupAttributesType) RecomputingParityPercentage() int { - r := *o.RecomputingParityPercentagePtr - return r -} - -// SetRecomputingParityPercentage is a fluent style 'setter' method that can be chained -func (o *RaidgroupAttributesType) SetRecomputingParityPercentage(newValue int) *RaidgroupAttributesType { - o.RecomputingParityPercentagePtr = &newValue - return o -} - -// ReconstructionPercentage is a 'getter' method -func (o *RaidgroupAttributesType) ReconstructionPercentage() int { - r := *o.ReconstructionPercentagePtr - return r -} - -// SetReconstructionPercentage is a fluent style 'setter' method that can be chained -func (o *RaidgroupAttributesType) SetReconstructionPercentage(newValue int) *RaidgroupAttributesType { - o.ReconstructionPercentagePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-repos-constituent-role.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-repos-constituent-role.go deleted file mode 100644 index ef17064d2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-repos-constituent-role.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// ReposConstituentRoleType is a structure to represent a repos-constituent-role ZAPI object -type ReposConstituentRoleType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-routing-group-type.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-routing-group-type.go deleted file mode 100644 index 48008393e..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-routing-group-type.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// RoutingGroupTypeType is a structure to represent a routing-group-type ZAPI object -type RoutingGroupTypeType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-security-flavor.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-security-flavor.go deleted file mode 100644 index ac2f82427..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-security-flavor.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// SecurityFlavorType is a structure to represent a security-flavor ZAPI object -type SecurityFlavorType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-security-style-enum.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-security-style-enum.go deleted file mode 100644 index fabe00926..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-security-style-enum.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// SecurityStyleEnumType is a structure to represent a security-style-enum ZAPI object -type SecurityStyleEnumType = string diff --git 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-show-aggregates.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-show-aggregates.go deleted file mode 100644 index 8df88e044..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-show-aggregates.go +++ /dev/null @@ -1,97 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// ShowAggregatesType is a structure to represent a show-aggregates ZAPI object -type ShowAggregatesType struct { - XMLName xml.Name `xml:"show-aggregates"` - AggregateNamePtr *AggrNameType `xml:"aggregate-name"` - AggregateTypePtr *AggregatetypeType `xml:"aggregate-type"` - AvailableSizePtr *SizeType `xml:"available-size"` - SnaplockTypePtr *SnaplocktypeType `xml:"snaplock-type"` - VserverNamePtr *string `xml:"vserver-name"` -} - -// NewShowAggregatesType is a factory method for creating new instances of ShowAggregatesType objects -func NewShowAggregatesType() *ShowAggregatesType { - return &ShowAggregatesType{} -} - -// ToXML converts this object into an xml string representation -func (o *ShowAggregatesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o ShowAggregatesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AggregateName is a 'getter' method -func (o *ShowAggregatesType) AggregateName() AggrNameType { - r := *o.AggregateNamePtr - return r -} - -// SetAggregateName is a fluent style 'setter' method that can be chained -func (o *ShowAggregatesType) SetAggregateName(newValue AggrNameType) *ShowAggregatesType { - o.AggregateNamePtr = &newValue - return o -} - -// AggregateType is a 'getter' method -func (o *ShowAggregatesType) AggregateType() AggregatetypeType { - r := *o.AggregateTypePtr - return r -} - -// SetAggregateType is a fluent style 'setter' method that can be chained -func (o *ShowAggregatesType) SetAggregateType(newValue AggregatetypeType) *ShowAggregatesType { - o.AggregateTypePtr = &newValue - return o -} - -// AvailableSize is a 'getter' method -func (o *ShowAggregatesType) AvailableSize() SizeType { - r := *o.AvailableSizePtr - return r -} - -// SetAvailableSize is a fluent style 'setter' method that can be chained -func (o *ShowAggregatesType) SetAvailableSize(newValue SizeType) *ShowAggregatesType { - o.AvailableSizePtr = &newValue - return o -} - -// SnaplockType is a 'getter' method -func (o *ShowAggregatesType) SnaplockType() SnaplocktypeType { - r := *o.SnaplockTypePtr - return r -} - -// SetSnaplockType is a fluent style 'setter' method that can be chained -func (o *ShowAggregatesType) SetSnaplockType(newValue SnaplocktypeType) *ShowAggregatesType { - o.SnaplockTypePtr = &newValue - return o -} - -// VserverName is a 'getter' method -func (o *ShowAggregatesType) VserverName() string { - r := *o.VserverNamePtr - return r -} - -// SetVserverName is a fluent style 'setter' method that can be chained -func (o *ShowAggregatesType) SetVserverName(newValue string) *ShowAggregatesType { - o.VserverNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-size.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-size.go deleted file mode 100644 index b23cdf28f..000000000 --- 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-size.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// SizeType is a structure to represent a size ZAPI object -type SizeType = int diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snaplocktype.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snaplocktype.go deleted file mode 100644 index 7b6a9c488..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snaplocktype.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// SnaplocktypeType is a structure to represent a snaplocktype ZAPI object -type SnaplocktypeType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapmirror-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapmirror-info.go deleted file mode 100644 index 2a2c3475d..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapmirror-info.go +++ /dev/null @@ -1,893 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SnapmirrorInfoType is a structure to represent a snapmirror-info ZAPI object -type SnapmirrorInfoType struct { - XMLName xml.Name `xml:"snapmirror-info"` - BreakFailedCountPtr *int `xml:"break-failed-count"` - BreakSuccessfulCountPtr *int `xml:"break-successful-count"` - CurrentMaxTransferRatePtr *int `xml:"current-max-transfer-rate"` - CurrentOperationIdPtr *string `xml:"current-operation-id"` - CurrentTransferErrorPtr *string `xml:"current-transfer-error"` - CurrentTransferPriorityPtr *string `xml:"current-transfer-priority"` - CurrentTransferTypePtr *string `xml:"current-transfer-type"` - DestinationClusterPtr *string `xml:"destination-cluster"` - DestinationLocationPtr *string `xml:"destination-location"` - DestinationVolumePtr *string `xml:"destination-volume"` - DestinationVolumeNodePtr *string `xml:"destination-volume-node"` - DestinationVserverPtr *string `xml:"destination-vserver"` - DestinationVserverUuidPtr *string `xml:"destination-vserver-uuid"` - ExportedSnapshotPtr *string `xml:"exported-snapshot"` - ExportedSnapshotTimestampPtr *int `xml:"exported-snapshot-timestamp"` - FileRestoreFileCountPtr *int `xml:"file-restore-file-count"` - FileRestoreFileListPtr *SnapmirrorInfoTypeFileRestoreFileList `xml:"file-restore-file-list"` - // work in progress - IdentityPreservePtr *bool `xml:"identity-preserve"` - IsAutoExpandEnabledPtr *bool `xml:"is-auto-expand-enabled"` - IsConstituentPtr *bool `xml:"is-constituent"` - IsHealthyPtr *bool `xml:"is-healthy"` - LagTimePtr *int `xml:"lag-time"` - LastTransferDurationPtr *int `xml:"last-transfer-duration"` - LastTransferEndTimestampPtr *int `xml:"last-transfer-end-timestamp"` - LastTransferErrorPtr *string `xml:"last-transfer-error"` - LastTransferErrorCodesPtr *SnapmirrorInfoTypeLastTransferErrorCodes `xml:"last-transfer-error-codes"` - // work in progress - LastTransferFromPtr *string `xml:"last-transfer-from"` - LastTransferNetworkCompressionRatioPtr *string `xml:"last-transfer-network-compression-ratio"` - LastTransferSizePtr *int `xml:"last-transfer-size"` - LastTransferTypePtr *string `xml:"last-transfer-type"` - MaxTransferRatePtr *int `xml:"max-transfer-rate"` - MirrorStatePtr *string `xml:"mirror-state"` - NetworkCompressionRatioPtr *string `xml:"network-compression-ratio"` - NewestSnapshotPtr *string `xml:"newest-snapshot"` - NewestSnapshotTimestampPtr *int `xml:"newest-snapshot-timestamp"` - 
OpmaskPtr *int `xml:"opmask"` - PolicyPtr *string `xml:"policy"` - PolicyTypePtr *string `xml:"policy-type"` - ProgressLastUpdatedPtr *int `xml:"progress-last-updated"` - RelationshipControlPlanePtr *string `xml:"relationship-control-plane"` - RelationshipGroupTypePtr *string `xml:"relationship-group-type"` - RelationshipIdPtr *string `xml:"relationship-id"` - RelationshipProgressPtr *int `xml:"relationship-progress"` - RelationshipStatusPtr *string `xml:"relationship-status"` - RelationshipTypePtr *string `xml:"relationship-type"` - ResyncFailedCountPtr *int `xml:"resync-failed-count"` - ResyncSuccessfulCountPtr *int `xml:"resync-successful-count"` - SchedulePtr *string `xml:"schedule"` - SnapshotCheckpointPtr *int `xml:"snapshot-checkpoint"` - SnapshotProgressPtr *int `xml:"snapshot-progress"` - SourceClusterPtr *string `xml:"source-cluster"` - SourceLocationPtr *string `xml:"source-location"` - SourceVolumePtr *string `xml:"source-volume"` - SourceVserverPtr *string `xml:"source-vserver"` - SourceVserverUuidPtr *string `xml:"source-vserver-uuid"` - TotalTransferBytesPtr *int `xml:"total-transfer-bytes"` - TotalTransferTimeSecsPtr *int `xml:"total-transfer-time-secs"` - TransferSnapshotPtr *string `xml:"transfer-snapshot"` - TriesPtr *string `xml:"tries"` - UnhealthyReasonPtr *string `xml:"unhealthy-reason"` - UpdateFailedCountPtr *int `xml:"update-failed-count"` - UpdateSuccessfulCountPtr *int `xml:"update-successful-count"` - VserverPtr *string `xml:"vserver"` -} - -// NewSnapmirrorInfoType is a factory method for creating new instances of SnapmirrorInfoType objects -func NewSnapmirrorInfoType() *SnapmirrorInfoType { - return &SnapmirrorInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *SnapmirrorInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapmirrorInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// BreakFailedCount is a 'getter' method -func (o *SnapmirrorInfoType) BreakFailedCount() int { - r := *o.BreakFailedCountPtr - return r -} - -// SetBreakFailedCount is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetBreakFailedCount(newValue int) *SnapmirrorInfoType { - o.BreakFailedCountPtr = &newValue - return o -} - -// BreakSuccessfulCount is a 'getter' method -func (o *SnapmirrorInfoType) BreakSuccessfulCount() int { - r := *o.BreakSuccessfulCountPtr - return r -} - -// SetBreakSuccessfulCount is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetBreakSuccessfulCount(newValue int) *SnapmirrorInfoType { - o.BreakSuccessfulCountPtr = &newValue - return o -} - -// CurrentMaxTransferRate is a 'getter' method -func (o *SnapmirrorInfoType) CurrentMaxTransferRate() int { - r := *o.CurrentMaxTransferRatePtr - return r -} - -// SetCurrentMaxTransferRate is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetCurrentMaxTransferRate(newValue int) *SnapmirrorInfoType { - o.CurrentMaxTransferRatePtr = &newValue - return o -} - -// CurrentOperationId is a 'getter' method -func (o *SnapmirrorInfoType) CurrentOperationId() string { - r := *o.CurrentOperationIdPtr - return r -} - -// SetCurrentOperationId is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) 
SetCurrentOperationId(newValue string) *SnapmirrorInfoType { - o.CurrentOperationIdPtr = &newValue - return o -} - -// CurrentTransferError is a 'getter' method -func (o *SnapmirrorInfoType) CurrentTransferError() string { - r := *o.CurrentTransferErrorPtr - return r -} - -// SetCurrentTransferError is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetCurrentTransferError(newValue string) *SnapmirrorInfoType { - o.CurrentTransferErrorPtr = &newValue - return o -} - -// CurrentTransferPriority is a 'getter' method -func (o *SnapmirrorInfoType) CurrentTransferPriority() string { - r := *o.CurrentTransferPriorityPtr - return r -} - -// SetCurrentTransferPriority is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetCurrentTransferPriority(newValue string) *SnapmirrorInfoType { - o.CurrentTransferPriorityPtr = &newValue - return o -} - -// CurrentTransferType is a 'getter' method -func (o *SnapmirrorInfoType) CurrentTransferType() string { - r := *o.CurrentTransferTypePtr - return r -} - -// SetCurrentTransferType is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetCurrentTransferType(newValue string) *SnapmirrorInfoType { - o.CurrentTransferTypePtr = &newValue - return o -} - -// DestinationCluster is a 'getter' method -func (o *SnapmirrorInfoType) DestinationCluster() string { - r := *o.DestinationClusterPtr - return r -} - -// SetDestinationCluster is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetDestinationCluster(newValue string) *SnapmirrorInfoType { - o.DestinationClusterPtr = &newValue - return o -} - -// DestinationLocation is a 'getter' method -func (o *SnapmirrorInfoType) DestinationLocation() string { - r := *o.DestinationLocationPtr - return r -} - -// SetDestinationLocation is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetDestinationLocation(newValue string) *SnapmirrorInfoType { - o.DestinationLocationPtr = &newValue - return o -} - -// DestinationVolume is a 'getter' method -func (o *SnapmirrorInfoType) DestinationVolume() string { - r := *o.DestinationVolumePtr - return r -} - -// SetDestinationVolume is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetDestinationVolume(newValue string) *SnapmirrorInfoType { - o.DestinationVolumePtr = &newValue - return o -} - -// DestinationVolumeNode is a 'getter' method -func (o *SnapmirrorInfoType) DestinationVolumeNode() string { - r := *o.DestinationVolumeNodePtr - return r -} - -// SetDestinationVolumeNode is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetDestinationVolumeNode(newValue string) *SnapmirrorInfoType { - o.DestinationVolumeNodePtr = &newValue - return o -} - -// DestinationVserver is a 'getter' method -func (o *SnapmirrorInfoType) DestinationVserver() string { - r := *o.DestinationVserverPtr - return r -} - -// SetDestinationVserver is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetDestinationVserver(newValue string) *SnapmirrorInfoType { - o.DestinationVserverPtr = &newValue - return o -} - -// DestinationVserverUuid is a 'getter' method -func (o *SnapmirrorInfoType) DestinationVserverUuid() string { - r := *o.DestinationVserverUuidPtr - return r -} - -// SetDestinationVserverUuid is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetDestinationVserverUuid(newValue string) *SnapmirrorInfoType { - 
o.DestinationVserverUuidPtr = &newValue - return o -} - -// ExportedSnapshot is a 'getter' method -func (o *SnapmirrorInfoType) ExportedSnapshot() string { - r := *o.ExportedSnapshotPtr - return r -} - -// SetExportedSnapshot is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetExportedSnapshot(newValue string) *SnapmirrorInfoType { - o.ExportedSnapshotPtr = &newValue - return o -} - -// ExportedSnapshotTimestamp is a 'getter' method -func (o *SnapmirrorInfoType) ExportedSnapshotTimestamp() int { - r := *o.ExportedSnapshotTimestampPtr - return r -} - -// SetExportedSnapshotTimestamp is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetExportedSnapshotTimestamp(newValue int) *SnapmirrorInfoType { - o.ExportedSnapshotTimestampPtr = &newValue - return o -} - -// FileRestoreFileCount is a 'getter' method -func (o *SnapmirrorInfoType) FileRestoreFileCount() int { - r := *o.FileRestoreFileCountPtr - return r -} - -// SetFileRestoreFileCount is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetFileRestoreFileCount(newValue int) *SnapmirrorInfoType { - o.FileRestoreFileCountPtr = &newValue - return o -} - -// SnapmirrorInfoTypeFileRestoreFileList is a wrapper -type SnapmirrorInfoTypeFileRestoreFileList struct { - XMLName xml.Name `xml:"file-restore-file-list"` - StringPtr []string `xml:"string"` -} - -// String is a 'getter' method -func (o *SnapmirrorInfoTypeFileRestoreFileList) String() []string { - r := o.StringPtr - return r -} - -// SetString is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoTypeFileRestoreFileList) SetString(newValue []string) *SnapmirrorInfoTypeFileRestoreFileList { - newSlice := make([]string, len(newValue)) - copy(newSlice, newValue) - o.StringPtr = newSlice - return o -} - -// FileRestoreFileList is a 'getter' method -func (o *SnapmirrorInfoType) FileRestoreFileList() SnapmirrorInfoTypeFileRestoreFileList { - r := *o.FileRestoreFileListPtr - return r -} - -// SetFileRestoreFileList is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetFileRestoreFileList(newValue SnapmirrorInfoTypeFileRestoreFileList) *SnapmirrorInfoType { - o.FileRestoreFileListPtr = &newValue - return o -} - -// IdentityPreserve is a 'getter' method -func (o *SnapmirrorInfoType) IdentityPreserve() bool { - r := *o.IdentityPreservePtr - return r -} - -// SetIdentityPreserve is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetIdentityPreserve(newValue bool) *SnapmirrorInfoType { - o.IdentityPreservePtr = &newValue - return o -} - -// IsAutoExpandEnabled is a 'getter' method -func (o *SnapmirrorInfoType) IsAutoExpandEnabled() bool { - r := *o.IsAutoExpandEnabledPtr - return r -} - -// SetIsAutoExpandEnabled is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetIsAutoExpandEnabled(newValue bool) *SnapmirrorInfoType { - o.IsAutoExpandEnabledPtr = &newValue - return o -} - -// IsConstituent is a 'getter' method -func (o *SnapmirrorInfoType) IsConstituent() bool { - r := *o.IsConstituentPtr - return r -} - -// SetIsConstituent is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetIsConstituent(newValue bool) *SnapmirrorInfoType { - o.IsConstituentPtr = &newValue - return o -} - -// IsHealthy is a 'getter' method -func (o *SnapmirrorInfoType) IsHealthy() bool { - r := *o.IsHealthyPtr - return r -} - -// SetIsHealthy is a fluent style 'setter' method 
that can be chained -func (o *SnapmirrorInfoType) SetIsHealthy(newValue bool) *SnapmirrorInfoType { - o.IsHealthyPtr = &newValue - return o -} - -// LagTime is a 'getter' method -func (o *SnapmirrorInfoType) LagTime() int { - r := *o.LagTimePtr - return r -} - -// SetLagTime is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetLagTime(newValue int) *SnapmirrorInfoType { - o.LagTimePtr = &newValue - return o -} - -// LastTransferDuration is a 'getter' method -func (o *SnapmirrorInfoType) LastTransferDuration() int { - r := *o.LastTransferDurationPtr - return r -} - -// SetLastTransferDuration is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetLastTransferDuration(newValue int) *SnapmirrorInfoType { - o.LastTransferDurationPtr = &newValue - return o -} - -// LastTransferEndTimestamp is a 'getter' method -func (o *SnapmirrorInfoType) LastTransferEndTimestamp() int { - r := *o.LastTransferEndTimestampPtr - return r -} - -// SetLastTransferEndTimestamp is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetLastTransferEndTimestamp(newValue int) *SnapmirrorInfoType { - o.LastTransferEndTimestampPtr = &newValue - return o -} - -// LastTransferError is a 'getter' method -func (o *SnapmirrorInfoType) LastTransferError() string { - r := *o.LastTransferErrorPtr - return r -} - -// SetLastTransferError is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetLastTransferError(newValue string) *SnapmirrorInfoType { - o.LastTransferErrorPtr = &newValue - return o -} - -// SnapmirrorInfoTypeLastTransferErrorCodes is a wrapper -type SnapmirrorInfoTypeLastTransferErrorCodes struct { - XMLName xml.Name `xml:"last-transfer-error-codes"` - IntegerPtr []int `xml:"integer"` -} - -// Integer is a 'getter' method -func (o *SnapmirrorInfoTypeLastTransferErrorCodes) Integer() []int { - r := o.IntegerPtr - return r -} - -// SetInteger is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoTypeLastTransferErrorCodes) SetInteger(newValue []int) *SnapmirrorInfoTypeLastTransferErrorCodes { - newSlice := make([]int, len(newValue)) - copy(newSlice, newValue) - o.IntegerPtr = newSlice - return o -} - -// LastTransferErrorCodes is a 'getter' method -func (o *SnapmirrorInfoType) LastTransferErrorCodes() SnapmirrorInfoTypeLastTransferErrorCodes { - r := *o.LastTransferErrorCodesPtr - return r -} - -// SetLastTransferErrorCodes is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetLastTransferErrorCodes(newValue SnapmirrorInfoTypeLastTransferErrorCodes) *SnapmirrorInfoType { - o.LastTransferErrorCodesPtr = &newValue - return o -} - -// LastTransferFrom is a 'getter' method -func (o *SnapmirrorInfoType) LastTransferFrom() string { - r := *o.LastTransferFromPtr - return r -} - -// SetLastTransferFrom is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetLastTransferFrom(newValue string) *SnapmirrorInfoType { - o.LastTransferFromPtr = &newValue - return o -} - -// LastTransferNetworkCompressionRatio is a 'getter' method -func (o *SnapmirrorInfoType) LastTransferNetworkCompressionRatio() string { - r := *o.LastTransferNetworkCompressionRatioPtr - return r -} - -// SetLastTransferNetworkCompressionRatio is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetLastTransferNetworkCompressionRatio(newValue string) *SnapmirrorInfoType { - o.LastTransferNetworkCompressionRatioPtr = 
&newValue - return o -} - -// LastTransferSize is a 'getter' method -func (o *SnapmirrorInfoType) LastTransferSize() int { - r := *o.LastTransferSizePtr - return r -} - -// SetLastTransferSize is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetLastTransferSize(newValue int) *SnapmirrorInfoType { - o.LastTransferSizePtr = &newValue - return o -} - -// LastTransferType is a 'getter' method -func (o *SnapmirrorInfoType) LastTransferType() string { - r := *o.LastTransferTypePtr - return r -} - -// SetLastTransferType is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetLastTransferType(newValue string) *SnapmirrorInfoType { - o.LastTransferTypePtr = &newValue - return o -} - -// MaxTransferRate is a 'getter' method -func (o *SnapmirrorInfoType) MaxTransferRate() int { - r := *o.MaxTransferRatePtr - return r -} - -// SetMaxTransferRate is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetMaxTransferRate(newValue int) *SnapmirrorInfoType { - o.MaxTransferRatePtr = &newValue - return o -} - -// MirrorState is a 'getter' method -func (o *SnapmirrorInfoType) MirrorState() string { - r := *o.MirrorStatePtr - return r -} - -// SetMirrorState is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetMirrorState(newValue string) *SnapmirrorInfoType { - o.MirrorStatePtr = &newValue - return o -} - -// NetworkCompressionRatio is a 'getter' method -func (o *SnapmirrorInfoType) NetworkCompressionRatio() string { - r := *o.NetworkCompressionRatioPtr - return r -} - -// SetNetworkCompressionRatio is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetNetworkCompressionRatio(newValue string) *SnapmirrorInfoType { - o.NetworkCompressionRatioPtr = &newValue - return o -} - -// NewestSnapshot is a 'getter' method -func (o *SnapmirrorInfoType) NewestSnapshot() string { - r := *o.NewestSnapshotPtr - return r -} - -// SetNewestSnapshot is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetNewestSnapshot(newValue string) *SnapmirrorInfoType { - o.NewestSnapshotPtr = &newValue - return o -} - -// NewestSnapshotTimestamp is a 'getter' method -func (o *SnapmirrorInfoType) NewestSnapshotTimestamp() int { - r := *o.NewestSnapshotTimestampPtr - return r -} - -// SetNewestSnapshotTimestamp is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetNewestSnapshotTimestamp(newValue int) *SnapmirrorInfoType { - o.NewestSnapshotTimestampPtr = &newValue - return o -} - -// Opmask is a 'getter' method -func (o *SnapmirrorInfoType) Opmask() int { - r := *o.OpmaskPtr - return r -} - -// SetOpmask is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetOpmask(newValue int) *SnapmirrorInfoType { - o.OpmaskPtr = &newValue - return o -} - -// Policy is a 'getter' method -func (o *SnapmirrorInfoType) Policy() string { - r := *o.PolicyPtr - return r -} - -// SetPolicy is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetPolicy(newValue string) *SnapmirrorInfoType { - o.PolicyPtr = &newValue - return o -} - -// PolicyType is a 'getter' method -func (o *SnapmirrorInfoType) PolicyType() string { - r := *o.PolicyTypePtr - return r -} - -// SetPolicyType is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetPolicyType(newValue string) *SnapmirrorInfoType { - o.PolicyTypePtr = &newValue - return o -} - -// 
ProgressLastUpdated is a 'getter' method -func (o *SnapmirrorInfoType) ProgressLastUpdated() int { - r := *o.ProgressLastUpdatedPtr - return r -} - -// SetProgressLastUpdated is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetProgressLastUpdated(newValue int) *SnapmirrorInfoType { - o.ProgressLastUpdatedPtr = &newValue - return o -} - -// RelationshipControlPlane is a 'getter' method -func (o *SnapmirrorInfoType) RelationshipControlPlane() string { - r := *o.RelationshipControlPlanePtr - return r -} - -// SetRelationshipControlPlane is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetRelationshipControlPlane(newValue string) *SnapmirrorInfoType { - o.RelationshipControlPlanePtr = &newValue - return o -} - -// RelationshipGroupType is a 'getter' method -func (o *SnapmirrorInfoType) RelationshipGroupType() string { - r := *o.RelationshipGroupTypePtr - return r -} - -// SetRelationshipGroupType is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetRelationshipGroupType(newValue string) *SnapmirrorInfoType { - o.RelationshipGroupTypePtr = &newValue - return o -} - -// RelationshipId is a 'getter' method -func (o *SnapmirrorInfoType) RelationshipId() string { - r := *o.RelationshipIdPtr - return r -} - -// SetRelationshipId is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetRelationshipId(newValue string) *SnapmirrorInfoType { - o.RelationshipIdPtr = &newValue - return o -} - -// RelationshipProgress is a 'getter' method -func (o *SnapmirrorInfoType) RelationshipProgress() int { - r := *o.RelationshipProgressPtr - return r -} - -// SetRelationshipProgress is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetRelationshipProgress(newValue int) *SnapmirrorInfoType { - o.RelationshipProgressPtr = &newValue - return o -} - -// RelationshipStatus is a 'getter' method -func (o *SnapmirrorInfoType) RelationshipStatus() string { - r := *o.RelationshipStatusPtr - return r -} - -// SetRelationshipStatus is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetRelationshipStatus(newValue string) *SnapmirrorInfoType { - o.RelationshipStatusPtr = &newValue - return o -} - -// RelationshipType is a 'getter' method -func (o *SnapmirrorInfoType) RelationshipType() string { - r := *o.RelationshipTypePtr - return r -} - -// SetRelationshipType is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetRelationshipType(newValue string) *SnapmirrorInfoType { - o.RelationshipTypePtr = &newValue - return o -} - -// ResyncFailedCount is a 'getter' method -func (o *SnapmirrorInfoType) ResyncFailedCount() int { - r := *o.ResyncFailedCountPtr - return r -} - -// SetResyncFailedCount is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetResyncFailedCount(newValue int) *SnapmirrorInfoType { - o.ResyncFailedCountPtr = &newValue - return o -} - -// ResyncSuccessfulCount is a 'getter' method -func (o *SnapmirrorInfoType) ResyncSuccessfulCount() int { - r := *o.ResyncSuccessfulCountPtr - return r -} - -// SetResyncSuccessfulCount is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetResyncSuccessfulCount(newValue int) *SnapmirrorInfoType { - o.ResyncSuccessfulCountPtr = &newValue - return o -} - -// Schedule is a 'getter' method -func (o *SnapmirrorInfoType) Schedule() string { - r := *o.SchedulePtr - return r -} - -// SetSchedule is 
a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetSchedule(newValue string) *SnapmirrorInfoType { - o.SchedulePtr = &newValue - return o -} - -// SnapshotCheckpoint is a 'getter' method -func (o *SnapmirrorInfoType) SnapshotCheckpoint() int { - r := *o.SnapshotCheckpointPtr - return r -} - -// SetSnapshotCheckpoint is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetSnapshotCheckpoint(newValue int) *SnapmirrorInfoType { - o.SnapshotCheckpointPtr = &newValue - return o -} - -// SnapshotProgress is a 'getter' method -func (o *SnapmirrorInfoType) SnapshotProgress() int { - r := *o.SnapshotProgressPtr - return r -} - -// SetSnapshotProgress is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetSnapshotProgress(newValue int) *SnapmirrorInfoType { - o.SnapshotProgressPtr = &newValue - return o -} - -// SourceCluster is a 'getter' method -func (o *SnapmirrorInfoType) SourceCluster() string { - r := *o.SourceClusterPtr - return r -} - -// SetSourceCluster is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetSourceCluster(newValue string) *SnapmirrorInfoType { - o.SourceClusterPtr = &newValue - return o -} - -// SourceLocation is a 'getter' method -func (o *SnapmirrorInfoType) SourceLocation() string { - r := *o.SourceLocationPtr - return r -} - -// SetSourceLocation is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetSourceLocation(newValue string) *SnapmirrorInfoType { - o.SourceLocationPtr = &newValue - return o -} - -// SourceVolume is a 'getter' method -func (o *SnapmirrorInfoType) SourceVolume() string { - r := *o.SourceVolumePtr - return r -} - -// SetSourceVolume is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetSourceVolume(newValue string) *SnapmirrorInfoType { - o.SourceVolumePtr = &newValue - return o -} - -// SourceVserver is a 'getter' method -func (o *SnapmirrorInfoType) SourceVserver() string { - r := *o.SourceVserverPtr - return r -} - -// SetSourceVserver is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetSourceVserver(newValue string) *SnapmirrorInfoType { - o.SourceVserverPtr = &newValue - return o -} - -// SourceVserverUuid is a 'getter' method -func (o *SnapmirrorInfoType) SourceVserverUuid() string { - r := *o.SourceVserverUuidPtr - return r -} - -// SetSourceVserverUuid is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetSourceVserverUuid(newValue string) *SnapmirrorInfoType { - o.SourceVserverUuidPtr = &newValue - return o -} - -// TotalTransferBytes is a 'getter' method -func (o *SnapmirrorInfoType) TotalTransferBytes() int { - r := *o.TotalTransferBytesPtr - return r -} - -// SetTotalTransferBytes is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetTotalTransferBytes(newValue int) *SnapmirrorInfoType { - o.TotalTransferBytesPtr = &newValue - return o -} - -// TotalTransferTimeSecs is a 'getter' method -func (o *SnapmirrorInfoType) TotalTransferTimeSecs() int { - r := *o.TotalTransferTimeSecsPtr - return r -} - -// SetTotalTransferTimeSecs is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetTotalTransferTimeSecs(newValue int) *SnapmirrorInfoType { - o.TotalTransferTimeSecsPtr = &newValue - return o -} - -// TransferSnapshot is a 'getter' method -func (o *SnapmirrorInfoType) TransferSnapshot() string { - r := *o.TransferSnapshotPtr 
- return r -} - -// SetTransferSnapshot is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetTransferSnapshot(newValue string) *SnapmirrorInfoType { - o.TransferSnapshotPtr = &newValue - return o -} - -// Tries is a 'getter' method -func (o *SnapmirrorInfoType) Tries() string { - r := *o.TriesPtr - return r -} - -// SetTries is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetTries(newValue string) *SnapmirrorInfoType { - o.TriesPtr = &newValue - return o -} - -// UnhealthyReason is a 'getter' method -func (o *SnapmirrorInfoType) UnhealthyReason() string { - r := *o.UnhealthyReasonPtr - return r -} - -// SetUnhealthyReason is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetUnhealthyReason(newValue string) *SnapmirrorInfoType { - o.UnhealthyReasonPtr = &newValue - return o -} - -// UpdateFailedCount is a 'getter' method -func (o *SnapmirrorInfoType) UpdateFailedCount() int { - r := *o.UpdateFailedCountPtr - return r -} - -// SetUpdateFailedCount is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetUpdateFailedCount(newValue int) *SnapmirrorInfoType { - o.UpdateFailedCountPtr = &newValue - return o -} - -// UpdateSuccessfulCount is a 'getter' method -func (o *SnapmirrorInfoType) UpdateSuccessfulCount() int { - r := *o.UpdateSuccessfulCountPtr - return r -} - -// SetUpdateSuccessfulCount is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetUpdateSuccessfulCount(newValue int) *SnapmirrorInfoType { - o.UpdateSuccessfulCountPtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *SnapmirrorInfoType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *SnapmirrorInfoType) SetVserver(newValue string) *SnapmirrorInfoType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapshot-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapshot-info.go deleted file mode 100644 index 46009d27e..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapshot-info.go +++ /dev/null @@ -1,456 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SnapshotInfoType is a structure to represent a snapshot-info ZAPI object -type SnapshotInfoType struct { - XMLName xml.Name `xml:"snapshot-info"` - AccessTimePtr *int `xml:"access-time"` - AfsUsedPtr *int `xml:"afs-used"` - BusyPtr *bool `xml:"busy"` - CommentPtr *string `xml:"comment"` - CompressSavingsPtr *int `xml:"compress-savings"` - CompressionTypePtr *string `xml:"compression-type"` - ContainsLunClonesPtr *bool `xml:"contains-lun-clones"` - CumulativePercentageOfTotalBlocksPtr *int `xml:"cumulative-percentage-of-total-blocks"` - CumulativePercentageOfUsedBlocksPtr *int `xml:"cumulative-percentage-of-used-blocks"` - CumulativeTotalPtr *int `xml:"cumulative-total"` - DedupSavingsPtr *int `xml:"dedup-savings"` - DependencyPtr *string `xml:"dependency"` - ExpiryTimePtr *int `xml:"expiry-time"` - InfiniteSnaplockExpiryTimePtr *bool `xml:"infinite-snaplock-expiry-time"` - InofileVersionPtr *int `xml:"inofile-version"` - Is7ModeSnapshotPtr *bool `xml:"is-7-mode-snapshot"` - IsConstituentSnapshotPtr *bool `xml:"is-constituent-snapshot"` - NamePtr *string `xml:"name"` - PercentageOfTotalBlocksPtr *int 
`xml:"percentage-of-total-blocks"` - PercentageOfUsedBlocksPtr *int `xml:"percentage-of-used-blocks"` - SnaplockExpiryTimePtr *int `xml:"snaplock-expiry-time"` - SnapmirrorLabelPtr *string `xml:"snapmirror-label"` - SnapshotInstanceUuidPtr *UUIDType `xml:"snapshot-instance-uuid"` - SnapshotOwnersListPtr *SnapshotInfoTypeSnapshotOwnersList `xml:"snapshot-owners-list"` - // work in progress - SnapshotVersionUuidPtr *UUIDType `xml:"snapshot-version-uuid"` - StatePtr *string `xml:"state"` - TotalPtr *int `xml:"total"` - Vbn0SavingsPtr *int `xml:"vbn0-savings"` - VolumePtr *string `xml:"volume"` - VolumeProvenanceUuidPtr *UUIDType `xml:"volume-provenance-uuid"` - VserverPtr *string `xml:"vserver"` -} - -// NewSnapshotInfoType is a factory method for creating new instances of SnapshotInfoType objects -func NewSnapshotInfoType() *SnapshotInfoType { - return &SnapshotInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AccessTime is a 'getter' method -func (o *SnapshotInfoType) AccessTime() int { - r := *o.AccessTimePtr - return r -} - -// SetAccessTime is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetAccessTime(newValue int) *SnapshotInfoType { - o.AccessTimePtr = &newValue - return o -} - -// AfsUsed is a 'getter' method -func (o *SnapshotInfoType) AfsUsed() int { - r := *o.AfsUsedPtr - return r -} - -// SetAfsUsed is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetAfsUsed(newValue int) *SnapshotInfoType { - o.AfsUsedPtr = &newValue - return o -} - -// Busy is a 'getter' method -func (o *SnapshotInfoType) Busy() bool { - r := *o.BusyPtr - return r -} - -// SetBusy is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetBusy(newValue bool) *SnapshotInfoType { - o.BusyPtr = &newValue - return o -} - -// Comment is a 'getter' method -func (o *SnapshotInfoType) Comment() string { - r := *o.CommentPtr - return r -} - -// SetComment is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetComment(newValue string) *SnapshotInfoType { - o.CommentPtr = &newValue - return o -} - -// CompressSavings is a 'getter' method -func (o *SnapshotInfoType) CompressSavings() int { - r := *o.CompressSavingsPtr - return r -} - -// SetCompressSavings is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetCompressSavings(newValue int) *SnapshotInfoType { - o.CompressSavingsPtr = &newValue - return o -} - -// CompressionType is a 'getter' method -func (o *SnapshotInfoType) CompressionType() string { - r := *o.CompressionTypePtr - return r -} - -// SetCompressionType is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetCompressionType(newValue string) *SnapshotInfoType { - o.CompressionTypePtr = &newValue - return o -} - -// ContainsLunClones is a 'getter' method -func (o *SnapshotInfoType) ContainsLunClones() bool { - r := *o.ContainsLunClonesPtr - return r -} - -// SetContainsLunClones is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetContainsLunClones(newValue bool) *SnapshotInfoType { - 
o.ContainsLunClonesPtr = &newValue - return o -} - -// CumulativePercentageOfTotalBlocks is a 'getter' method -func (o *SnapshotInfoType) CumulativePercentageOfTotalBlocks() int { - r := *o.CumulativePercentageOfTotalBlocksPtr - return r -} - -// SetCumulativePercentageOfTotalBlocks is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetCumulativePercentageOfTotalBlocks(newValue int) *SnapshotInfoType { - o.CumulativePercentageOfTotalBlocksPtr = &newValue - return o -} - -// CumulativePercentageOfUsedBlocks is a 'getter' method -func (o *SnapshotInfoType) CumulativePercentageOfUsedBlocks() int { - r := *o.CumulativePercentageOfUsedBlocksPtr - return r -} - -// SetCumulativePercentageOfUsedBlocks is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetCumulativePercentageOfUsedBlocks(newValue int) *SnapshotInfoType { - o.CumulativePercentageOfUsedBlocksPtr = &newValue - return o -} - -// CumulativeTotal is a 'getter' method -func (o *SnapshotInfoType) CumulativeTotal() int { - r := *o.CumulativeTotalPtr - return r -} - -// SetCumulativeTotal is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetCumulativeTotal(newValue int) *SnapshotInfoType { - o.CumulativeTotalPtr = &newValue - return o -} - -// DedupSavings is a 'getter' method -func (o *SnapshotInfoType) DedupSavings() int { - r := *o.DedupSavingsPtr - return r -} - -// SetDedupSavings is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetDedupSavings(newValue int) *SnapshotInfoType { - o.DedupSavingsPtr = &newValue - return o -} - -// Dependency is a 'getter' method -func (o *SnapshotInfoType) Dependency() string { - r := *o.DependencyPtr - return r -} - -// SetDependency is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetDependency(newValue string) *SnapshotInfoType { - o.DependencyPtr = &newValue - return o -} - -// ExpiryTime is a 'getter' method -func (o *SnapshotInfoType) ExpiryTime() int { - r := *o.ExpiryTimePtr - return r -} - -// SetExpiryTime is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetExpiryTime(newValue int) *SnapshotInfoType { - o.ExpiryTimePtr = &newValue - return o -} - -// InfiniteSnaplockExpiryTime is a 'getter' method -func (o *SnapshotInfoType) InfiniteSnaplockExpiryTime() bool { - r := *o.InfiniteSnaplockExpiryTimePtr - return r -} - -// SetInfiniteSnaplockExpiryTime is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetInfiniteSnaplockExpiryTime(newValue bool) *SnapshotInfoType { - o.InfiniteSnaplockExpiryTimePtr = &newValue - return o -} - -// InofileVersion is a 'getter' method -func (o *SnapshotInfoType) InofileVersion() int { - r := *o.InofileVersionPtr - return r -} - -// SetInofileVersion is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetInofileVersion(newValue int) *SnapshotInfoType { - o.InofileVersionPtr = &newValue - return o -} - -// Is7ModeSnapshot is a 'getter' method -func (o *SnapshotInfoType) Is7ModeSnapshot() bool { - r := *o.Is7ModeSnapshotPtr - return r -} - -// SetIs7ModeSnapshot is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetIs7ModeSnapshot(newValue bool) *SnapshotInfoType { - o.Is7ModeSnapshotPtr = &newValue - return o -} - -// IsConstituentSnapshot is a 'getter' method -func (o *SnapshotInfoType) IsConstituentSnapshot() bool { - r := *o.IsConstituentSnapshotPtr - return r -} - -// 
SetIsConstituentSnapshot is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetIsConstituentSnapshot(newValue bool) *SnapshotInfoType { - o.IsConstituentSnapshotPtr = &newValue - return o -} - -// Name is a 'getter' method -func (o *SnapshotInfoType) Name() string { - r := *o.NamePtr - return r -} - -// SetName is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetName(newValue string) *SnapshotInfoType { - o.NamePtr = &newValue - return o -} - -// PercentageOfTotalBlocks is a 'getter' method -func (o *SnapshotInfoType) PercentageOfTotalBlocks() int { - r := *o.PercentageOfTotalBlocksPtr - return r -} - -// SetPercentageOfTotalBlocks is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetPercentageOfTotalBlocks(newValue int) *SnapshotInfoType { - o.PercentageOfTotalBlocksPtr = &newValue - return o -} - -// PercentageOfUsedBlocks is a 'getter' method -func (o *SnapshotInfoType) PercentageOfUsedBlocks() int { - r := *o.PercentageOfUsedBlocksPtr - return r -} - -// SetPercentageOfUsedBlocks is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetPercentageOfUsedBlocks(newValue int) *SnapshotInfoType { - o.PercentageOfUsedBlocksPtr = &newValue - return o -} - -// SnaplockExpiryTime is a 'getter' method -func (o *SnapshotInfoType) SnaplockExpiryTime() int { - r := *o.SnaplockExpiryTimePtr - return r -} - -// SetSnaplockExpiryTime is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetSnaplockExpiryTime(newValue int) *SnapshotInfoType { - o.SnaplockExpiryTimePtr = &newValue - return o -} - -// SnapmirrorLabel is a 'getter' method -func (o *SnapshotInfoType) SnapmirrorLabel() string { - r := *o.SnapmirrorLabelPtr - return r -} - -// SetSnapmirrorLabel is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetSnapmirrorLabel(newValue string) *SnapshotInfoType { - o.SnapmirrorLabelPtr = &newValue - return o -} - -// SnapshotInstanceUuid is a 'getter' method -func (o *SnapshotInfoType) SnapshotInstanceUuid() UUIDType { - r := *o.SnapshotInstanceUuidPtr - return r -} - -// SetSnapshotInstanceUuid is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetSnapshotInstanceUuid(newValue UUIDType) *SnapshotInfoType { - o.SnapshotInstanceUuidPtr = &newValue - return o -} - -// SnapshotInfoTypeSnapshotOwnersList is a wrapper -type SnapshotInfoTypeSnapshotOwnersList struct { - XMLName xml.Name `xml:"snapshot-owners-list"` - SnapshotOwnerPtr []SnapshotOwnerType `xml:"snapshot-owner"` -} - -// SnapshotOwner is a 'getter' method -func (o *SnapshotInfoTypeSnapshotOwnersList) SnapshotOwner() []SnapshotOwnerType { - r := o.SnapshotOwnerPtr - return r -} - -// SetSnapshotOwner is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoTypeSnapshotOwnersList) SetSnapshotOwner(newValue []SnapshotOwnerType) *SnapshotInfoTypeSnapshotOwnersList { - newSlice := make([]SnapshotOwnerType, len(newValue)) - copy(newSlice, newValue) - o.SnapshotOwnerPtr = newSlice - return o -} - -// SnapshotOwnersList is a 'getter' method -func (o *SnapshotInfoType) SnapshotOwnersList() SnapshotInfoTypeSnapshotOwnersList { - r := *o.SnapshotOwnersListPtr - return r -} - -// SetSnapshotOwnersList is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetSnapshotOwnersList(newValue SnapshotInfoTypeSnapshotOwnersList) *SnapshotInfoType { - o.SnapshotOwnersListPtr = &newValue - return o -} - -// 
SnapshotVersionUuid is a 'getter' method -func (o *SnapshotInfoType) SnapshotVersionUuid() UUIDType { - r := *o.SnapshotVersionUuidPtr - return r -} - -// SetSnapshotVersionUuid is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetSnapshotVersionUuid(newValue UUIDType) *SnapshotInfoType { - o.SnapshotVersionUuidPtr = &newValue - return o -} - -// State is a 'getter' method -func (o *SnapshotInfoType) State() string { - r := *o.StatePtr - return r -} - -// SetState is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetState(newValue string) *SnapshotInfoType { - o.StatePtr = &newValue - return o -} - -// Total is a 'getter' method -func (o *SnapshotInfoType) Total() int { - r := *o.TotalPtr - return r -} - -// SetTotal is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetTotal(newValue int) *SnapshotInfoType { - o.TotalPtr = &newValue - return o -} - -// Vbn0Savings is a 'getter' method -func (o *SnapshotInfoType) Vbn0Savings() int { - r := *o.Vbn0SavingsPtr - return r -} - -// SetVbn0Savings is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetVbn0Savings(newValue int) *SnapshotInfoType { - o.Vbn0SavingsPtr = &newValue - return o -} - -// Volume is a 'getter' method -func (o *SnapshotInfoType) Volume() string { - r := *o.VolumePtr - return r -} - -// SetVolume is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetVolume(newValue string) *SnapshotInfoType { - o.VolumePtr = &newValue - return o -} - -// VolumeProvenanceUuid is a 'getter' method -func (o *SnapshotInfoType) VolumeProvenanceUuid() UUIDType { - r := *o.VolumeProvenanceUuidPtr - return r -} - -// SetVolumeProvenanceUuid is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetVolumeProvenanceUuid(newValue UUIDType) *SnapshotInfoType { - o.VolumeProvenanceUuidPtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *SnapshotInfoType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *SnapshotInfoType) SetVserver(newValue string) *SnapshotInfoType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapshot-owner.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapshot-owner.go deleted file mode 100644 index 3ba4312bd..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapshot-owner.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SnapshotOwnerType is a structure to represent a snapshot-owner ZAPI object -type SnapshotOwnerType struct { - XMLName xml.Name `xml:"snapshot-owner"` - OwnerPtr *string `xml:"owner"` -} - -// NewSnapshotOwnerType is a factory method for creating new instances of SnapshotOwnerType objects -func NewSnapshotOwnerType() *SnapshotOwnerType { - return &SnapshotOwnerType{} -} - -// ToXML converts this object into an xml string representation -func (o *SnapshotOwnerType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SnapshotOwnerType) String() string { - return ToString(reflect.ValueOf(o)) 
-} - -// Owner is a 'getter' method -func (o *SnapshotOwnerType) Owner() string { - r := *o.OwnerPtr - return r -} - -// SetOwner is a fluent style 'setter' method that can be chained -func (o *SnapshotOwnerType) SetOwner(newValue string) *SnapshotOwnerType { - o.OwnerPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapshot-policy.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapshot-policy.go deleted file mode 100644 index 61901e082..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-snapshot-policy.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// SnapshotPolicyType is a structure to represent a snapshot-policy ZAPI object -type SnapshotPolicyType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-space-information.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-space-information.go deleted file mode 100644 index 2c08952cc..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-space-information.go +++ /dev/null @@ -1,370 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SpaceInformationType is a structure to represent a space-information ZAPI object -type SpaceInformationType struct { - XMLName xml.Name `xml:"space-information"` - AggregatePtr *string `xml:"aggregate"` - AggregateMetadataPtr *int `xml:"aggregate-metadata"` - AggregateMetadataPercentPtr *int `xml:"aggregate-metadata-percent"` - AggregateSizePtr *int `xml:"aggregate-size"` - ObjectStoreMetadataPtr *int `xml:"object-store-metadata"` - ObjectStoreMetadataPercentPtr *int `xml:"object-store-metadata-percent"` - ObjectStorePhysicalUsedPtr *int `xml:"object-store-physical-used"` - ObjectStorePhysicalUsedPercentPtr *int `xml:"object-store-physical-used-percent"` - ObjectStoreReferencedCapacityPtr *int `xml:"object-store-referenced-capacity"` - ObjectStoreReferencedCapacityPercentPtr *int `xml:"object-store-referenced-capacity-percent"` - ObjectStoreSisSpaceSavedPtr *int `xml:"object-store-sis-space-saved"` - ObjectStoreSisSpaceSavedPercentPtr *int `xml:"object-store-sis-space-saved-percent"` - ObjectStoreSizePtr *int `xml:"object-store-size"` - ObjectStoreUnreclaimedSpacePtr *int `xml:"object-store-unreclaimed-space"` - ObjectStoreUnreclaimedSpacePercentPtr *int `xml:"object-store-unreclaimed-space-percent"` - PercentSnapshotSpacePtr *int `xml:"percent-snapshot-space"` - PhysicalUsedPtr *int `xml:"physical-used"` - PhysicalUsedPercentPtr *int `xml:"physical-used-percent"` - SnapSizeTotalPtr *int `xml:"snap-size-total"` - SnapshotReserveUnusablePtr *int `xml:"snapshot-reserve-unusable"` - SnapshotReserveUnusablePercentPtr *int `xml:"snapshot-reserve-unusable-percent"` - TierNamePtr *string `xml:"tier-name"` - UsedIncludingSnapshotReservePtr *int `xml:"used-including-snapshot-reserve"` - UsedIncludingSnapshotReservePercentPtr *int `xml:"used-including-snapshot-reserve-percent"` - VolumeFootprintsPtr *int `xml:"volume-footprints"` - VolumeFootprintsPercentPtr *int `xml:"volume-footprints-percent"` -} - -// NewSpaceInformationType is a factory method for creating new instances of SpaceInformationType objects -func NewSpaceInformationType() *SpaceInformationType { - return &SpaceInformationType{} -} - -// ToXML converts this object into an xml string representation -func (o *SpaceInformationType) ToXML() (string, error) { - output, err := 
xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SpaceInformationType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Aggregate is a 'getter' method -func (o *SpaceInformationType) Aggregate() string { - r := *o.AggregatePtr - return r -} - -// SetAggregate is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetAggregate(newValue string) *SpaceInformationType { - o.AggregatePtr = &newValue - return o -} - -// AggregateMetadata is a 'getter' method -func (o *SpaceInformationType) AggregateMetadata() int { - r := *o.AggregateMetadataPtr - return r -} - -// SetAggregateMetadata is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetAggregateMetadata(newValue int) *SpaceInformationType { - o.AggregateMetadataPtr = &newValue - return o -} - -// AggregateMetadataPercent is a 'getter' method -func (o *SpaceInformationType) AggregateMetadataPercent() int { - r := *o.AggregateMetadataPercentPtr - return r -} - -// SetAggregateMetadataPercent is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetAggregateMetadataPercent(newValue int) *SpaceInformationType { - o.AggregateMetadataPercentPtr = &newValue - return o -} - -// AggregateSize is a 'getter' method -func (o *SpaceInformationType) AggregateSize() int { - r := *o.AggregateSizePtr - return r -} - -// SetAggregateSize is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetAggregateSize(newValue int) *SpaceInformationType { - o.AggregateSizePtr = &newValue - return o -} - -// ObjectStoreMetadata is a 'getter' method -func (o *SpaceInformationType) ObjectStoreMetadata() int { - r := *o.ObjectStoreMetadataPtr - return r -} - -// SetObjectStoreMetadata is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStoreMetadata(newValue int) *SpaceInformationType { - o.ObjectStoreMetadataPtr = &newValue - return o -} - -// ObjectStoreMetadataPercent is a 'getter' method -func (o *SpaceInformationType) ObjectStoreMetadataPercent() int { - r := *o.ObjectStoreMetadataPercentPtr - return r -} - -// SetObjectStoreMetadataPercent is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStoreMetadataPercent(newValue int) *SpaceInformationType { - o.ObjectStoreMetadataPercentPtr = &newValue - return o -} - -// ObjectStorePhysicalUsed is a 'getter' method -func (o *SpaceInformationType) ObjectStorePhysicalUsed() int { - r := *o.ObjectStorePhysicalUsedPtr - return r -} - -// SetObjectStorePhysicalUsed is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStorePhysicalUsed(newValue int) *SpaceInformationType { - o.ObjectStorePhysicalUsedPtr = &newValue - return o -} - -// ObjectStorePhysicalUsedPercent is a 'getter' method -func (o *SpaceInformationType) ObjectStorePhysicalUsedPercent() int { - r := *o.ObjectStorePhysicalUsedPercentPtr - return r -} - -// SetObjectStorePhysicalUsedPercent is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStorePhysicalUsedPercent(newValue int) *SpaceInformationType { - o.ObjectStorePhysicalUsedPercentPtr = &newValue - return o -} - -// ObjectStoreReferencedCapacity is a 'getter' method -func (o *SpaceInformationType) 
ObjectStoreReferencedCapacity() int { - r := *o.ObjectStoreReferencedCapacityPtr - return r -} - -// SetObjectStoreReferencedCapacity is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStoreReferencedCapacity(newValue int) *SpaceInformationType { - o.ObjectStoreReferencedCapacityPtr = &newValue - return o -} - -// ObjectStoreReferencedCapacityPercent is a 'getter' method -func (o *SpaceInformationType) ObjectStoreReferencedCapacityPercent() int { - r := *o.ObjectStoreReferencedCapacityPercentPtr - return r -} - -// SetObjectStoreReferencedCapacityPercent is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStoreReferencedCapacityPercent(newValue int) *SpaceInformationType { - o.ObjectStoreReferencedCapacityPercentPtr = &newValue - return o -} - -// ObjectStoreSisSpaceSaved is a 'getter' method -func (o *SpaceInformationType) ObjectStoreSisSpaceSaved() int { - r := *o.ObjectStoreSisSpaceSavedPtr - return r -} - -// SetObjectStoreSisSpaceSaved is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStoreSisSpaceSaved(newValue int) *SpaceInformationType { - o.ObjectStoreSisSpaceSavedPtr = &newValue - return o -} - -// ObjectStoreSisSpaceSavedPercent is a 'getter' method -func (o *SpaceInformationType) ObjectStoreSisSpaceSavedPercent() int { - r := *o.ObjectStoreSisSpaceSavedPercentPtr - return r -} - -// SetObjectStoreSisSpaceSavedPercent is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStoreSisSpaceSavedPercent(newValue int) *SpaceInformationType { - o.ObjectStoreSisSpaceSavedPercentPtr = &newValue - return o -} - -// ObjectStoreSize is a 'getter' method -func (o *SpaceInformationType) ObjectStoreSize() int { - r := *o.ObjectStoreSizePtr - return r -} - -// SetObjectStoreSize is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStoreSize(newValue int) *SpaceInformationType { - o.ObjectStoreSizePtr = &newValue - return o -} - -// ObjectStoreUnreclaimedSpace is a 'getter' method -func (o *SpaceInformationType) ObjectStoreUnreclaimedSpace() int { - r := *o.ObjectStoreUnreclaimedSpacePtr - return r -} - -// SetObjectStoreUnreclaimedSpace is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStoreUnreclaimedSpace(newValue int) *SpaceInformationType { - o.ObjectStoreUnreclaimedSpacePtr = &newValue - return o -} - -// ObjectStoreUnreclaimedSpacePercent is a 'getter' method -func (o *SpaceInformationType) ObjectStoreUnreclaimedSpacePercent() int { - r := *o.ObjectStoreUnreclaimedSpacePercentPtr - return r -} - -// SetObjectStoreUnreclaimedSpacePercent is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetObjectStoreUnreclaimedSpacePercent(newValue int) *SpaceInformationType { - o.ObjectStoreUnreclaimedSpacePercentPtr = &newValue - return o -} - -// PercentSnapshotSpace is a 'getter' method -func (o *SpaceInformationType) PercentSnapshotSpace() int { - r := *o.PercentSnapshotSpacePtr - return r -} - -// SetPercentSnapshotSpace is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetPercentSnapshotSpace(newValue int) *SpaceInformationType { - o.PercentSnapshotSpacePtr = &newValue - return o -} - -// PhysicalUsed is a 'getter' method -func (o *SpaceInformationType) PhysicalUsed() int { - r := *o.PhysicalUsedPtr - return r -} - -// SetPhysicalUsed is a fluent style 'setter' method that 
can be chained -func (o *SpaceInformationType) SetPhysicalUsed(newValue int) *SpaceInformationType { - o.PhysicalUsedPtr = &newValue - return o -} - -// PhysicalUsedPercent is a 'getter' method -func (o *SpaceInformationType) PhysicalUsedPercent() int { - r := *o.PhysicalUsedPercentPtr - return r -} - -// SetPhysicalUsedPercent is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetPhysicalUsedPercent(newValue int) *SpaceInformationType { - o.PhysicalUsedPercentPtr = &newValue - return o -} - -// SnapSizeTotal is a 'getter' method -func (o *SpaceInformationType) SnapSizeTotal() int { - r := *o.SnapSizeTotalPtr - return r -} - -// SetSnapSizeTotal is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetSnapSizeTotal(newValue int) *SpaceInformationType { - o.SnapSizeTotalPtr = &newValue - return o -} - -// SnapshotReserveUnusable is a 'getter' method -func (o *SpaceInformationType) SnapshotReserveUnusable() int { - r := *o.SnapshotReserveUnusablePtr - return r -} - -// SetSnapshotReserveUnusable is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetSnapshotReserveUnusable(newValue int) *SpaceInformationType { - o.SnapshotReserveUnusablePtr = &newValue - return o -} - -// SnapshotReserveUnusablePercent is a 'getter' method -func (o *SpaceInformationType) SnapshotReserveUnusablePercent() int { - r := *o.SnapshotReserveUnusablePercentPtr - return r -} - -// SetSnapshotReserveUnusablePercent is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetSnapshotReserveUnusablePercent(newValue int) *SpaceInformationType { - o.SnapshotReserveUnusablePercentPtr = &newValue - return o -} - -// TierName is a 'getter' method -func (o *SpaceInformationType) TierName() string { - r := *o.TierNamePtr - return r -} - -// SetTierName is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetTierName(newValue string) *SpaceInformationType { - o.TierNamePtr = &newValue - return o -} - -// UsedIncludingSnapshotReserve is a 'getter' method -func (o *SpaceInformationType) UsedIncludingSnapshotReserve() int { - r := *o.UsedIncludingSnapshotReservePtr - return r -} - -// SetUsedIncludingSnapshotReserve is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetUsedIncludingSnapshotReserve(newValue int) *SpaceInformationType { - o.UsedIncludingSnapshotReservePtr = &newValue - return o -} - -// UsedIncludingSnapshotReservePercent is a 'getter' method -func (o *SpaceInformationType) UsedIncludingSnapshotReservePercent() int { - r := *o.UsedIncludingSnapshotReservePercentPtr - return r -} - -// SetUsedIncludingSnapshotReservePercent is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetUsedIncludingSnapshotReservePercent(newValue int) *SpaceInformationType { - o.UsedIncludingSnapshotReservePercentPtr = &newValue - return o -} - -// VolumeFootprints is a 'getter' method -func (o *SpaceInformationType) VolumeFootprints() int { - r := *o.VolumeFootprintsPtr - return r -} - -// SetVolumeFootprints is a fluent style 'setter' method that can be chained -func (o *SpaceInformationType) SetVolumeFootprints(newValue int) *SpaceInformationType { - o.VolumeFootprintsPtr = &newValue - return o -} - -// VolumeFootprintsPercent is a 'getter' method -func (o *SpaceInformationType) VolumeFootprintsPercent() int { - r := *o.VolumeFootprintsPercentPtr - return r -} - -// SetVolumeFootprintsPercent is a fluent style 
'setter' method that can be chained -func (o *SpaceInformationType) SetVolumeFootprintsPercent(newValue int) *SpaceInformationType { - o.VolumeFootprintsPercentPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-space-slo-enum.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-space-slo-enum.go deleted file mode 100644 index 76e9b6ea2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-space-slo-enum.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// SpaceSloEnumType is a structure to represent a space-slo-enum ZAPI object -type SpaceSloEnumType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-storage-configuration-state-enum.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-storage-configuration-state-enum.go deleted file mode 100644 index a47e02b18..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-storage-configuration-state-enum.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// StorageConfigurationStateEnumType is a structure to represent a storage-configuration-state-enum ZAPI object -type StorageConfigurationStateEnumType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-subnet-name.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-subnet-name.go deleted file mode 100644 index 8038c84a2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-subnet-name.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// SubnetNameType is a structure to represent a subnet-name ZAPI object -type SubnetNameType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-system-version-tuple.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-system-version-tuple.go deleted file mode 100644 index c3f7918d5..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-system-version-tuple.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// SystemVersionTupleType is a structure to represent a system-version-tuple ZAPI object -type SystemVersionTupleType struct { - XMLName xml.Name `xml:"system-version-tuple"` - GenerationPtr *int `xml:"generation"` - MajorPtr *int `xml:"major"` - MinorPtr *int `xml:"minor"` -} - -// NewSystemVersionTupleType is a factory method for creating new instances of SystemVersionTupleType objects -func NewSystemVersionTupleType() *SystemVersionTupleType { - return &SystemVersionTupleType{} -} - -// ToXML converts this object into an xml string representation -func (o *SystemVersionTupleType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o SystemVersionTupleType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Generation is a 'getter' method -func (o *SystemVersionTupleType) Generation() int { - r := *o.GenerationPtr - return r -} - -// SetGeneration is a fluent style 'setter' method that can be chained -func (o *SystemVersionTupleType) SetGeneration(newValue int) *SystemVersionTupleType { - o.GenerationPtr = &newValue - return o -} - -// Major is a 'getter' method -func 
(o *SystemVersionTupleType) Major() int { - r := *o.MajorPtr - return r -} - -// SetMajor is a fluent style 'setter' method that can be chained -func (o *SystemVersionTupleType) SetMajor(newValue int) *SystemVersionTupleType { - o.MajorPtr = &newValue - return o -} - -// Minor is a 'getter' method -func (o *SystemVersionTupleType) Minor() int { - r := *o.MinorPtr - return r -} - -// SetMinor is a fluent style 'setter' method that can be chained -func (o *SystemVersionTupleType) SetMinor(newValue int) *SystemVersionTupleType { - o.MinorPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vm-system-disks.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vm-system-disks.go deleted file mode 100644 index 3e52b60a0..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vm-system-disks.go +++ /dev/null @@ -1,110 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VmSystemDisksType is a structure to represent a vm-system-disks ZAPI object -type VmSystemDisksType struct { - XMLName xml.Name `xml:"vm-system-disks"` - VmBootdiskAreaNamePtr *string `xml:"vm-bootdisk-area-name"` - VmBootdiskFileNamePtr *string `xml:"vm-bootdisk-file-name"` - VmCorediskAreaNamePtr *string `xml:"vm-coredisk-area-name"` - VmCorediskFileNamePtr *string `xml:"vm-coredisk-file-name"` - VmLogdiskAreaNamePtr *string `xml:"vm-logdisk-area-name"` - VmLogdiskFileNamePtr *string `xml:"vm-logdisk-file-name"` -} - -// NewVmSystemDisksType is a factory method for creating new instances of VmSystemDisksType objects -func NewVmSystemDisksType() *VmSystemDisksType { - return &VmSystemDisksType{} -} - -// ToXML converts this object into an xml string representation -func (o *VmSystemDisksType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VmSystemDisksType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VmBootdiskAreaName is a 'getter' method -func (o *VmSystemDisksType) VmBootdiskAreaName() string { - r := *o.VmBootdiskAreaNamePtr - return r -} - -// SetVmBootdiskAreaName is a fluent style 'setter' method that can be chained -func (o *VmSystemDisksType) SetVmBootdiskAreaName(newValue string) *VmSystemDisksType { - o.VmBootdiskAreaNamePtr = &newValue - return o -} - -// VmBootdiskFileName is a 'getter' method -func (o *VmSystemDisksType) VmBootdiskFileName() string { - r := *o.VmBootdiskFileNamePtr - return r -} - -// SetVmBootdiskFileName is a fluent style 'setter' method that can be chained -func (o *VmSystemDisksType) SetVmBootdiskFileName(newValue string) *VmSystemDisksType { - o.VmBootdiskFileNamePtr = &newValue - return o -} - -// VmCorediskAreaName is a 'getter' method -func (o *VmSystemDisksType) VmCorediskAreaName() string { - r := *o.VmCorediskAreaNamePtr - return r -} - -// SetVmCorediskAreaName is a fluent style 'setter' method that can be chained -func (o *VmSystemDisksType) SetVmCorediskAreaName(newValue string) *VmSystemDisksType { - o.VmCorediskAreaNamePtr = &newValue - return o -} - -// VmCorediskFileName is a 'getter' method -func (o *VmSystemDisksType) VmCorediskFileName() string { - r := *o.VmCorediskFileNamePtr - return r -} - -// SetVmCorediskFileName is a fluent style 'setter' method that can be chained 
-func (o *VmSystemDisksType) SetVmCorediskFileName(newValue string) *VmSystemDisksType { - o.VmCorediskFileNamePtr = &newValue - return o -} - -// VmLogdiskAreaName is a 'getter' method -func (o *VmSystemDisksType) VmLogdiskAreaName() string { - r := *o.VmLogdiskAreaNamePtr - return r -} - -// SetVmLogdiskAreaName is a fluent style 'setter' method that can be chained -func (o *VmSystemDisksType) SetVmLogdiskAreaName(newValue string) *VmSystemDisksType { - o.VmLogdiskAreaNamePtr = &newValue - return o -} - -// VmLogdiskFileName is a 'getter' method -func (o *VmSystemDisksType) VmLogdiskFileName() string { - r := *o.VmLogdiskFileNamePtr - return r -} - -// SetVmLogdiskFileName is a fluent style 'setter' method that can be chained -func (o *VmSystemDisksType) SetVmLogdiskFileName(newValue string) *VmSystemDisksType { - o.VmLogdiskFileNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vmhost-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vmhost-info.go deleted file mode 100644 index b49f7473d..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vmhost-info.go +++ /dev/null @@ -1,318 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VmhostInfoType is a structure to represent a vmhost-info ZAPI object -type VmhostInfoType struct { - XMLName xml.Name `xml:"vmhost-info"` - VmCustomMaxCapacityPtr *int `xml:"vm-custom-max-capacity"` - VmUuidPtr *string `xml:"vm-uuid"` - VmhostBiosReleaseDatePtr *string `xml:"vmhost-bios-release-date"` - VmhostBiosVersionPtr *string `xml:"vmhost-bios-version"` - VmhostBootTimePtr *string `xml:"vmhost-boot-time"` - VmhostCpuClockRatePtr *int `xml:"vmhost-cpu-clock-rate"` - VmhostCpuCoreCountPtr *int `xml:"vmhost-cpu-core-count"` - VmhostCpuSocketCountPtr *int `xml:"vmhost-cpu-socket-count"` - VmhostCpuThreadCountPtr *int `xml:"vmhost-cpu-thread-count"` - VmhostErrorPtr *string `xml:"vmhost-error"` - VmhostGatewayPtr *string `xml:"vmhost-gateway"` - VmhostHardwareVendorPtr *string `xml:"vmhost-hardware-vendor"` - VmhostHypervisorPtr *string `xml:"vmhost-hypervisor"` - VmhostIpAddressPtr *string `xml:"vmhost-ip-address"` - VmhostMemoryPtr *int `xml:"vmhost-memory"` - VmhostModelPtr *string `xml:"vmhost-model"` - VmhostNamePtr *string `xml:"vmhost-name"` - VmhostNetmaskPtr *string `xml:"vmhost-netmask"` - VmhostProcessorIdPtr *string `xml:"vmhost-processor-id"` - VmhostProcessorTypePtr *string `xml:"vmhost-processor-type"` - VmhostSoftwareVendorPtr *string `xml:"vmhost-software-vendor"` - VmhostUuidPtr *string `xml:"vmhost-uuid"` -} - -// NewVmhostInfoType is a factory method for creating new instances of VmhostInfoType objects -func NewVmhostInfoType() *VmhostInfoType { - return &VmhostInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *VmhostInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VmhostInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VmCustomMaxCapacity is a 'getter' method -func (o *VmhostInfoType) VmCustomMaxCapacity() int { - r := *o.VmCustomMaxCapacityPtr - return r -} - -// SetVmCustomMaxCapacity is a fluent style 'setter' method that can be chained -func (o 
*VmhostInfoType) SetVmCustomMaxCapacity(newValue int) *VmhostInfoType { - o.VmCustomMaxCapacityPtr = &newValue - return o -} - -// VmUuid is a 'getter' method -func (o *VmhostInfoType) VmUuid() string { - r := *o.VmUuidPtr - return r -} - -// SetVmUuid is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmUuid(newValue string) *VmhostInfoType { - o.VmUuidPtr = &newValue - return o -} - -// VmhostBiosReleaseDate is a 'getter' method -func (o *VmhostInfoType) VmhostBiosReleaseDate() string { - r := *o.VmhostBiosReleaseDatePtr - return r -} - -// SetVmhostBiosReleaseDate is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostBiosReleaseDate(newValue string) *VmhostInfoType { - o.VmhostBiosReleaseDatePtr = &newValue - return o -} - -// VmhostBiosVersion is a 'getter' method -func (o *VmhostInfoType) VmhostBiosVersion() string { - r := *o.VmhostBiosVersionPtr - return r -} - -// SetVmhostBiosVersion is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostBiosVersion(newValue string) *VmhostInfoType { - o.VmhostBiosVersionPtr = &newValue - return o -} - -// VmhostBootTime is a 'getter' method -func (o *VmhostInfoType) VmhostBootTime() string { - r := *o.VmhostBootTimePtr - return r -} - -// SetVmhostBootTime is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostBootTime(newValue string) *VmhostInfoType { - o.VmhostBootTimePtr = &newValue - return o -} - -// VmhostCpuClockRate is a 'getter' method -func (o *VmhostInfoType) VmhostCpuClockRate() int { - r := *o.VmhostCpuClockRatePtr - return r -} - -// SetVmhostCpuClockRate is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostCpuClockRate(newValue int) *VmhostInfoType { - o.VmhostCpuClockRatePtr = &newValue - return o -} - -// VmhostCpuCoreCount is a 'getter' method -func (o *VmhostInfoType) VmhostCpuCoreCount() int { - r := *o.VmhostCpuCoreCountPtr - return r -} - -// SetVmhostCpuCoreCount is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostCpuCoreCount(newValue int) *VmhostInfoType { - o.VmhostCpuCoreCountPtr = &newValue - return o -} - -// VmhostCpuSocketCount is a 'getter' method -func (o *VmhostInfoType) VmhostCpuSocketCount() int { - r := *o.VmhostCpuSocketCountPtr - return r -} - -// SetVmhostCpuSocketCount is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostCpuSocketCount(newValue int) *VmhostInfoType { - o.VmhostCpuSocketCountPtr = &newValue - return o -} - -// VmhostCpuThreadCount is a 'getter' method -func (o *VmhostInfoType) VmhostCpuThreadCount() int { - r := *o.VmhostCpuThreadCountPtr - return r -} - -// SetVmhostCpuThreadCount is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostCpuThreadCount(newValue int) *VmhostInfoType { - o.VmhostCpuThreadCountPtr = &newValue - return o -} - -// VmhostError is a 'getter' method -func (o *VmhostInfoType) VmhostError() string { - r := *o.VmhostErrorPtr - return r -} - -// SetVmhostError is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostError(newValue string) *VmhostInfoType { - o.VmhostErrorPtr = &newValue - return o -} - -// VmhostGateway is a 'getter' method -func (o *VmhostInfoType) VmhostGateway() string { - r := *o.VmhostGatewayPtr - return r -} - -// SetVmhostGateway is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostGateway(newValue string) 
*VmhostInfoType { - o.VmhostGatewayPtr = &newValue - return o -} - -// VmhostHardwareVendor is a 'getter' method -func (o *VmhostInfoType) VmhostHardwareVendor() string { - r := *o.VmhostHardwareVendorPtr - return r -} - -// SetVmhostHardwareVendor is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostHardwareVendor(newValue string) *VmhostInfoType { - o.VmhostHardwareVendorPtr = &newValue - return o -} - -// VmhostHypervisor is a 'getter' method -func (o *VmhostInfoType) VmhostHypervisor() string { - r := *o.VmhostHypervisorPtr - return r -} - -// SetVmhostHypervisor is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostHypervisor(newValue string) *VmhostInfoType { - o.VmhostHypervisorPtr = &newValue - return o -} - -// VmhostIpAddress is a 'getter' method -func (o *VmhostInfoType) VmhostIpAddress() string { - r := *o.VmhostIpAddressPtr - return r -} - -// SetVmhostIpAddress is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostIpAddress(newValue string) *VmhostInfoType { - o.VmhostIpAddressPtr = &newValue - return o -} - -// VmhostMemory is a 'getter' method -func (o *VmhostInfoType) VmhostMemory() int { - r := *o.VmhostMemoryPtr - return r -} - -// SetVmhostMemory is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostMemory(newValue int) *VmhostInfoType { - o.VmhostMemoryPtr = &newValue - return o -} - -// VmhostModel is a 'getter' method -func (o *VmhostInfoType) VmhostModel() string { - r := *o.VmhostModelPtr - return r -} - -// SetVmhostModel is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostModel(newValue string) *VmhostInfoType { - o.VmhostModelPtr = &newValue - return o -} - -// VmhostName is a 'getter' method -func (o *VmhostInfoType) VmhostName() string { - r := *o.VmhostNamePtr - return r -} - -// SetVmhostName is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostName(newValue string) *VmhostInfoType { - o.VmhostNamePtr = &newValue - return o -} - -// VmhostNetmask is a 'getter' method -func (o *VmhostInfoType) VmhostNetmask() string { - r := *o.VmhostNetmaskPtr - return r -} - -// SetVmhostNetmask is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostNetmask(newValue string) *VmhostInfoType { - o.VmhostNetmaskPtr = &newValue - return o -} - -// VmhostProcessorId is a 'getter' method -func (o *VmhostInfoType) VmhostProcessorId() string { - r := *o.VmhostProcessorIdPtr - return r -} - -// SetVmhostProcessorId is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostProcessorId(newValue string) *VmhostInfoType { - o.VmhostProcessorIdPtr = &newValue - return o -} - -// VmhostProcessorType is a 'getter' method -func (o *VmhostInfoType) VmhostProcessorType() string { - r := *o.VmhostProcessorTypePtr - return r -} - -// SetVmhostProcessorType is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostProcessorType(newValue string) *VmhostInfoType { - o.VmhostProcessorTypePtr = &newValue - return o -} - -// VmhostSoftwareVendor is a 'getter' method -func (o *VmhostInfoType) VmhostSoftwareVendor() string { - r := *o.VmhostSoftwareVendorPtr - return r -} - -// SetVmhostSoftwareVendor is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostSoftwareVendor(newValue string) *VmhostInfoType { - o.VmhostSoftwareVendorPtr = &newValue - return o -} - -// 
VmhostUuid is a 'getter' method -func (o *VmhostInfoType) VmhostUuid() string { - r := *o.VmhostUuidPtr - return r -} - -// SetVmhostUuid is a fluent style 'setter' method that can be chained -func (o *VmhostInfoType) SetVmhostUuid(newValue string) *VmhostInfoType { - o.VmhostUuidPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volstyle.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volstyle.go deleted file mode 100644 index a760acbf4..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volstyle.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// VolstyleType is a structure to represent a volstyle ZAPI object -type VolstyleType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-antivirus-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-antivirus-attributes.go deleted file mode 100644 index 06a248a89..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-antivirus-attributes.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeAntivirusAttributesType is a structure to represent a volume-antivirus-attributes ZAPI object -type VolumeAntivirusAttributesType struct { - XMLName xml.Name `xml:"volume-antivirus-attributes"` - OnAccessPolicyPtr *string `xml:"on-access-policy"` -} - -// NewVolumeAntivirusAttributesType is a factory method for creating new instances of VolumeAntivirusAttributesType objects -func NewVolumeAntivirusAttributesType() *VolumeAntivirusAttributesType { - return &VolumeAntivirusAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeAntivirusAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeAntivirusAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// OnAccessPolicy is a 'getter' method -func (o *VolumeAntivirusAttributesType) OnAccessPolicy() string { - r := *o.OnAccessPolicyPtr - return r -} - -// SetOnAccessPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeAntivirusAttributesType) SetOnAccessPolicy(newValue string) *VolumeAntivirusAttributesType { - o.OnAccessPolicyPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-attributes.go deleted file mode 100644 index 4ba0815b2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-attributes.go +++ /dev/null @@ -1,396 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeAttributesType is a structure to represent a volume-attributes ZAPI object -type VolumeAttributesType struct { - XMLName xml.Name `xml:"volume-attributes"` - EncryptPtr *bool `xml:"encrypt"` - KeyIdPtr *string `xml:"key-id"` - VolumeAntivirusAttributesPtr *VolumeAntivirusAttributesType `xml:"volume-antivirus-attributes"` - VolumeAutobalanceAttributesPtr *VolumeAutobalanceAttributesType `xml:"volume-autobalance-attributes"` - 
VolumeAutosizeAttributesPtr *VolumeAutosizeAttributesType `xml:"volume-autosize-attributes"` - VolumeCloneAttributesPtr *VolumeCloneAttributesType `xml:"volume-clone-attributes"` - VolumeCompAggrAttributesPtr *VolumeCompAggrAttributesType `xml:"volume-comp-aggr-attributes"` - VolumeDirectoryAttributesPtr *VolumeDirectoryAttributesType `xml:"volume-directory-attributes"` - VolumeExportAttributesPtr *VolumeExportAttributesType `xml:"volume-export-attributes"` - VolumeFlexcacheAttributesPtr *VolumeFlexcacheAttributesType `xml:"volume-flexcache-attributes"` - VolumeHybridCacheAttributesPtr *VolumeHybridCacheAttributesType `xml:"volume-hybrid-cache-attributes"` - VolumeIdAttributesPtr *VolumeIdAttributesType `xml:"volume-id-attributes"` - VolumeInfinitevolAttributesPtr *VolumeInfinitevolAttributesType `xml:"volume-infinitevol-attributes"` - VolumeInodeAttributesPtr *VolumeInodeAttributesType `xml:"volume-inode-attributes"` - VolumeLanguageAttributesPtr *VolumeLanguageAttributesType `xml:"volume-language-attributes"` - VolumeMirrorAttributesPtr *VolumeMirrorAttributesType `xml:"volume-mirror-attributes"` - VolumePerformanceAttributesPtr *VolumePerformanceAttributesType `xml:"volume-performance-attributes"` - VolumeQosAttributesPtr *VolumeQosAttributesType `xml:"volume-qos-attributes"` - VolumeSecurityAttributesPtr *VolumeSecurityAttributesType `xml:"volume-security-attributes"` - VolumeSisAttributesPtr *VolumeSisAttributesType `xml:"volume-sis-attributes"` - VolumeSnaplockAttributesPtr *VolumeSnaplockAttributesType `xml:"volume-snaplock-attributes"` - VolumeSnapshotAttributesPtr *VolumeSnapshotAttributesType `xml:"volume-snapshot-attributes"` - VolumeSnapshotAutodeleteAttributesPtr *VolumeSnapshotAutodeleteAttributesType `xml:"volume-snapshot-autodelete-attributes"` - VolumeSpaceAttributesPtr *VolumeSpaceAttributesType `xml:"volume-space-attributes"` - VolumeStateAttributesPtr *VolumeStateAttributesType `xml:"volume-state-attributes"` - VolumeTransitionAttributesPtr *VolumeTransitionAttributesType `xml:"volume-transition-attributes"` - VolumeVmAlignAttributesPtr *VolumeVmAlignAttributesType `xml:"volume-vm-align-attributes"` - VolumeVserverDrProtectionAttributesPtr *VolumeVserverDrProtectionAttributesType `xml:"volume-vserver-dr-protection-attributes"` -} - -// NewVolumeAttributesType is a factory method for creating new instances of VolumeAttributesType objects -func NewVolumeAttributesType() *VolumeAttributesType { - return &VolumeAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Encrypt is a 'getter' method -func (o *VolumeAttributesType) Encrypt() bool { - r := *o.EncryptPtr - return r -} - -// SetEncrypt is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetEncrypt(newValue bool) *VolumeAttributesType { - o.EncryptPtr = &newValue - return o -} - -// KeyId is a 'getter' method -func (o *VolumeAttributesType) KeyId() string { - r := *o.KeyIdPtr - return r -} - -// SetKeyId is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetKeyId(newValue string) *VolumeAttributesType { - o.KeyIdPtr = &newValue - 
return o -} - -// VolumeAntivirusAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeAntivirusAttributes() VolumeAntivirusAttributesType { - r := *o.VolumeAntivirusAttributesPtr - return r -} - -// SetVolumeAntivirusAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeAntivirusAttributes(newValue VolumeAntivirusAttributesType) *VolumeAttributesType { - o.VolumeAntivirusAttributesPtr = &newValue - return o -} - -// VolumeAutobalanceAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeAutobalanceAttributes() VolumeAutobalanceAttributesType { - r := *o.VolumeAutobalanceAttributesPtr - return r -} - -// SetVolumeAutobalanceAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeAutobalanceAttributes(newValue VolumeAutobalanceAttributesType) *VolumeAttributesType { - o.VolumeAutobalanceAttributesPtr = &newValue - return o -} - -// VolumeAutosizeAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeAutosizeAttributes() VolumeAutosizeAttributesType { - r := *o.VolumeAutosizeAttributesPtr - return r -} - -// SetVolumeAutosizeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeAutosizeAttributes(newValue VolumeAutosizeAttributesType) *VolumeAttributesType { - o.VolumeAutosizeAttributesPtr = &newValue - return o -} - -// VolumeCloneAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeCloneAttributes() VolumeCloneAttributesType { - r := *o.VolumeCloneAttributesPtr - return r -} - -// SetVolumeCloneAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeCloneAttributes(newValue VolumeCloneAttributesType) *VolumeAttributesType { - o.VolumeCloneAttributesPtr = &newValue - return o -} - -// VolumeCompAggrAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeCompAggrAttributes() VolumeCompAggrAttributesType { - r := *o.VolumeCompAggrAttributesPtr - return r -} - -// SetVolumeCompAggrAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeCompAggrAttributes(newValue VolumeCompAggrAttributesType) *VolumeAttributesType { - o.VolumeCompAggrAttributesPtr = &newValue - return o -} - -// VolumeDirectoryAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeDirectoryAttributes() VolumeDirectoryAttributesType { - r := *o.VolumeDirectoryAttributesPtr - return r -} - -// SetVolumeDirectoryAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeDirectoryAttributes(newValue VolumeDirectoryAttributesType) *VolumeAttributesType { - o.VolumeDirectoryAttributesPtr = &newValue - return o -} - -// VolumeExportAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeExportAttributes() VolumeExportAttributesType { - r := *o.VolumeExportAttributesPtr - return r -} - -// SetVolumeExportAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeExportAttributes(newValue VolumeExportAttributesType) *VolumeAttributesType { - o.VolumeExportAttributesPtr = &newValue - return o -} - -// VolumeFlexcacheAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeFlexcacheAttributes() VolumeFlexcacheAttributesType { - r := *o.VolumeFlexcacheAttributesPtr - return r -} - -// SetVolumeFlexcacheAttributes is a fluent style 'setter' method that can be chained -func (o 
*VolumeAttributesType) SetVolumeFlexcacheAttributes(newValue VolumeFlexcacheAttributesType) *VolumeAttributesType { - o.VolumeFlexcacheAttributesPtr = &newValue - return o -} - -// VolumeHybridCacheAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeHybridCacheAttributes() VolumeHybridCacheAttributesType { - r := *o.VolumeHybridCacheAttributesPtr - return r -} - -// SetVolumeHybridCacheAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeHybridCacheAttributes(newValue VolumeHybridCacheAttributesType) *VolumeAttributesType { - o.VolumeHybridCacheAttributesPtr = &newValue - return o -} - -// VolumeIdAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeIdAttributes() VolumeIdAttributesType { - r := *o.VolumeIdAttributesPtr - return r -} - -// SetVolumeIdAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeIdAttributes(newValue VolumeIdAttributesType) *VolumeAttributesType { - o.VolumeIdAttributesPtr = &newValue - return o -} - -// VolumeInfinitevolAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeInfinitevolAttributes() VolumeInfinitevolAttributesType { - r := *o.VolumeInfinitevolAttributesPtr - return r -} - -// SetVolumeInfinitevolAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeInfinitevolAttributes(newValue VolumeInfinitevolAttributesType) *VolumeAttributesType { - o.VolumeInfinitevolAttributesPtr = &newValue - return o -} - -// VolumeInodeAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeInodeAttributes() VolumeInodeAttributesType { - r := *o.VolumeInodeAttributesPtr - return r -} - -// SetVolumeInodeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeInodeAttributes(newValue VolumeInodeAttributesType) *VolumeAttributesType { - o.VolumeInodeAttributesPtr = &newValue - return o -} - -// VolumeLanguageAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeLanguageAttributes() VolumeLanguageAttributesType { - r := *o.VolumeLanguageAttributesPtr - return r -} - -// SetVolumeLanguageAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeLanguageAttributes(newValue VolumeLanguageAttributesType) *VolumeAttributesType { - o.VolumeLanguageAttributesPtr = &newValue - return o -} - -// VolumeMirrorAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeMirrorAttributes() VolumeMirrorAttributesType { - r := *o.VolumeMirrorAttributesPtr - return r -} - -// SetVolumeMirrorAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeMirrorAttributes(newValue VolumeMirrorAttributesType) *VolumeAttributesType { - o.VolumeMirrorAttributesPtr = &newValue - return o -} - -// VolumePerformanceAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumePerformanceAttributes() VolumePerformanceAttributesType { - r := *o.VolumePerformanceAttributesPtr - return r -} - -// SetVolumePerformanceAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumePerformanceAttributes(newValue VolumePerformanceAttributesType) *VolumeAttributesType { - o.VolumePerformanceAttributesPtr = &newValue - return o -} - -// VolumeQosAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeQosAttributes() VolumeQosAttributesType { - r := 
*o.VolumeQosAttributesPtr - return r -} - -// SetVolumeQosAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeQosAttributes(newValue VolumeQosAttributesType) *VolumeAttributesType { - o.VolumeQosAttributesPtr = &newValue - return o -} - -// VolumeSecurityAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeSecurityAttributes() VolumeSecurityAttributesType { - r := *o.VolumeSecurityAttributesPtr - return r -} - -// SetVolumeSecurityAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeSecurityAttributes(newValue VolumeSecurityAttributesType) *VolumeAttributesType { - o.VolumeSecurityAttributesPtr = &newValue - return o -} - -// VolumeSisAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeSisAttributes() VolumeSisAttributesType { - r := *o.VolumeSisAttributesPtr - return r -} - -// SetVolumeSisAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeSisAttributes(newValue VolumeSisAttributesType) *VolumeAttributesType { - o.VolumeSisAttributesPtr = &newValue - return o -} - -// VolumeSnaplockAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeSnaplockAttributes() VolumeSnaplockAttributesType { - r := *o.VolumeSnaplockAttributesPtr - return r -} - -// SetVolumeSnaplockAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeSnaplockAttributes(newValue VolumeSnaplockAttributesType) *VolumeAttributesType { - o.VolumeSnaplockAttributesPtr = &newValue - return o -} - -// VolumeSnapshotAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeSnapshotAttributes() VolumeSnapshotAttributesType { - r := *o.VolumeSnapshotAttributesPtr - return r -} - -// SetVolumeSnapshotAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeSnapshotAttributes(newValue VolumeSnapshotAttributesType) *VolumeAttributesType { - o.VolumeSnapshotAttributesPtr = &newValue - return o -} - -// VolumeSnapshotAutodeleteAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeSnapshotAutodeleteAttributes() VolumeSnapshotAutodeleteAttributesType { - r := *o.VolumeSnapshotAutodeleteAttributesPtr - return r -} - -// SetVolumeSnapshotAutodeleteAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeSnapshotAutodeleteAttributes(newValue VolumeSnapshotAutodeleteAttributesType) *VolumeAttributesType { - o.VolumeSnapshotAutodeleteAttributesPtr = &newValue - return o -} - -// VolumeSpaceAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeSpaceAttributes() VolumeSpaceAttributesType { - r := *o.VolumeSpaceAttributesPtr - return r -} - -// SetVolumeSpaceAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeSpaceAttributes(newValue VolumeSpaceAttributesType) *VolumeAttributesType { - o.VolumeSpaceAttributesPtr = &newValue - return o -} - -// VolumeStateAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeStateAttributes() VolumeStateAttributesType { - r := *o.VolumeStateAttributesPtr - return r -} - -// SetVolumeStateAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeStateAttributes(newValue VolumeStateAttributesType) *VolumeAttributesType { - o.VolumeStateAttributesPtr = &newValue - return o -} - -// VolumeTransitionAttributes 
is a 'getter' method -func (o *VolumeAttributesType) VolumeTransitionAttributes() VolumeTransitionAttributesType { - r := *o.VolumeTransitionAttributesPtr - return r -} - -// SetVolumeTransitionAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeTransitionAttributes(newValue VolumeTransitionAttributesType) *VolumeAttributesType { - o.VolumeTransitionAttributesPtr = &newValue - return o -} - -// VolumeVmAlignAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeVmAlignAttributes() VolumeVmAlignAttributesType { - r := *o.VolumeVmAlignAttributesPtr - return r -} - -// SetVolumeVmAlignAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeVmAlignAttributes(newValue VolumeVmAlignAttributesType) *VolumeAttributesType { - o.VolumeVmAlignAttributesPtr = &newValue - return o -} - -// VolumeVserverDrProtectionAttributes is a 'getter' method -func (o *VolumeAttributesType) VolumeVserverDrProtectionAttributes() VolumeVserverDrProtectionAttributesType { - r := *o.VolumeVserverDrProtectionAttributesPtr - return r -} - -// SetVolumeVserverDrProtectionAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeAttributesType) SetVolumeVserverDrProtectionAttributes(newValue VolumeVserverDrProtectionAttributesType) *VolumeAttributesType { - o.VolumeVserverDrProtectionAttributesPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-autobalance-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-autobalance-attributes.go deleted file mode 100644 index b19ed5759..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-autobalance-attributes.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeAutobalanceAttributesType is a structure to represent a volume-autobalance-attributes ZAPI object -type VolumeAutobalanceAttributesType struct { - XMLName xml.Name `xml:"volume-autobalance-attributes"` - IsAutobalanceEligiblePtr *bool `xml:"is-autobalance-eligible"` -} - -// NewVolumeAutobalanceAttributesType is a factory method for creating new instances of VolumeAutobalanceAttributesType objects -func NewVolumeAutobalanceAttributesType() *VolumeAutobalanceAttributesType { - return &VolumeAutobalanceAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeAutobalanceAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeAutobalanceAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IsAutobalanceEligible is a 'getter' method -func (o *VolumeAutobalanceAttributesType) IsAutobalanceEligible() bool { - r := *o.IsAutobalanceEligiblePtr - return r -} - -// SetIsAutobalanceEligible is a fluent style 'setter' method that can be chained -func (o *VolumeAutobalanceAttributesType) SetIsAutobalanceEligible(newValue bool) *VolumeAutobalanceAttributesType { - o.IsAutobalanceEligiblePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-autosize-attributes.go 
b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-autosize-attributes.go deleted file mode 100644 index 98b6bbdb8..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-autosize-attributes.go +++ /dev/null @@ -1,123 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeAutosizeAttributesType is a structure to represent a volume-autosize-attributes ZAPI object -type VolumeAutosizeAttributesType struct { - XMLName xml.Name `xml:"volume-autosize-attributes"` - GrowThresholdPercentPtr *int `xml:"grow-threshold-percent"` - IsEnabledPtr *bool `xml:"is-enabled"` - MaximumSizePtr *int `xml:"maximum-size"` - MinimumSizePtr *int `xml:"minimum-size"` - ModePtr *string `xml:"mode"` - ResetPtr *bool `xml:"reset"` - ShrinkThresholdPercentPtr *int `xml:"shrink-threshold-percent"` -} - -// NewVolumeAutosizeAttributesType is a factory method for creating new instances of VolumeAutosizeAttributesType objects -func NewVolumeAutosizeAttributesType() *VolumeAutosizeAttributesType { - return &VolumeAutosizeAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeAutosizeAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeAutosizeAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// GrowThresholdPercent is a 'getter' method -func (o *VolumeAutosizeAttributesType) GrowThresholdPercent() int { - r := *o.GrowThresholdPercentPtr - return r -} - -// SetGrowThresholdPercent is a fluent style 'setter' method that can be chained -func (o *VolumeAutosizeAttributesType) SetGrowThresholdPercent(newValue int) *VolumeAutosizeAttributesType { - o.GrowThresholdPercentPtr = &newValue - return o -} - -// IsEnabled is a 'getter' method -func (o *VolumeAutosizeAttributesType) IsEnabled() bool { - r := *o.IsEnabledPtr - return r -} - -// SetIsEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeAutosizeAttributesType) SetIsEnabled(newValue bool) *VolumeAutosizeAttributesType { - o.IsEnabledPtr = &newValue - return o -} - -// MaximumSize is a 'getter' method -func (o *VolumeAutosizeAttributesType) MaximumSize() int { - r := *o.MaximumSizePtr - return r -} - -// SetMaximumSize is a fluent style 'setter' method that can be chained -func (o *VolumeAutosizeAttributesType) SetMaximumSize(newValue int) *VolumeAutosizeAttributesType { - o.MaximumSizePtr = &newValue - return o -} - -// MinimumSize is a 'getter' method -func (o *VolumeAutosizeAttributesType) MinimumSize() int { - r := *o.MinimumSizePtr - return r -} - -// SetMinimumSize is a fluent style 'setter' method that can be chained -func (o *VolumeAutosizeAttributesType) SetMinimumSize(newValue int) *VolumeAutosizeAttributesType { - o.MinimumSizePtr = &newValue - return o -} - -// Mode is a 'getter' method -func (o *VolumeAutosizeAttributesType) Mode() string { - r := *o.ModePtr - return r -} - -// SetMode is a fluent style 'setter' method that can be chained -func (o *VolumeAutosizeAttributesType) SetMode(newValue string) *VolumeAutosizeAttributesType { - o.ModePtr = &newValue - return o -} - -// Reset is a 'getter' method -func (o *VolumeAutosizeAttributesType) Reset() bool { - r := *o.ResetPtr - return r -} - -// 
SetReset is a fluent style 'setter' method that can be chained -func (o *VolumeAutosizeAttributesType) SetReset(newValue bool) *VolumeAutosizeAttributesType { - o.ResetPtr = &newValue - return o -} - -// ShrinkThresholdPercent is a 'getter' method -func (o *VolumeAutosizeAttributesType) ShrinkThresholdPercent() int { - r := *o.ShrinkThresholdPercentPtr - return r -} - -// SetShrinkThresholdPercent is a fluent style 'setter' method that can be chained -func (o *VolumeAutosizeAttributesType) SetShrinkThresholdPercent(newValue int) *VolumeAutosizeAttributesType { - o.ShrinkThresholdPercentPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-clone-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-clone-attributes.go deleted file mode 100644 index 6bf6b0895..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-clone-attributes.go +++ /dev/null @@ -1,58 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeCloneAttributesType is a structure to represent a volume-clone-attributes ZAPI object -type VolumeCloneAttributesType struct { - XMLName xml.Name `xml:"volume-clone-attributes"` - CloneChildCountPtr *int `xml:"clone-child-count"` - VolumeCloneParentAttributesPtr *VolumeCloneParentAttributesType `xml:"volume-clone-parent-attributes"` -} - -// NewVolumeCloneAttributesType is a factory method for creating new instances of VolumeCloneAttributesType objects -func NewVolumeCloneAttributesType() *VolumeCloneAttributesType { - return &VolumeCloneAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCloneAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCloneAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// CloneChildCount is a 'getter' method -func (o *VolumeCloneAttributesType) CloneChildCount() int { - r := *o.CloneChildCountPtr - return r -} - -// SetCloneChildCount is a fluent style 'setter' method that can be chained -func (o *VolumeCloneAttributesType) SetCloneChildCount(newValue int) *VolumeCloneAttributesType { - o.CloneChildCountPtr = &newValue - return o -} - -// VolumeCloneParentAttributes is a 'getter' method -func (o *VolumeCloneAttributesType) VolumeCloneParentAttributes() VolumeCloneParentAttributesType { - r := *o.VolumeCloneParentAttributesPtr - return r -} - -// SetVolumeCloneParentAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeCloneAttributesType) SetVolumeCloneParentAttributes(newValue VolumeCloneParentAttributesType) *VolumeCloneAttributesType { - o.VolumeCloneParentAttributesPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-clone-parent-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-clone-parent-attributes.go deleted file mode 100644 index 3260e5a73..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-clone-parent-attributes.go +++ /dev/null @@ -1,123 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// 
VolumeCloneParentAttributesType is a structure to represent a volume-clone-parent-attributes ZAPI object -type VolumeCloneParentAttributesType struct { - XMLName xml.Name `xml:"volume-clone-parent-attributes"` - DsidPtr *int `xml:"dsid"` - MsidPtr *int `xml:"msid"` - NamePtr *VolumeNameType `xml:"name"` - SnapshotIdPtr *int `xml:"snapshot-id"` - SnapshotNamePtr *string `xml:"snapshot-name"` - UuidPtr *UuidType `xml:"uuid"` - VserverNamePtr *VserverNameType `xml:"vserver-name"` -} - -// NewVolumeCloneParentAttributesType is a factory method for creating new instances of VolumeCloneParentAttributesType objects -func NewVolumeCloneParentAttributesType() *VolumeCloneParentAttributesType { - return &VolumeCloneParentAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCloneParentAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCloneParentAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Dsid is a 'getter' method -func (o *VolumeCloneParentAttributesType) Dsid() int { - r := *o.DsidPtr - return r -} - -// SetDsid is a fluent style 'setter' method that can be chained -func (o *VolumeCloneParentAttributesType) SetDsid(newValue int) *VolumeCloneParentAttributesType { - o.DsidPtr = &newValue - return o -} - -// Msid is a 'getter' method -func (o *VolumeCloneParentAttributesType) Msid() int { - r := *o.MsidPtr - return r -} - -// SetMsid is a fluent style 'setter' method that can be chained -func (o *VolumeCloneParentAttributesType) SetMsid(newValue int) *VolumeCloneParentAttributesType { - o.MsidPtr = &newValue - return o -} - -// Name is a 'getter' method -func (o *VolumeCloneParentAttributesType) Name() VolumeNameType { - r := *o.NamePtr - return r -} - -// SetName is a fluent style 'setter' method that can be chained -func (o *VolumeCloneParentAttributesType) SetName(newValue VolumeNameType) *VolumeCloneParentAttributesType { - o.NamePtr = &newValue - return o -} - -// SnapshotId is a 'getter' method -func (o *VolumeCloneParentAttributesType) SnapshotId() int { - r := *o.SnapshotIdPtr - return r -} - -// SetSnapshotId is a fluent style 'setter' method that can be chained -func (o *VolumeCloneParentAttributesType) SetSnapshotId(newValue int) *VolumeCloneParentAttributesType { - o.SnapshotIdPtr = &newValue - return o -} - -// SnapshotName is a 'getter' method -func (o *VolumeCloneParentAttributesType) SnapshotName() string { - r := *o.SnapshotNamePtr - return r -} - -// SetSnapshotName is a fluent style 'setter' method that can be chained -func (o *VolumeCloneParentAttributesType) SetSnapshotName(newValue string) *VolumeCloneParentAttributesType { - o.SnapshotNamePtr = &newValue - return o -} - -// Uuid is a 'getter' method -func (o *VolumeCloneParentAttributesType) Uuid() UuidType { - r := *o.UuidPtr - return r -} - -// SetUuid is a fluent style 'setter' method that can be chained -func (o *VolumeCloneParentAttributesType) SetUuid(newValue UuidType) *VolumeCloneParentAttributesType { - o.UuidPtr = &newValue - return o -} - -// VserverName is a 'getter' method -func (o *VolumeCloneParentAttributesType) VserverName() VserverNameType { - r := *o.VserverNamePtr - return r -} - -// SetVserverName is a fluent style 'setter' method that can be chained -func (o 
*VolumeCloneParentAttributesType) SetVserverName(newValue VserverNameType) *VolumeCloneParentAttributesType { - o.VserverNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-comp-aggr-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-comp-aggr-attributes.go deleted file mode 100644 index 74060d316..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-comp-aggr-attributes.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeCompAggrAttributesType is a structure to represent a volume-comp-aggr-attributes ZAPI object -type VolumeCompAggrAttributesType struct { - XMLName xml.Name `xml:"volume-comp-aggr-attributes"` - TieringPolicyPtr *string `xml:"tiering-policy"` -} - -// NewVolumeCompAggrAttributesType is a factory method for creating new instances of VolumeCompAggrAttributesType objects -func NewVolumeCompAggrAttributesType() *VolumeCompAggrAttributesType { - return &VolumeCompAggrAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeCompAggrAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeCompAggrAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// TieringPolicy is a 'getter' method -func (o *VolumeCompAggrAttributesType) TieringPolicy() string { - r := *o.TieringPolicyPtr - return r -} - -// SetTieringPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeCompAggrAttributesType) SetTieringPolicy(newValue string) *VolumeCompAggrAttributesType { - o.TieringPolicyPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-directory-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-directory-attributes.go deleted file mode 100644 index efa16dea6..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-directory-attributes.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeDirectoryAttributesType is a structure to represent a volume-directory-attributes ZAPI object -type VolumeDirectoryAttributesType struct { - XMLName xml.Name `xml:"volume-directory-attributes"` - I2pEnabledPtr *bool `xml:"i2p-enabled"` - MaxDirSizePtr *int `xml:"max-dir-size"` - RootDirGenPtr *string `xml:"root-dir-gen"` -} - -// NewVolumeDirectoryAttributesType is a factory method for creating new instances of VolumeDirectoryAttributesType objects -func NewVolumeDirectoryAttributesType() *VolumeDirectoryAttributesType { - return &VolumeDirectoryAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeDirectoryAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeDirectoryAttributesType) String() string { - return ToString(reflect.ValueOf(o)) 
-} - -// I2pEnabled is a 'getter' method -func (o *VolumeDirectoryAttributesType) I2pEnabled() bool { - r := *o.I2pEnabledPtr - return r -} - -// SetI2pEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeDirectoryAttributesType) SetI2pEnabled(newValue bool) *VolumeDirectoryAttributesType { - o.I2pEnabledPtr = &newValue - return o -} - -// MaxDirSize is a 'getter' method -func (o *VolumeDirectoryAttributesType) MaxDirSize() int { - r := *o.MaxDirSizePtr - return r -} - -// SetMaxDirSize is a fluent style 'setter' method that can be chained -func (o *VolumeDirectoryAttributesType) SetMaxDirSize(newValue int) *VolumeDirectoryAttributesType { - o.MaxDirSizePtr = &newValue - return o -} - -// RootDirGen is a 'getter' method -func (o *VolumeDirectoryAttributesType) RootDirGen() string { - r := *o.RootDirGenPtr - return r -} - -// SetRootDirGen is a fluent style 'setter' method that can be chained -func (o *VolumeDirectoryAttributesType) SetRootDirGen(newValue string) *VolumeDirectoryAttributesType { - o.RootDirGenPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-error.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-error.go deleted file mode 100644 index 34b97df7a..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-error.go +++ /dev/null @@ -1,84 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeErrorType is a structure to represent a volume-error ZAPI object -type VolumeErrorType struct { - XMLName xml.Name `xml:"volume-error"` - ErrnoPtr *int `xml:"errno"` - NamePtr *VolumeNameType `xml:"name"` - ReasonPtr *string `xml:"reason"` - VserverPtr *string `xml:"vserver"` -} - -// NewVolumeErrorType is a factory method for creating new instances of VolumeErrorType objects -func NewVolumeErrorType() *VolumeErrorType { - return &VolumeErrorType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeErrorType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeErrorType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Errno is a 'getter' method -func (o *VolumeErrorType) Errno() int { - r := *o.ErrnoPtr - return r -} - -// SetErrno is a fluent style 'setter' method that can be chained -func (o *VolumeErrorType) SetErrno(newValue int) *VolumeErrorType { - o.ErrnoPtr = &newValue - return o -} - -// Name is a 'getter' method -func (o *VolumeErrorType) Name() VolumeNameType { - r := *o.NamePtr - return r -} - -// SetName is a fluent style 'setter' method that can be chained -func (o *VolumeErrorType) SetName(newValue VolumeNameType) *VolumeErrorType { - o.NamePtr = &newValue - return o -} - -// Reason is a 'getter' method -func (o *VolumeErrorType) Reason() string { - r := *o.ReasonPtr - return r -} - -// SetReason is a fluent style 'setter' method that can be chained -func (o *VolumeErrorType) SetReason(newValue string) *VolumeErrorType { - o.ReasonPtr = &newValue - return o -} - -// Vserver is a 'getter' method -func (o *VolumeErrorType) Vserver() string { - r := *o.VserverPtr - return r -} - -// SetVserver is a fluent style 'setter' method that can be chained -func (o *VolumeErrorType) 
SetVserver(newValue string) *VolumeErrorType { - o.VserverPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-export-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-export-attributes.go deleted file mode 100644 index 821b13562..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-export-attributes.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeExportAttributesType is a structure to represent a volume-export-attributes ZAPI object -type VolumeExportAttributesType struct { - XMLName xml.Name `xml:"volume-export-attributes"` - PolicyPtr *string `xml:"policy"` -} - -// NewVolumeExportAttributesType is a factory method for creating new instances of VolumeExportAttributesType objects -func NewVolumeExportAttributesType() *VolumeExportAttributesType { - return &VolumeExportAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeExportAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeExportAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Policy is a 'getter' method -func (o *VolumeExportAttributesType) Policy() string { - r := *o.PolicyPtr - return r -} - -// SetPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeExportAttributesType) SetPolicy(newValue string) *VolumeExportAttributesType { - o.PolicyPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-flexcache-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-flexcache-attributes.go deleted file mode 100644 index 02da20c90..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-flexcache-attributes.go +++ /dev/null @@ -1,84 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeFlexcacheAttributesType is a structure to represent a volume-flexcache-attributes ZAPI object -type VolumeFlexcacheAttributesType struct { - XMLName xml.Name `xml:"volume-flexcache-attributes"` - CachePolicyPtr *CachePolicyType `xml:"cache-policy"` - FillPolicyPtr *CachePolicyType `xml:"fill-policy"` - MinReservePtr *SizeType `xml:"min-reserve"` - OriginPtr *VolumeNameType `xml:"origin"` -} - -// NewVolumeFlexcacheAttributesType is a factory method for creating new instances of VolumeFlexcacheAttributesType objects -func NewVolumeFlexcacheAttributesType() *VolumeFlexcacheAttributesType { - return &VolumeFlexcacheAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeFlexcacheAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeFlexcacheAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// CachePolicy is a 'getter' method -func (o *VolumeFlexcacheAttributesType) CachePolicy() 
CachePolicyType { - r := *o.CachePolicyPtr - return r -} - -// SetCachePolicy is a fluent style 'setter' method that can be chained -func (o *VolumeFlexcacheAttributesType) SetCachePolicy(newValue CachePolicyType) *VolumeFlexcacheAttributesType { - o.CachePolicyPtr = &newValue - return o -} - -// FillPolicy is a 'getter' method -func (o *VolumeFlexcacheAttributesType) FillPolicy() CachePolicyType { - r := *o.FillPolicyPtr - return r -} - -// SetFillPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeFlexcacheAttributesType) SetFillPolicy(newValue CachePolicyType) *VolumeFlexcacheAttributesType { - o.FillPolicyPtr = &newValue - return o -} - -// MinReserve is a 'getter' method -func (o *VolumeFlexcacheAttributesType) MinReserve() SizeType { - r := *o.MinReservePtr - return r -} - -// SetMinReserve is a fluent style 'setter' method that can be chained -func (o *VolumeFlexcacheAttributesType) SetMinReserve(newValue SizeType) *VolumeFlexcacheAttributesType { - o.MinReservePtr = &newValue - return o -} - -// Origin is a 'getter' method -func (o *VolumeFlexcacheAttributesType) Origin() VolumeNameType { - r := *o.OriginPtr - return r -} - -// SetOrigin is a fluent style 'setter' method that can be chained -func (o *VolumeFlexcacheAttributesType) SetOrigin(newValue VolumeNameType) *VolumeFlexcacheAttributesType { - o.OriginPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-hybrid-cache-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-hybrid-cache-attributes.go deleted file mode 100644 index 92235df37..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-hybrid-cache-attributes.go +++ /dev/null @@ -1,84 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeHybridCacheAttributesType is a structure to represent a volume-hybrid-cache-attributes ZAPI object -type VolumeHybridCacheAttributesType struct { - XMLName xml.Name `xml:"volume-hybrid-cache-attributes"` - CacheRetentionPriorityPtr *string `xml:"cache-retention-priority"` - CachingPolicyPtr *string `xml:"caching-policy"` - EligibilityPtr *string `xml:"eligibility"` - WriteCacheIneligibilityReasonPtr *string `xml:"write-cache-ineligibility-reason"` -} - -// NewVolumeHybridCacheAttributesType is a factory method for creating new instances of VolumeHybridCacheAttributesType objects -func NewVolumeHybridCacheAttributesType() *VolumeHybridCacheAttributesType { - return &VolumeHybridCacheAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeHybridCacheAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeHybridCacheAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// CacheRetentionPriority is a 'getter' method -func (o *VolumeHybridCacheAttributesType) CacheRetentionPriority() string { - r := *o.CacheRetentionPriorityPtr - return r -} - -// SetCacheRetentionPriority is a fluent style 'setter' method that can be chained -func (o *VolumeHybridCacheAttributesType) SetCacheRetentionPriority(newValue string) *VolumeHybridCacheAttributesType { - o.CacheRetentionPriorityPtr = &newValue - return o -} - -// 
CachingPolicy is a 'getter' method -func (o *VolumeHybridCacheAttributesType) CachingPolicy() string { - r := *o.CachingPolicyPtr - return r -} - -// SetCachingPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeHybridCacheAttributesType) SetCachingPolicy(newValue string) *VolumeHybridCacheAttributesType { - o.CachingPolicyPtr = &newValue - return o -} - -// Eligibility is a 'getter' method -func (o *VolumeHybridCacheAttributesType) Eligibility() string { - r := *o.EligibilityPtr - return r -} - -// SetEligibility is a fluent style 'setter' method that can be chained -func (o *VolumeHybridCacheAttributesType) SetEligibility(newValue string) *VolumeHybridCacheAttributesType { - o.EligibilityPtr = &newValue - return o -} - -// WriteCacheIneligibilityReason is a 'getter' method -func (o *VolumeHybridCacheAttributesType) WriteCacheIneligibilityReason() string { - r := *o.WriteCacheIneligibilityReasonPtr - return r -} - -// SetWriteCacheIneligibilityReason is a fluent style 'setter' method that can be chained -func (o *VolumeHybridCacheAttributesType) SetWriteCacheIneligibilityReason(newValue string) *VolumeHybridCacheAttributesType { - o.WriteCacheIneligibilityReasonPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-id-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-id-attributes.go deleted file mode 100644 index 385581561..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-id-attributes.go +++ /dev/null @@ -1,451 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeIdAttributesType is a structure to represent a volume-id-attributes ZAPI object -type VolumeIdAttributesType struct { - XMLName xml.Name `xml:"volume-id-attributes"` - AggrListPtr *VolumeIdAttributesTypeAggrList `xml:"aggr-list"` - // work in progress - ApplicationPtr *string `xml:"application"` - ApplicationUuidPtr *UuidType `xml:"application-uuid"` - CommentPtr *string `xml:"comment"` - ContainingAggregateNamePtr *string `xml:"containing-aggregate-name"` - ContainingAggregateUuidPtr *UuidType `xml:"containing-aggregate-uuid"` - CreationTimePtr *int `xml:"creation-time"` - DsidPtr *int `xml:"dsid"` - ExtentSizePtr *string `xml:"extent-size"` - FlexcacheEndpointTypePtr *string `xml:"flexcache-endpoint-type"` - FlexgroupIndexPtr *int `xml:"flexgroup-index"` - FlexgroupMsidPtr *int `xml:"flexgroup-msid"` - FlexgroupUuidPtr *UuidType `xml:"flexgroup-uuid"` - FsidPtr *string `xml:"fsid"` - InstanceUuidPtr *UuidType `xml:"instance-uuid"` - JunctionParentNamePtr *VolumeNameType `xml:"junction-parent-name"` - JunctionPathPtr *JunctionPathType `xml:"junction-path"` - MsidPtr *int `xml:"msid"` - NamePtr *VolumeNameType `xml:"name"` - NameOrdinalPtr *string `xml:"name-ordinal"` - NodePtr *NodeNameType `xml:"node"` - NodesPtr *VolumeIdAttributesTypeNodes `xml:"nodes"` - // work in progress - OwningVserverNamePtr *string `xml:"owning-vserver-name"` - OwningVserverUuidPtr *UuidType `xml:"owning-vserver-uuid"` - ProvenanceUuidPtr *UuidType `xml:"provenance-uuid"` - StylePtr *VolstyleType `xml:"style"` - StyleExtendedPtr *string `xml:"style-extended"` - TypePtr *string `xml:"type"` - UuidPtr *UuidType `xml:"uuid"` -} - -// NewVolumeIdAttributesType is a factory method for creating new instances of VolumeIdAttributesType objects -func NewVolumeIdAttributesType() *VolumeIdAttributesType { - return 
&VolumeIdAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeIdAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeIdAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VolumeIdAttributesTypeAggrList is a wrapper -type VolumeIdAttributesTypeAggrList struct { - XMLName xml.Name `xml:"aggr-list"` - AggrNamePtr []AggrNameType `xml:"aggr-name"` -} - -// AggrName is a 'getter' method -func (o *VolumeIdAttributesTypeAggrList) AggrName() []AggrNameType { - r := o.AggrNamePtr - return r -} - -// SetAggrName is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesTypeAggrList) SetAggrName(newValue []AggrNameType) *VolumeIdAttributesTypeAggrList { - newSlice := make([]AggrNameType, len(newValue)) - copy(newSlice, newValue) - o.AggrNamePtr = newSlice - return o -} - -// AggrList is a 'getter' method -func (o *VolumeIdAttributesType) AggrList() VolumeIdAttributesTypeAggrList { - r := *o.AggrListPtr - return r -} - -// SetAggrList is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetAggrList(newValue VolumeIdAttributesTypeAggrList) *VolumeIdAttributesType { - o.AggrListPtr = &newValue - return o -} - -// Application is a 'getter' method -func (o *VolumeIdAttributesType) Application() string { - r := *o.ApplicationPtr - return r -} - -// SetApplication is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetApplication(newValue string) *VolumeIdAttributesType { - o.ApplicationPtr = &newValue - return o -} - -// ApplicationUuid is a 'getter' method -func (o *VolumeIdAttributesType) ApplicationUuid() UuidType { - r := *o.ApplicationUuidPtr - return r -} - -// SetApplicationUuid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetApplicationUuid(newValue UuidType) *VolumeIdAttributesType { - o.ApplicationUuidPtr = &newValue - return o -} - -// Comment is a 'getter' method -func (o *VolumeIdAttributesType) Comment() string { - r := *o.CommentPtr - return r -} - -// SetComment is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetComment(newValue string) *VolumeIdAttributesType { - o.CommentPtr = &newValue - return o -} - -// ContainingAggregateName is a 'getter' method -func (o *VolumeIdAttributesType) ContainingAggregateName() string { - r := *o.ContainingAggregateNamePtr - return r -} - -// SetContainingAggregateName is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetContainingAggregateName(newValue string) *VolumeIdAttributesType { - o.ContainingAggregateNamePtr = &newValue - return o -} - -// ContainingAggregateUuid is a 'getter' method -func (o *VolumeIdAttributesType) ContainingAggregateUuid() UuidType { - r := *o.ContainingAggregateUuidPtr - return r -} - -// SetContainingAggregateUuid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetContainingAggregateUuid(newValue UuidType) *VolumeIdAttributesType { - o.ContainingAggregateUuidPtr = &newValue - return o -} - -// CreationTime is a 'getter' method -func (o *VolumeIdAttributesType) CreationTime() int { - r := *o.CreationTimePtr - return r -} - -// SetCreationTime is a fluent style 
'setter' method that can be chained -func (o *VolumeIdAttributesType) SetCreationTime(newValue int) *VolumeIdAttributesType { - o.CreationTimePtr = &newValue - return o -} - -// Dsid is a 'getter' method -func (o *VolumeIdAttributesType) Dsid() int { - r := *o.DsidPtr - return r -} - -// SetDsid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetDsid(newValue int) *VolumeIdAttributesType { - o.DsidPtr = &newValue - return o -} - -// ExtentSize is a 'getter' method -func (o *VolumeIdAttributesType) ExtentSize() string { - r := *o.ExtentSizePtr - return r -} - -// SetExtentSize is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetExtentSize(newValue string) *VolumeIdAttributesType { - o.ExtentSizePtr = &newValue - return o -} - -// FlexcacheEndpointType is a 'getter' method -func (o *VolumeIdAttributesType) FlexcacheEndpointType() string { - r := *o.FlexcacheEndpointTypePtr - return r -} - -// SetFlexcacheEndpointType is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetFlexcacheEndpointType(newValue string) *VolumeIdAttributesType { - o.FlexcacheEndpointTypePtr = &newValue - return o -} - -// FlexgroupIndex is a 'getter' method -func (o *VolumeIdAttributesType) FlexgroupIndex() int { - r := *o.FlexgroupIndexPtr - return r -} - -// SetFlexgroupIndex is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetFlexgroupIndex(newValue int) *VolumeIdAttributesType { - o.FlexgroupIndexPtr = &newValue - return o -} - -// FlexgroupMsid is a 'getter' method -func (o *VolumeIdAttributesType) FlexgroupMsid() int { - r := *o.FlexgroupMsidPtr - return r -} - -// SetFlexgroupMsid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetFlexgroupMsid(newValue int) *VolumeIdAttributesType { - o.FlexgroupMsidPtr = &newValue - return o -} - -// FlexgroupUuid is a 'getter' method -func (o *VolumeIdAttributesType) FlexgroupUuid() UuidType { - r := *o.FlexgroupUuidPtr - return r -} - -// SetFlexgroupUuid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetFlexgroupUuid(newValue UuidType) *VolumeIdAttributesType { - o.FlexgroupUuidPtr = &newValue - return o -} - -// Fsid is a 'getter' method -func (o *VolumeIdAttributesType) Fsid() string { - r := *o.FsidPtr - return r -} - -// SetFsid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetFsid(newValue string) *VolumeIdAttributesType { - o.FsidPtr = &newValue - return o -} - -// InstanceUuid is a 'getter' method -func (o *VolumeIdAttributesType) InstanceUuid() UuidType { - r := *o.InstanceUuidPtr - return r -} - -// SetInstanceUuid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetInstanceUuid(newValue UuidType) *VolumeIdAttributesType { - o.InstanceUuidPtr = &newValue - return o -} - -// JunctionParentName is a 'getter' method -func (o *VolumeIdAttributesType) JunctionParentName() VolumeNameType { - r := *o.JunctionParentNamePtr - return r -} - -// SetJunctionParentName is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetJunctionParentName(newValue VolumeNameType) *VolumeIdAttributesType { - o.JunctionParentNamePtr = &newValue - return o -} - -// JunctionPath is a 'getter' method -func (o *VolumeIdAttributesType) JunctionPath() JunctionPathType { - r := *o.JunctionPathPtr - return r -} - -// SetJunctionPath is a fluent style 
'setter' method that can be chained -func (o *VolumeIdAttributesType) SetJunctionPath(newValue JunctionPathType) *VolumeIdAttributesType { - o.JunctionPathPtr = &newValue - return o -} - -// Msid is a 'getter' method -func (o *VolumeIdAttributesType) Msid() int { - r := *o.MsidPtr - return r -} - -// SetMsid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetMsid(newValue int) *VolumeIdAttributesType { - o.MsidPtr = &newValue - return o -} - -// Name is a 'getter' method -func (o *VolumeIdAttributesType) Name() VolumeNameType { - r := *o.NamePtr - return r -} - -// SetName is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetName(newValue VolumeNameType) *VolumeIdAttributesType { - o.NamePtr = &newValue - return o -} - -// NameOrdinal is a 'getter' method -func (o *VolumeIdAttributesType) NameOrdinal() string { - r := *o.NameOrdinalPtr - return r -} - -// SetNameOrdinal is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetNameOrdinal(newValue string) *VolumeIdAttributesType { - o.NameOrdinalPtr = &newValue - return o -} - -// Node is a 'getter' method -func (o *VolumeIdAttributesType) Node() NodeNameType { - r := *o.NodePtr - return r -} - -// SetNode is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetNode(newValue NodeNameType) *VolumeIdAttributesType { - o.NodePtr = &newValue - return o -} - -// VolumeIdAttributesTypeNodes is a wrapper -type VolumeIdAttributesTypeNodes struct { - XMLName xml.Name `xml:"nodes"` - NodeNamePtr []NodeNameType `xml:"node-name"` -} - -// NodeName is a 'getter' method -func (o *VolumeIdAttributesTypeNodes) NodeName() []NodeNameType { - r := o.NodeNamePtr - return r -} - -// SetNodeName is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesTypeNodes) SetNodeName(newValue []NodeNameType) *VolumeIdAttributesTypeNodes { - newSlice := make([]NodeNameType, len(newValue)) - copy(newSlice, newValue) - o.NodeNamePtr = newSlice - return o -} - -// Nodes is a 'getter' method -func (o *VolumeIdAttributesType) Nodes() VolumeIdAttributesTypeNodes { - r := *o.NodesPtr - return r -} - -// SetNodes is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetNodes(newValue VolumeIdAttributesTypeNodes) *VolumeIdAttributesType { - o.NodesPtr = &newValue - return o -} - -// OwningVserverName is a 'getter' method -func (o *VolumeIdAttributesType) OwningVserverName() string { - r := *o.OwningVserverNamePtr - return r -} - -// SetOwningVserverName is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetOwningVserverName(newValue string) *VolumeIdAttributesType { - o.OwningVserverNamePtr = &newValue - return o -} - -// OwningVserverUuid is a 'getter' method -func (o *VolumeIdAttributesType) OwningVserverUuid() UuidType { - r := *o.OwningVserverUuidPtr - return r -} - -// SetOwningVserverUuid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetOwningVserverUuid(newValue UuidType) *VolumeIdAttributesType { - o.OwningVserverUuidPtr = &newValue - return o -} - -// ProvenanceUuid is a 'getter' method -func (o *VolumeIdAttributesType) ProvenanceUuid() UuidType { - r := *o.ProvenanceUuidPtr - return r -} - -// SetProvenanceUuid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetProvenanceUuid(newValue UuidType) *VolumeIdAttributesType { - o.ProvenanceUuidPtr = &newValue 
- return o -} - -// Style is a 'getter' method -func (o *VolumeIdAttributesType) Style() VolstyleType { - r := *o.StylePtr - return r -} - -// SetStyle is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetStyle(newValue VolstyleType) *VolumeIdAttributesType { - o.StylePtr = &newValue - return o -} - -// StyleExtended is a 'getter' method -func (o *VolumeIdAttributesType) StyleExtended() string { - r := *o.StyleExtendedPtr - return r -} - -// SetStyleExtended is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetStyleExtended(newValue string) *VolumeIdAttributesType { - o.StyleExtendedPtr = &newValue - return o -} - -// Type is a 'getter' method -func (o *VolumeIdAttributesType) Type() string { - r := *o.TypePtr - return r -} - -// SetType is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetType(newValue string) *VolumeIdAttributesType { - o.TypePtr = &newValue - return o -} - -// Uuid is a 'getter' method -func (o *VolumeIdAttributesType) Uuid() UuidType { - r := *o.UuidPtr - return r -} - -// SetUuid is a fluent style 'setter' method that can be chained -func (o *VolumeIdAttributesType) SetUuid(newValue UuidType) *VolumeIdAttributesType { - o.UuidPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-infinitevol-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-infinitevol-attributes.go deleted file mode 100644 index 443b394c7..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-infinitevol-attributes.go +++ /dev/null @@ -1,144 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeInfinitevolAttributesType is a structure to represent a volume-infinitevol-attributes ZAPI object -type VolumeInfinitevolAttributesType struct { - XMLName xml.Name `xml:"volume-infinitevol-attributes"` - ConstituentRolePtr *ReposConstituentRoleType `xml:"constituent-role"` - EnableSnapdiffPtr *bool `xml:"enable-snapdiff"` - IsManagedByServicePtr *bool `xml:"is-managed-by-service"` - MaxDataConstituentSizePtr *SizeType `xml:"max-data-constituent-size"` - MaxNamespaceConstituentSizePtr *SizeType `xml:"max-namespace-constituent-size"` - NamespaceMirrorAggrListPtr *VolumeInfinitevolAttributesTypeNamespaceMirrorAggrList `xml:"namespace-mirror-aggr-list"` - // work in progress - StorageServicePtr *string `xml:"storage-service"` -} - -// NewVolumeInfinitevolAttributesType is a factory method for creating new instances of VolumeInfinitevolAttributesType objects -func NewVolumeInfinitevolAttributesType() *VolumeInfinitevolAttributesType { - return &VolumeInfinitevolAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeInfinitevolAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeInfinitevolAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ConstituentRole is a 'getter' method -func (o *VolumeInfinitevolAttributesType) ConstituentRole() ReposConstituentRoleType { - r := *o.ConstituentRolePtr - return r -} - -// SetConstituentRole is a fluent style 'setter' method that can be chained -func 
(o *VolumeInfinitevolAttributesType) SetConstituentRole(newValue ReposConstituentRoleType) *VolumeInfinitevolAttributesType { - o.ConstituentRolePtr = &newValue - return o -} - -// EnableSnapdiff is a 'getter' method -func (o *VolumeInfinitevolAttributesType) EnableSnapdiff() bool { - r := *o.EnableSnapdiffPtr - return r -} - -// SetEnableSnapdiff is a fluent style 'setter' method that can be chained -func (o *VolumeInfinitevolAttributesType) SetEnableSnapdiff(newValue bool) *VolumeInfinitevolAttributesType { - o.EnableSnapdiffPtr = &newValue - return o -} - -// IsManagedByService is a 'getter' method -func (o *VolumeInfinitevolAttributesType) IsManagedByService() bool { - r := *o.IsManagedByServicePtr - return r -} - -// SetIsManagedByService is a fluent style 'setter' method that can be chained -func (o *VolumeInfinitevolAttributesType) SetIsManagedByService(newValue bool) *VolumeInfinitevolAttributesType { - o.IsManagedByServicePtr = &newValue - return o -} - -// MaxDataConstituentSize is a 'getter' method -func (o *VolumeInfinitevolAttributesType) MaxDataConstituentSize() SizeType { - r := *o.MaxDataConstituentSizePtr - return r -} - -// SetMaxDataConstituentSize is a fluent style 'setter' method that can be chained -func (o *VolumeInfinitevolAttributesType) SetMaxDataConstituentSize(newValue SizeType) *VolumeInfinitevolAttributesType { - o.MaxDataConstituentSizePtr = &newValue - return o -} - -// MaxNamespaceConstituentSize is a 'getter' method -func (o *VolumeInfinitevolAttributesType) MaxNamespaceConstituentSize() SizeType { - r := *o.MaxNamespaceConstituentSizePtr - return r -} - -// SetMaxNamespaceConstituentSize is a fluent style 'setter' method that can be chained -func (o *VolumeInfinitevolAttributesType) SetMaxNamespaceConstituentSize(newValue SizeType) *VolumeInfinitevolAttributesType { - o.MaxNamespaceConstituentSizePtr = &newValue - return o -} - -// VolumeInfinitevolAttributesTypeNamespaceMirrorAggrList is a wrapper -type VolumeInfinitevolAttributesTypeNamespaceMirrorAggrList struct { - XMLName xml.Name `xml:"namespace-mirror-aggr-list"` - AggrNamePtr []AggrNameType `xml:"aggr-name"` -} - -// AggrName is a 'getter' method -func (o *VolumeInfinitevolAttributesTypeNamespaceMirrorAggrList) AggrName() []AggrNameType { - r := o.AggrNamePtr - return r -} - -// SetAggrName is a fluent style 'setter' method that can be chained -func (o *VolumeInfinitevolAttributesTypeNamespaceMirrorAggrList) SetAggrName(newValue []AggrNameType) *VolumeInfinitevolAttributesTypeNamespaceMirrorAggrList { - newSlice := make([]AggrNameType, len(newValue)) - copy(newSlice, newValue) - o.AggrNamePtr = newSlice - return o -} - -// NamespaceMirrorAggrList is a 'getter' method -func (o *VolumeInfinitevolAttributesType) NamespaceMirrorAggrList() VolumeInfinitevolAttributesTypeNamespaceMirrorAggrList { - r := *o.NamespaceMirrorAggrListPtr - return r -} - -// SetNamespaceMirrorAggrList is a fluent style 'setter' method that can be chained -func (o *VolumeInfinitevolAttributesType) SetNamespaceMirrorAggrList(newValue VolumeInfinitevolAttributesTypeNamespaceMirrorAggrList) *VolumeInfinitevolAttributesType { - o.NamespaceMirrorAggrListPtr = &newValue - return o -} - -// StorageService is a 'getter' method -func (o *VolumeInfinitevolAttributesType) StorageService() string { - r := *o.StorageServicePtr - return r -} - -// SetStorageService is a fluent style 'setter' method that can be chained -func (o *VolumeInfinitevolAttributesType) SetStorageService(newValue string) *VolumeInfinitevolAttributesType { - 
o.StorageServicePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-inode-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-inode-attributes.go deleted file mode 100644 index 284c691ad..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-inode-attributes.go +++ /dev/null @@ -1,123 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeInodeAttributesType is a structure to represent a volume-inode-attributes ZAPI object -type VolumeInodeAttributesType struct { - XMLName xml.Name `xml:"volume-inode-attributes"` - BlockTypePtr *string `xml:"block-type"` - FilesPrivateUsedPtr *int `xml:"files-private-used"` - FilesTotalPtr *int `xml:"files-total"` - FilesUsedPtr *int `xml:"files-used"` - InodefilePrivateCapacityPtr *int `xml:"inodefile-private-capacity"` - InodefilePublicCapacityPtr *int `xml:"inodefile-public-capacity"` - InofileVersionPtr *int `xml:"inofile-version"` -} - -// NewVolumeInodeAttributesType is a factory method for creating new instances of VolumeInodeAttributesType objects -func NewVolumeInodeAttributesType() *VolumeInodeAttributesType { - return &VolumeInodeAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeInodeAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeInodeAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// BlockType is a 'getter' method -func (o *VolumeInodeAttributesType) BlockType() string { - r := *o.BlockTypePtr - return r -} - -// SetBlockType is a fluent style 'setter' method that can be chained -func (o *VolumeInodeAttributesType) SetBlockType(newValue string) *VolumeInodeAttributesType { - o.BlockTypePtr = &newValue - return o -} - -// FilesPrivateUsed is a 'getter' method -func (o *VolumeInodeAttributesType) FilesPrivateUsed() int { - r := *o.FilesPrivateUsedPtr - return r -} - -// SetFilesPrivateUsed is a fluent style 'setter' method that can be chained -func (o *VolumeInodeAttributesType) SetFilesPrivateUsed(newValue int) *VolumeInodeAttributesType { - o.FilesPrivateUsedPtr = &newValue - return o -} - -// FilesTotal is a 'getter' method -func (o *VolumeInodeAttributesType) FilesTotal() int { - r := *o.FilesTotalPtr - return r -} - -// SetFilesTotal is a fluent style 'setter' method that can be chained -func (o *VolumeInodeAttributesType) SetFilesTotal(newValue int) *VolumeInodeAttributesType { - o.FilesTotalPtr = &newValue - return o -} - -// FilesUsed is a 'getter' method -func (o *VolumeInodeAttributesType) FilesUsed() int { - r := *o.FilesUsedPtr - return r -} - -// SetFilesUsed is a fluent style 'setter' method that can be chained -func (o *VolumeInodeAttributesType) SetFilesUsed(newValue int) *VolumeInodeAttributesType { - o.FilesUsedPtr = &newValue - return o -} - -// InodefilePrivateCapacity is a 'getter' method -func (o *VolumeInodeAttributesType) InodefilePrivateCapacity() int { - r := *o.InodefilePrivateCapacityPtr - return r -} - -// SetInodefilePrivateCapacity is a fluent style 'setter' method that can be chained -func (o *VolumeInodeAttributesType) SetInodefilePrivateCapacity(newValue int) *VolumeInodeAttributesType 
{ - o.InodefilePrivateCapacityPtr = &newValue - return o -} - -// InodefilePublicCapacity is a 'getter' method -func (o *VolumeInodeAttributesType) InodefilePublicCapacity() int { - r := *o.InodefilePublicCapacityPtr - return r -} - -// SetInodefilePublicCapacity is a fluent style 'setter' method that can be chained -func (o *VolumeInodeAttributesType) SetInodefilePublicCapacity(newValue int) *VolumeInodeAttributesType { - o.InodefilePublicCapacityPtr = &newValue - return o -} - -// InofileVersion is a 'getter' method -func (o *VolumeInodeAttributesType) InofileVersion() int { - r := *o.InofileVersionPtr - return r -} - -// SetInofileVersion is a fluent style 'setter' method that can be chained -func (o *VolumeInodeAttributesType) SetInofileVersion(newValue int) *VolumeInodeAttributesType { - o.InofileVersionPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-language-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-language-attributes.go deleted file mode 100644 index aa14682e9..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-language-attributes.go +++ /dev/null @@ -1,110 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeLanguageAttributesType is a structure to represent a volume-language-attributes ZAPI object -type VolumeLanguageAttributesType struct { - XMLName xml.Name `xml:"volume-language-attributes"` - IsConvertUcodeEnabledPtr *bool `xml:"is-convert-ucode-enabled"` - IsCreateUcodeEnabledPtr *bool `xml:"is-create-ucode-enabled"` - LanguagePtr *string `xml:"language"` - LanguageCodePtr *LanguageCodeType `xml:"language-code"` - NfsCharacterSetPtr *string `xml:"nfs-character-set"` - OemCharacterSetPtr *string `xml:"oem-character-set"` -} - -// NewVolumeLanguageAttributesType is a factory method for creating new instances of VolumeLanguageAttributesType objects -func NewVolumeLanguageAttributesType() *VolumeLanguageAttributesType { - return &VolumeLanguageAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeLanguageAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeLanguageAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IsConvertUcodeEnabled is a 'getter' method -func (o *VolumeLanguageAttributesType) IsConvertUcodeEnabled() bool { - r := *o.IsConvertUcodeEnabledPtr - return r -} - -// SetIsConvertUcodeEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeLanguageAttributesType) SetIsConvertUcodeEnabled(newValue bool) *VolumeLanguageAttributesType { - o.IsConvertUcodeEnabledPtr = &newValue - return o -} - -// IsCreateUcodeEnabled is a 'getter' method -func (o *VolumeLanguageAttributesType) IsCreateUcodeEnabled() bool { - r := *o.IsCreateUcodeEnabledPtr - return r -} - -// SetIsCreateUcodeEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeLanguageAttributesType) SetIsCreateUcodeEnabled(newValue bool) *VolumeLanguageAttributesType { - o.IsCreateUcodeEnabledPtr = &newValue - return o -} - -// Language is a 'getter' method -func (o *VolumeLanguageAttributesType) Language() string { - r := *o.LanguagePtr 
- return r -} - -// SetLanguage is a fluent style 'setter' method that can be chained -func (o *VolumeLanguageAttributesType) SetLanguage(newValue string) *VolumeLanguageAttributesType { - o.LanguagePtr = &newValue - return o -} - -// LanguageCode is a 'getter' method -func (o *VolumeLanguageAttributesType) LanguageCode() LanguageCodeType { - r := *o.LanguageCodePtr - return r -} - -// SetLanguageCode is a fluent style 'setter' method that can be chained -func (o *VolumeLanguageAttributesType) SetLanguageCode(newValue LanguageCodeType) *VolumeLanguageAttributesType { - o.LanguageCodePtr = &newValue - return o -} - -// NfsCharacterSet is a 'getter' method -func (o *VolumeLanguageAttributesType) NfsCharacterSet() string { - r := *o.NfsCharacterSetPtr - return r -} - -// SetNfsCharacterSet is a fluent style 'setter' method that can be chained -func (o *VolumeLanguageAttributesType) SetNfsCharacterSet(newValue string) *VolumeLanguageAttributesType { - o.NfsCharacterSetPtr = &newValue - return o -} - -// OemCharacterSet is a 'getter' method -func (o *VolumeLanguageAttributesType) OemCharacterSet() string { - r := *o.OemCharacterSetPtr - return r -} - -// SetOemCharacterSet is a fluent style 'setter' method that can be chained -func (o *VolumeLanguageAttributesType) SetOemCharacterSet(newValue string) *VolumeLanguageAttributesType { - o.OemCharacterSetPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-mirror-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-mirror-attributes.go deleted file mode 100644 index 3d689b5e3..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-mirror-attributes.go +++ /dev/null @@ -1,123 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeMirrorAttributesType is a structure to represent a volume-mirror-attributes ZAPI object -type VolumeMirrorAttributesType struct { - XMLName xml.Name `xml:"volume-mirror-attributes"` - IsDataProtectionMirrorPtr *bool `xml:"is-data-protection-mirror"` - IsLoadSharingMirrorPtr *bool `xml:"is-load-sharing-mirror"` - IsMoveMirrorPtr *bool `xml:"is-move-mirror"` - IsReplicaVolumePtr *bool `xml:"is-replica-volume"` - IsSnapmirrorSourcePtr *bool `xml:"is-snapmirror-source"` - MirrorTransferInProgressPtr *bool `xml:"mirror-transfer-in-progress"` - RedirectSnapshotIdPtr *int `xml:"redirect-snapshot-id"` -} - -// NewVolumeMirrorAttributesType is a factory method for creating new instances of VolumeMirrorAttributesType objects -func NewVolumeMirrorAttributesType() *VolumeMirrorAttributesType { - return &VolumeMirrorAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeMirrorAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeMirrorAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IsDataProtectionMirror is a 'getter' method -func (o *VolumeMirrorAttributesType) IsDataProtectionMirror() bool { - r := *o.IsDataProtectionMirrorPtr - return r -} - -// SetIsDataProtectionMirror is a fluent style 'setter' method that can be chained -func (o *VolumeMirrorAttributesType) SetIsDataProtectionMirror(newValue bool) 
*VolumeMirrorAttributesType { - o.IsDataProtectionMirrorPtr = &newValue - return o -} - -// IsLoadSharingMirror is a 'getter' method -func (o *VolumeMirrorAttributesType) IsLoadSharingMirror() bool { - r := *o.IsLoadSharingMirrorPtr - return r -} - -// SetIsLoadSharingMirror is a fluent style 'setter' method that can be chained -func (o *VolumeMirrorAttributesType) SetIsLoadSharingMirror(newValue bool) *VolumeMirrorAttributesType { - o.IsLoadSharingMirrorPtr = &newValue - return o -} - -// IsMoveMirror is a 'getter' method -func (o *VolumeMirrorAttributesType) IsMoveMirror() bool { - r := *o.IsMoveMirrorPtr - return r -} - -// SetIsMoveMirror is a fluent style 'setter' method that can be chained -func (o *VolumeMirrorAttributesType) SetIsMoveMirror(newValue bool) *VolumeMirrorAttributesType { - o.IsMoveMirrorPtr = &newValue - return o -} - -// IsReplicaVolume is a 'getter' method -func (o *VolumeMirrorAttributesType) IsReplicaVolume() bool { - r := *o.IsReplicaVolumePtr - return r -} - -// SetIsReplicaVolume is a fluent style 'setter' method that can be chained -func (o *VolumeMirrorAttributesType) SetIsReplicaVolume(newValue bool) *VolumeMirrorAttributesType { - o.IsReplicaVolumePtr = &newValue - return o -} - -// IsSnapmirrorSource is a 'getter' method -func (o *VolumeMirrorAttributesType) IsSnapmirrorSource() bool { - r := *o.IsSnapmirrorSourcePtr - return r -} - -// SetIsSnapmirrorSource is a fluent style 'setter' method that can be chained -func (o *VolumeMirrorAttributesType) SetIsSnapmirrorSource(newValue bool) *VolumeMirrorAttributesType { - o.IsSnapmirrorSourcePtr = &newValue - return o -} - -// MirrorTransferInProgress is a 'getter' method -func (o *VolumeMirrorAttributesType) MirrorTransferInProgress() bool { - r := *o.MirrorTransferInProgressPtr - return r -} - -// SetMirrorTransferInProgress is a fluent style 'setter' method that can be chained -func (o *VolumeMirrorAttributesType) SetMirrorTransferInProgress(newValue bool) *VolumeMirrorAttributesType { - o.MirrorTransferInProgressPtr = &newValue - return o -} - -// RedirectSnapshotId is a 'getter' method -func (o *VolumeMirrorAttributesType) RedirectSnapshotId() int { - r := *o.RedirectSnapshotIdPtr - return r -} - -// SetRedirectSnapshotId is a fluent style 'setter' method that can be chained -func (o *VolumeMirrorAttributesType) SetRedirectSnapshotId(newValue int) *VolumeMirrorAttributesType { - o.RedirectSnapshotIdPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-modify-iter-async-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-modify-iter-async-info.go deleted file mode 100644 index 7618763f4..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-modify-iter-async-info.go +++ /dev/null @@ -1,116 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeModifyIterAsyncInfoType is a structure to represent a volume-modify-iter-async-info ZAPI object -type VolumeModifyIterAsyncInfoType struct { - XMLName xml.Name `xml:"volume-modify-iter-async-info"` - ErrorCodePtr *int `xml:"error-code"` - ErrorMessagePtr *string `xml:"error-message"` - JobidPtr *int `xml:"jobid"` - StatusPtr *string `xml:"status"` - VolumeKeyPtr *VolumeModifyIterAsyncInfoTypeVolumeKey `xml:"volume-key"` - // work in progress -} - -// NewVolumeModifyIterAsyncInfoType is a factory method for creating new instances of VolumeModifyIterAsyncInfoType objects -func 
NewVolumeModifyIterAsyncInfoType() *VolumeModifyIterAsyncInfoType { - return &VolumeModifyIterAsyncInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeModifyIterAsyncInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterAsyncInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ErrorCode is a 'getter' method -func (o *VolumeModifyIterAsyncInfoType) ErrorCode() int { - r := *o.ErrorCodePtr - return r -} - -// SetErrorCode is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncInfoType) SetErrorCode(newValue int) *VolumeModifyIterAsyncInfoType { - o.ErrorCodePtr = &newValue - return o -} - -// ErrorMessage is a 'getter' method -func (o *VolumeModifyIterAsyncInfoType) ErrorMessage() string { - r := *o.ErrorMessagePtr - return r -} - -// SetErrorMessage is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncInfoType) SetErrorMessage(newValue string) *VolumeModifyIterAsyncInfoType { - o.ErrorMessagePtr = &newValue - return o -} - -// Jobid is a 'getter' method -func (o *VolumeModifyIterAsyncInfoType) Jobid() int { - r := *o.JobidPtr - return r -} - -// SetJobid is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncInfoType) SetJobid(newValue int) *VolumeModifyIterAsyncInfoType { - o.JobidPtr = &newValue - return o -} - -// Status is a 'getter' method -func (o *VolumeModifyIterAsyncInfoType) Status() string { - r := *o.StatusPtr - return r -} - -// SetStatus is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncInfoType) SetStatus(newValue string) *VolumeModifyIterAsyncInfoType { - o.StatusPtr = &newValue - return o -} - -// VolumeModifyIterAsyncInfoTypeVolumeKey is a wrapper -type VolumeModifyIterAsyncInfoTypeVolumeKey struct { - XMLName xml.Name `xml:"volume-key"` - VolumeAttributesPtr *VolumeAttributesType `xml:"volume-attributes"` -} - -// VolumeAttributes is a 'getter' method -func (o *VolumeModifyIterAsyncInfoTypeVolumeKey) VolumeAttributes() VolumeAttributesType { - r := *o.VolumeAttributesPtr - return r -} - -// SetVolumeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncInfoTypeVolumeKey) SetVolumeAttributes(newValue VolumeAttributesType) *VolumeModifyIterAsyncInfoTypeVolumeKey { - o.VolumeAttributesPtr = &newValue - return o -} - -// VolumeKey is a 'getter' method -func (o *VolumeModifyIterAsyncInfoType) VolumeKey() VolumeModifyIterAsyncInfoTypeVolumeKey { - r := *o.VolumeKeyPtr - return r -} - -// SetVolumeKey is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterAsyncInfoType) SetVolumeKey(newValue VolumeModifyIterAsyncInfoTypeVolumeKey) *VolumeModifyIterAsyncInfoType { - o.VolumeKeyPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-modify-iter-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-modify-iter-info.go deleted file mode 100644 index 0e4ebc736..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-modify-iter-info.go +++ /dev/null @@ -1,90 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" 
-) - -// VolumeModifyIterInfoType is a structure to represent a volume-modify-iter-info ZAPI object -type VolumeModifyIterInfoType struct { - XMLName xml.Name `xml:"volume-modify-iter-info"` - ErrorCodePtr *int `xml:"error-code"` - ErrorMessagePtr *string `xml:"error-message"` - VolumeKeyPtr *VolumeModifyIterInfoTypeVolumeKey `xml:"volume-key"` - // work in progress -} - -// NewVolumeModifyIterInfoType is a factory method for creating new instances of VolumeModifyIterInfoType objects -func NewVolumeModifyIterInfoType() *VolumeModifyIterInfoType { - return &VolumeModifyIterInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeModifyIterInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeModifyIterInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ErrorCode is a 'getter' method -func (o *VolumeModifyIterInfoType) ErrorCode() int { - r := *o.ErrorCodePtr - return r -} - -// SetErrorCode is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterInfoType) SetErrorCode(newValue int) *VolumeModifyIterInfoType { - o.ErrorCodePtr = &newValue - return o -} - -// ErrorMessage is a 'getter' method -func (o *VolumeModifyIterInfoType) ErrorMessage() string { - r := *o.ErrorMessagePtr - return r -} - -// SetErrorMessage is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterInfoType) SetErrorMessage(newValue string) *VolumeModifyIterInfoType { - o.ErrorMessagePtr = &newValue - return o -} - -// VolumeModifyIterInfoTypeVolumeKey is a wrapper -type VolumeModifyIterInfoTypeVolumeKey struct { - XMLName xml.Name `xml:"volume-key"` - VolumeAttributesPtr *VolumeAttributesType `xml:"volume-attributes"` -} - -// VolumeAttributes is a 'getter' method -func (o *VolumeModifyIterInfoTypeVolumeKey) VolumeAttributes() VolumeAttributesType { - r := *o.VolumeAttributesPtr - return r -} - -// SetVolumeAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterInfoTypeVolumeKey) SetVolumeAttributes(newValue VolumeAttributesType) *VolumeModifyIterInfoTypeVolumeKey { - o.VolumeAttributesPtr = &newValue - return o -} - -// VolumeKey is a 'getter' method -func (o *VolumeModifyIterInfoType) VolumeKey() VolumeModifyIterInfoTypeVolumeKey { - r := *o.VolumeKeyPtr - return r -} - -// SetVolumeKey is a fluent style 'setter' method that can be chained -func (o *VolumeModifyIterInfoType) SetVolumeKey(newValue VolumeModifyIterInfoTypeVolumeKey) *VolumeModifyIterInfoType { - o.VolumeKeyPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-name.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-name.go deleted file mode 100644 index fc923b6a8..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-name.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// VolumeNameType is a structure to represent a volume-name ZAPI object -type VolumeNameType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-performance-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-performance-attributes.go deleted file mode 100644 index aa3dd0816..000000000 --- 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-performance-attributes.go +++ /dev/null @@ -1,123 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumePerformanceAttributesType is a structure to represent a volume-performance-attributes ZAPI object -type VolumePerformanceAttributesType struct { - XMLName xml.Name `xml:"volume-performance-attributes"` - ExtentEnabledPtr *string `xml:"extent-enabled"` - FcDelegsEnabledPtr *bool `xml:"fc-delegs-enabled"` - IsAtimeUpdateEnabledPtr *bool `xml:"is-atime-update-enabled"` - MaxWriteAllocBlocksPtr *int `xml:"max-write-alloc-blocks"` - MinimalReadAheadPtr *bool `xml:"minimal-read-ahead"` - ReadReallocPtr *string `xml:"read-realloc"` - SingleInstanceDataLoggingPtr *string `xml:"single-instance-data-logging"` -} - -// NewVolumePerformanceAttributesType is a factory method for creating new instances of VolumePerformanceAttributesType objects -func NewVolumePerformanceAttributesType() *VolumePerformanceAttributesType { - return &VolumePerformanceAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumePerformanceAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumePerformanceAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExtentEnabled is a 'getter' method -func (o *VolumePerformanceAttributesType) ExtentEnabled() string { - r := *o.ExtentEnabledPtr - return r -} - -// SetExtentEnabled is a fluent style 'setter' method that can be chained -func (o *VolumePerformanceAttributesType) SetExtentEnabled(newValue string) *VolumePerformanceAttributesType { - o.ExtentEnabledPtr = &newValue - return o -} - -// FcDelegsEnabled is a 'getter' method -func (o *VolumePerformanceAttributesType) FcDelegsEnabled() bool { - r := *o.FcDelegsEnabledPtr - return r -} - -// SetFcDelegsEnabled is a fluent style 'setter' method that can be chained -func (o *VolumePerformanceAttributesType) SetFcDelegsEnabled(newValue bool) *VolumePerformanceAttributesType { - o.FcDelegsEnabledPtr = &newValue - return o -} - -// IsAtimeUpdateEnabled is a 'getter' method -func (o *VolumePerformanceAttributesType) IsAtimeUpdateEnabled() bool { - r := *o.IsAtimeUpdateEnabledPtr - return r -} - -// SetIsAtimeUpdateEnabled is a fluent style 'setter' method that can be chained -func (o *VolumePerformanceAttributesType) SetIsAtimeUpdateEnabled(newValue bool) *VolumePerformanceAttributesType { - o.IsAtimeUpdateEnabledPtr = &newValue - return o -} - -// MaxWriteAllocBlocks is a 'getter' method -func (o *VolumePerformanceAttributesType) MaxWriteAllocBlocks() int { - r := *o.MaxWriteAllocBlocksPtr - return r -} - -// SetMaxWriteAllocBlocks is a fluent style 'setter' method that can be chained -func (o *VolumePerformanceAttributesType) SetMaxWriteAllocBlocks(newValue int) *VolumePerformanceAttributesType { - o.MaxWriteAllocBlocksPtr = &newValue - return o -} - -// MinimalReadAhead is a 'getter' method -func (o *VolumePerformanceAttributesType) MinimalReadAhead() bool { - r := *o.MinimalReadAheadPtr - return r -} - -// SetMinimalReadAhead is a fluent style 'setter' method that can be chained -func (o *VolumePerformanceAttributesType) SetMinimalReadAhead(newValue bool) *VolumePerformanceAttributesType 
{ - o.MinimalReadAheadPtr = &newValue - return o -} - -// ReadRealloc is a 'getter' method -func (o *VolumePerformanceAttributesType) ReadRealloc() string { - r := *o.ReadReallocPtr - return r -} - -// SetReadRealloc is a fluent style 'setter' method that can be chained -func (o *VolumePerformanceAttributesType) SetReadRealloc(newValue string) *VolumePerformanceAttributesType { - o.ReadReallocPtr = &newValue - return o -} - -// SingleInstanceDataLogging is a 'getter' method -func (o *VolumePerformanceAttributesType) SingleInstanceDataLogging() string { - r := *o.SingleInstanceDataLoggingPtr - return r -} - -// SetSingleInstanceDataLogging is a fluent style 'setter' method that can be chained -func (o *VolumePerformanceAttributesType) SetSingleInstanceDataLogging(newValue string) *VolumePerformanceAttributesType { - o.SingleInstanceDataLoggingPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-qos-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-qos-attributes.go deleted file mode 100644 index 9e76ef7fd..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-qos-attributes.go +++ /dev/null @@ -1,58 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeQosAttributesType is a structure to represent a volume-qos-attributes ZAPI object -type VolumeQosAttributesType struct { - XMLName xml.Name `xml:"volume-qos-attributes"` - AdaptivePolicyGroupNamePtr *string `xml:"adaptive-policy-group-name"` - PolicyGroupNamePtr *string `xml:"policy-group-name"` -} - -// NewVolumeQosAttributesType is a factory method for creating new instances of VolumeQosAttributesType objects -func NewVolumeQosAttributesType() *VolumeQosAttributesType { - return &VolumeQosAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeQosAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeQosAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AdaptivePolicyGroupName is a 'getter' method -func (o *VolumeQosAttributesType) AdaptivePolicyGroupName() string { - r := *o.AdaptivePolicyGroupNamePtr - return r -} - -// SetAdaptivePolicyGroupName is a fluent style 'setter' method that can be chained -func (o *VolumeQosAttributesType) SetAdaptivePolicyGroupName(newValue string) *VolumeQosAttributesType { - o.AdaptivePolicyGroupNamePtr = &newValue - return o -} - -// PolicyGroupName is a 'getter' method -func (o *VolumeQosAttributesType) PolicyGroupName() string { - r := *o.PolicyGroupNamePtr - return r -} - -// SetPolicyGroupName is a fluent style 'setter' method that can be chained -func (o *VolumeQosAttributesType) SetPolicyGroupName(newValue string) *VolumeQosAttributesType { - o.PolicyGroupNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-security-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-security-attributes.go deleted file mode 100644 index 2696112bd..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-security-attributes.go +++ /dev/null @@ -1,58 +0,0 @@ 
-package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeSecurityAttributesType is a structure to represent a volume-security-attributes ZAPI object -type VolumeSecurityAttributesType struct { - XMLName xml.Name `xml:"volume-security-attributes"` - StylePtr *string `xml:"style"` - VolumeSecurityUnixAttributesPtr *VolumeSecurityUnixAttributesType `xml:"volume-security-unix-attributes"` -} - -// NewVolumeSecurityAttributesType is a factory method for creating new instances of VolumeSecurityAttributesType objects -func NewVolumeSecurityAttributesType() *VolumeSecurityAttributesType { - return &VolumeSecurityAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSecurityAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSecurityAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Style is a 'getter' method -func (o *VolumeSecurityAttributesType) Style() string { - r := *o.StylePtr - return r -} - -// SetStyle is a fluent style 'setter' method that can be chained -func (o *VolumeSecurityAttributesType) SetStyle(newValue string) *VolumeSecurityAttributesType { - o.StylePtr = &newValue - return o -} - -// VolumeSecurityUnixAttributes is a 'getter' method -func (o *VolumeSecurityAttributesType) VolumeSecurityUnixAttributes() VolumeSecurityUnixAttributesType { - r := *o.VolumeSecurityUnixAttributesPtr - return r -} - -// SetVolumeSecurityUnixAttributes is a fluent style 'setter' method that can be chained -func (o *VolumeSecurityAttributesType) SetVolumeSecurityUnixAttributes(newValue VolumeSecurityUnixAttributesType) *VolumeSecurityAttributesType { - o.VolumeSecurityUnixAttributesPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-security-unix-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-security-unix-attributes.go deleted file mode 100644 index 74dab653b..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-security-unix-attributes.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeSecurityUnixAttributesType is a structure to represent a volume-security-unix-attributes ZAPI object -type VolumeSecurityUnixAttributesType struct { - XMLName xml.Name `xml:"volume-security-unix-attributes"` - GroupIdPtr *int `xml:"group-id"` - PermissionsPtr *string `xml:"permissions"` - UserIdPtr *int `xml:"user-id"` -} - -// NewVolumeSecurityUnixAttributesType is a factory method for creating new instances of VolumeSecurityUnixAttributesType objects -func NewVolumeSecurityUnixAttributesType() *VolumeSecurityUnixAttributesType { - return &VolumeSecurityUnixAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSecurityUnixAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSecurityUnixAttributesType) String() string { - 
return ToString(reflect.ValueOf(o)) -} - -// GroupId is a 'getter' method -func (o *VolumeSecurityUnixAttributesType) GroupId() int { - r := *o.GroupIdPtr - return r -} - -// SetGroupId is a fluent style 'setter' method that can be chained -func (o *VolumeSecurityUnixAttributesType) SetGroupId(newValue int) *VolumeSecurityUnixAttributesType { - o.GroupIdPtr = &newValue - return o -} - -// Permissions is a 'getter' method -func (o *VolumeSecurityUnixAttributesType) Permissions() string { - r := *o.PermissionsPtr - return r -} - -// SetPermissions is a fluent style 'setter' method that can be chained -func (o *VolumeSecurityUnixAttributesType) SetPermissions(newValue string) *VolumeSecurityUnixAttributesType { - o.PermissionsPtr = &newValue - return o -} - -// UserId is a 'getter' method -func (o *VolumeSecurityUnixAttributesType) UserId() int { - r := *o.UserIdPtr - return r -} - -// SetUserId is a fluent style 'setter' method that can be chained -func (o *VolumeSecurityUnixAttributesType) SetUserId(newValue int) *VolumeSecurityUnixAttributesType { - o.UserIdPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-sis-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-sis-attributes.go deleted file mode 100644 index c5f0c30af..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-sis-attributes.go +++ /dev/null @@ -1,162 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeSisAttributesType is a structure to represent a volume-sis-attributes ZAPI object -type VolumeSisAttributesType struct { - XMLName xml.Name `xml:"volume-sis-attributes"` - CompressionSpaceSavedPtr *int `xml:"compression-space-saved"` - DeduplicationSpaceSavedPtr *int `xml:"deduplication-space-saved"` - DeduplicationSpaceSharedPtr *SizeType `xml:"deduplication-space-shared"` - IsSisLoggingEnabledPtr *bool `xml:"is-sis-logging-enabled"` - IsSisStateEnabledPtr *bool `xml:"is-sis-state-enabled"` - IsSisVolumePtr *bool `xml:"is-sis-volume"` - PercentageCompressionSpaceSavedPtr *int `xml:"percentage-compression-space-saved"` - PercentageDeduplicationSpaceSavedPtr *int `xml:"percentage-deduplication-space-saved"` - PercentageTotalSpaceSavedPtr *int `xml:"percentage-total-space-saved"` - TotalSpaceSavedPtr *int `xml:"total-space-saved"` -} - -// NewVolumeSisAttributesType is a factory method for creating new instances of VolumeSisAttributesType objects -func NewVolumeSisAttributesType() *VolumeSisAttributesType { - return &VolumeSisAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSisAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSisAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// CompressionSpaceSaved is a 'getter' method -func (o *VolumeSisAttributesType) CompressionSpaceSaved() int { - r := *o.CompressionSpaceSavedPtr - return r -} - -// SetCompressionSpaceSaved is a fluent style 'setter' method that can be chained -func (o *VolumeSisAttributesType) SetCompressionSpaceSaved(newValue int) *VolumeSisAttributesType { - o.CompressionSpaceSavedPtr = &newValue - return o -} - -// DeduplicationSpaceSaved 
is a 'getter' method -func (o *VolumeSisAttributesType) DeduplicationSpaceSaved() int { - r := *o.DeduplicationSpaceSavedPtr - return r -} - -// SetDeduplicationSpaceSaved is a fluent style 'setter' method that can be chained -func (o *VolumeSisAttributesType) SetDeduplicationSpaceSaved(newValue int) *VolumeSisAttributesType { - o.DeduplicationSpaceSavedPtr = &newValue - return o -} - -// DeduplicationSpaceShared is a 'getter' method -func (o *VolumeSisAttributesType) DeduplicationSpaceShared() SizeType { - r := *o.DeduplicationSpaceSharedPtr - return r -} - -// SetDeduplicationSpaceShared is a fluent style 'setter' method that can be chained -func (o *VolumeSisAttributesType) SetDeduplicationSpaceShared(newValue SizeType) *VolumeSisAttributesType { - o.DeduplicationSpaceSharedPtr = &newValue - return o -} - -// IsSisLoggingEnabled is a 'getter' method -func (o *VolumeSisAttributesType) IsSisLoggingEnabled() bool { - r := *o.IsSisLoggingEnabledPtr - return r -} - -// SetIsSisLoggingEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeSisAttributesType) SetIsSisLoggingEnabled(newValue bool) *VolumeSisAttributesType { - o.IsSisLoggingEnabledPtr = &newValue - return o -} - -// IsSisStateEnabled is a 'getter' method -func (o *VolumeSisAttributesType) IsSisStateEnabled() bool { - r := *o.IsSisStateEnabledPtr - return r -} - -// SetIsSisStateEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeSisAttributesType) SetIsSisStateEnabled(newValue bool) *VolumeSisAttributesType { - o.IsSisStateEnabledPtr = &newValue - return o -} - -// IsSisVolume is a 'getter' method -func (o *VolumeSisAttributesType) IsSisVolume() bool { - r := *o.IsSisVolumePtr - return r -} - -// SetIsSisVolume is a fluent style 'setter' method that can be chained -func (o *VolumeSisAttributesType) SetIsSisVolume(newValue bool) *VolumeSisAttributesType { - o.IsSisVolumePtr = &newValue - return o -} - -// PercentageCompressionSpaceSaved is a 'getter' method -func (o *VolumeSisAttributesType) PercentageCompressionSpaceSaved() int { - r := *o.PercentageCompressionSpaceSavedPtr - return r -} - -// SetPercentageCompressionSpaceSaved is a fluent style 'setter' method that can be chained -func (o *VolumeSisAttributesType) SetPercentageCompressionSpaceSaved(newValue int) *VolumeSisAttributesType { - o.PercentageCompressionSpaceSavedPtr = &newValue - return o -} - -// PercentageDeduplicationSpaceSaved is a 'getter' method -func (o *VolumeSisAttributesType) PercentageDeduplicationSpaceSaved() int { - r := *o.PercentageDeduplicationSpaceSavedPtr - return r -} - -// SetPercentageDeduplicationSpaceSaved is a fluent style 'setter' method that can be chained -func (o *VolumeSisAttributesType) SetPercentageDeduplicationSpaceSaved(newValue int) *VolumeSisAttributesType { - o.PercentageDeduplicationSpaceSavedPtr = &newValue - return o -} - -// PercentageTotalSpaceSaved is a 'getter' method -func (o *VolumeSisAttributesType) PercentageTotalSpaceSaved() int { - r := *o.PercentageTotalSpaceSavedPtr - return r -} - -// SetPercentageTotalSpaceSaved is a fluent style 'setter' method that can be chained -func (o *VolumeSisAttributesType) SetPercentageTotalSpaceSaved(newValue int) *VolumeSisAttributesType { - o.PercentageTotalSpaceSavedPtr = &newValue - return o -} - -// TotalSpaceSaved is a 'getter' method -func (o *VolumeSisAttributesType) TotalSpaceSaved() int { - r := *o.TotalSpaceSavedPtr - return r -} - -// SetTotalSpaceSaved is a fluent style 'setter' method that can be chained -func (o 
*VolumeSisAttributesType) SetTotalSpaceSaved(newValue int) *VolumeSisAttributesType { - o.TotalSpaceSavedPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-snaplock-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-snaplock-attributes.go deleted file mode 100644 index 41baffa03..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-snaplock-attributes.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeSnaplockAttributesType is a structure to represent a volume-snaplock-attributes ZAPI object -type VolumeSnaplockAttributesType struct { - XMLName xml.Name `xml:"volume-snaplock-attributes"` - SnaplockTypePtr *SnaplocktypeType `xml:"snaplock-type"` -} - -// NewVolumeSnaplockAttributesType is a factory method for creating new instances of VolumeSnaplockAttributesType objects -func NewVolumeSnaplockAttributesType() *VolumeSnaplockAttributesType { - return &VolumeSnaplockAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSnaplockAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSnaplockAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// SnaplockType is a 'getter' method -func (o *VolumeSnaplockAttributesType) SnaplockType() SnaplocktypeType { - r := *o.SnaplockTypePtr - return r -} - -// SetSnaplockType is a fluent style 'setter' method that can be chained -func (o *VolumeSnaplockAttributesType) SetSnaplockType(newValue SnaplocktypeType) *VolumeSnaplockAttributesType { - o.SnaplockTypePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-snapshot-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-snapshot-attributes.go deleted file mode 100644 index eb5d1fefa..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-snapshot-attributes.go +++ /dev/null @@ -1,97 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeSnapshotAttributesType is a structure to represent a volume-snapshot-attributes ZAPI object -type VolumeSnapshotAttributesType struct { - XMLName xml.Name `xml:"volume-snapshot-attributes"` - AutoSnapshotsEnabledPtr *bool `xml:"auto-snapshots-enabled"` - SnapdirAccessEnabledPtr *bool `xml:"snapdir-access-enabled"` - SnapshotCloneDependencyEnabledPtr *bool `xml:"snapshot-clone-dependency-enabled"` - SnapshotCountPtr *int `xml:"snapshot-count"` - SnapshotPolicyPtr *string `xml:"snapshot-policy"` -} - -// NewVolumeSnapshotAttributesType is a factory method for creating new instances of VolumeSnapshotAttributesType objects -func NewVolumeSnapshotAttributesType() *VolumeSnapshotAttributesType { - return &VolumeSnapshotAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSnapshotAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string 
representation of this object's fields and implements the Stringer interface -func (o VolumeSnapshotAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AutoSnapshotsEnabled is a 'getter' method -func (o *VolumeSnapshotAttributesType) AutoSnapshotsEnabled() bool { - r := *o.AutoSnapshotsEnabledPtr - return r -} - -// SetAutoSnapshotsEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAttributesType) SetAutoSnapshotsEnabled(newValue bool) *VolumeSnapshotAttributesType { - o.AutoSnapshotsEnabledPtr = &newValue - return o -} - -// SnapdirAccessEnabled is a 'getter' method -func (o *VolumeSnapshotAttributesType) SnapdirAccessEnabled() bool { - r := *o.SnapdirAccessEnabledPtr - return r -} - -// SetSnapdirAccessEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAttributesType) SetSnapdirAccessEnabled(newValue bool) *VolumeSnapshotAttributesType { - o.SnapdirAccessEnabledPtr = &newValue - return o -} - -// SnapshotCloneDependencyEnabled is a 'getter' method -func (o *VolumeSnapshotAttributesType) SnapshotCloneDependencyEnabled() bool { - r := *o.SnapshotCloneDependencyEnabledPtr - return r -} - -// SetSnapshotCloneDependencyEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAttributesType) SetSnapshotCloneDependencyEnabled(newValue bool) *VolumeSnapshotAttributesType { - o.SnapshotCloneDependencyEnabledPtr = &newValue - return o -} - -// SnapshotCount is a 'getter' method -func (o *VolumeSnapshotAttributesType) SnapshotCount() int { - r := *o.SnapshotCountPtr - return r -} - -// SetSnapshotCount is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAttributesType) SetSnapshotCount(newValue int) *VolumeSnapshotAttributesType { - o.SnapshotCountPtr = &newValue - return o -} - -// SnapshotPolicy is a 'getter' method -func (o *VolumeSnapshotAttributesType) SnapshotPolicy() string { - r := *o.SnapshotPolicyPtr - return r -} - -// SetSnapshotPolicy is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAttributesType) SetSnapshotPolicy(newValue string) *VolumeSnapshotAttributesType { - o.SnapshotPolicyPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-snapshot-autodelete-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-snapshot-autodelete-attributes.go deleted file mode 100644 index 51c841bb2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-snapshot-autodelete-attributes.go +++ /dev/null @@ -1,136 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeSnapshotAutodeleteAttributesType is a structure to represent a volume-snapshot-autodelete-attributes ZAPI object -type VolumeSnapshotAutodeleteAttributesType struct { - XMLName xml.Name `xml:"volume-snapshot-autodelete-attributes"` - CommitmentPtr *string `xml:"commitment"` - DeferDeletePtr *string `xml:"defer-delete"` - DeleteOrderPtr *string `xml:"delete-order"` - DestroyListPtr *string `xml:"destroy-list"` - IsAutodeleteEnabledPtr *bool `xml:"is-autodelete-enabled"` - PrefixPtr *string `xml:"prefix"` - TargetFreeSpacePtr *int `xml:"target-free-space"` - TriggerPtr *string `xml:"trigger"` -} - -// NewVolumeSnapshotAutodeleteAttributesType is a factory method for creating new instances of VolumeSnapshotAutodeleteAttributesType objects -func 
NewVolumeSnapshotAutodeleteAttributesType() *VolumeSnapshotAutodeleteAttributesType { - return &VolumeSnapshotAutodeleteAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSnapshotAutodeleteAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSnapshotAutodeleteAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// Commitment is a 'getter' method -func (o *VolumeSnapshotAutodeleteAttributesType) Commitment() string { - r := *o.CommitmentPtr - return r -} - -// SetCommitment is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAutodeleteAttributesType) SetCommitment(newValue string) *VolumeSnapshotAutodeleteAttributesType { - o.CommitmentPtr = &newValue - return o -} - -// DeferDelete is a 'getter' method -func (o *VolumeSnapshotAutodeleteAttributesType) DeferDelete() string { - r := *o.DeferDeletePtr - return r -} - -// SetDeferDelete is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAutodeleteAttributesType) SetDeferDelete(newValue string) *VolumeSnapshotAutodeleteAttributesType { - o.DeferDeletePtr = &newValue - return o -} - -// DeleteOrder is a 'getter' method -func (o *VolumeSnapshotAutodeleteAttributesType) DeleteOrder() string { - r := *o.DeleteOrderPtr - return r -} - -// SetDeleteOrder is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAutodeleteAttributesType) SetDeleteOrder(newValue string) *VolumeSnapshotAutodeleteAttributesType { - o.DeleteOrderPtr = &newValue - return o -} - -// DestroyList is a 'getter' method -func (o *VolumeSnapshotAutodeleteAttributesType) DestroyList() string { - r := *o.DestroyListPtr - return r -} - -// SetDestroyList is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAutodeleteAttributesType) SetDestroyList(newValue string) *VolumeSnapshotAutodeleteAttributesType { - o.DestroyListPtr = &newValue - return o -} - -// IsAutodeleteEnabled is a 'getter' method -func (o *VolumeSnapshotAutodeleteAttributesType) IsAutodeleteEnabled() bool { - r := *o.IsAutodeleteEnabledPtr - return r -} - -// SetIsAutodeleteEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAutodeleteAttributesType) SetIsAutodeleteEnabled(newValue bool) *VolumeSnapshotAutodeleteAttributesType { - o.IsAutodeleteEnabledPtr = &newValue - return o -} - -// Prefix is a 'getter' method -func (o *VolumeSnapshotAutodeleteAttributesType) Prefix() string { - r := *o.PrefixPtr - return r -} - -// SetPrefix is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAutodeleteAttributesType) SetPrefix(newValue string) *VolumeSnapshotAutodeleteAttributesType { - o.PrefixPtr = &newValue - return o -} - -// TargetFreeSpace is a 'getter' method -func (o *VolumeSnapshotAutodeleteAttributesType) TargetFreeSpace() int { - r := *o.TargetFreeSpacePtr - return r -} - -// SetTargetFreeSpace is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAutodeleteAttributesType) SetTargetFreeSpace(newValue int) *VolumeSnapshotAutodeleteAttributesType { - o.TargetFreeSpacePtr = &newValue - return o -} - -// Trigger is a 'getter' method -func (o *VolumeSnapshotAutodeleteAttributesType) Trigger() string { - r := *o.TriggerPtr - 
return r -} - -// SetTrigger is a fluent style 'setter' method that can be chained -func (o *VolumeSnapshotAutodeleteAttributesType) SetTrigger(newValue string) *VolumeSnapshotAutodeleteAttributesType { - o.TriggerPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-space-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-space-attributes.go deleted file mode 100644 index ba9e38205..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-space-attributes.go +++ /dev/null @@ -1,539 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeSpaceAttributesType is a structure to represent a volume-space-attributes ZAPI object -type VolumeSpaceAttributesType struct { - XMLName xml.Name `xml:"volume-space-attributes"` - ExpectedAvailablePtr *int `xml:"expected-available"` - FilesystemSizePtr *int `xml:"filesystem-size"` - IsFilesysSizeFixedPtr *bool `xml:"is-filesys-size-fixed"` - IsSpaceEnforcementLogicalPtr *bool `xml:"is-space-enforcement-logical"` - IsSpaceGuaranteeEnabledPtr *bool `xml:"is-space-guarantee-enabled"` - IsSpaceReportingLogicalPtr *bool `xml:"is-space-reporting-logical"` - IsSpaceSloEnabledPtr *string `xml:"is-space-slo-enabled"` - LogicalAvailablePtr *int `xml:"logical-available"` - LogicalUsedPtr *int `xml:"logical-used"` - LogicalUsedByAfsPtr *int `xml:"logical-used-by-afs"` - LogicalUsedBySnapshotsPtr *int `xml:"logical-used-by-snapshots"` - LogicalUsedPercentPtr *int `xml:"logical-used-percent"` - MaxConstituentSizePtr *SizeType `xml:"max-constituent-size"` - OverProvisionedPtr *int `xml:"over-provisioned"` - OverwriteReservePtr *int `xml:"overwrite-reserve"` - OverwriteReserveRequiredPtr *int `xml:"overwrite-reserve-required"` - OverwriteReserveUsedPtr *int `xml:"overwrite-reserve-used"` - OverwriteReserveUsedActualPtr *int `xml:"overwrite-reserve-used-actual"` - PercentageFractionalReservePtr *int `xml:"percentage-fractional-reserve"` - PercentageSizeUsedPtr *int `xml:"percentage-size-used"` - PercentageSnapshotReservePtr *int `xml:"percentage-snapshot-reserve"` - PercentageSnapshotReserveUsedPtr *int `xml:"percentage-snapshot-reserve-used"` - PerformanceTierInactiveUserDataPtr *int `xml:"performance-tier-inactive-user-data"` - PerformanceTierInactiveUserDataPercentPtr *int `xml:"performance-tier-inactive-user-data-percent"` - PhysicalUsedPtr *int `xml:"physical-used"` - PhysicalUsedPercentPtr *int `xml:"physical-used-percent"` - SizePtr *int `xml:"size"` - SizeAvailablePtr *int `xml:"size-available"` - SizeAvailableForSnapshotsPtr *int `xml:"size-available-for-snapshots"` - SizeTotalPtr *int `xml:"size-total"` - SizeUsedPtr *int `xml:"size-used"` - SizeUsedBySnapshotsPtr *int `xml:"size-used-by-snapshots"` - SnapshotReserveAvailablePtr *int `xml:"snapshot-reserve-available"` - SnapshotReserveSizePtr *int `xml:"snapshot-reserve-size"` - SpaceFullThresholdPercentPtr *int `xml:"space-full-threshold-percent"` - SpaceGuaranteePtr *string `xml:"space-guarantee"` - SpaceMgmtOptionTryFirstPtr *string `xml:"space-mgmt-option-try-first"` - SpaceNearlyFullThresholdPercentPtr *int `xml:"space-nearly-full-threshold-percent"` - SpaceSloPtr *SpaceSloEnumType `xml:"space-slo"` -} - -// NewVolumeSpaceAttributesType is a factory method for creating new instances of VolumeSpaceAttributesType objects -func NewVolumeSpaceAttributesType() *VolumeSpaceAttributesType { - return 
&VolumeSpaceAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeSpaceAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeSpaceAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// ExpectedAvailable is a 'getter' method -func (o *VolumeSpaceAttributesType) ExpectedAvailable() int { - r := *o.ExpectedAvailablePtr - return r -} - -// SetExpectedAvailable is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetExpectedAvailable(newValue int) *VolumeSpaceAttributesType { - o.ExpectedAvailablePtr = &newValue - return o -} - -// FilesystemSize is a 'getter' method -func (o *VolumeSpaceAttributesType) FilesystemSize() int { - r := *o.FilesystemSizePtr - return r -} - -// SetFilesystemSize is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetFilesystemSize(newValue int) *VolumeSpaceAttributesType { - o.FilesystemSizePtr = &newValue - return o -} - -// IsFilesysSizeFixed is a 'getter' method -func (o *VolumeSpaceAttributesType) IsFilesysSizeFixed() bool { - r := *o.IsFilesysSizeFixedPtr - return r -} - -// SetIsFilesysSizeFixed is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetIsFilesysSizeFixed(newValue bool) *VolumeSpaceAttributesType { - o.IsFilesysSizeFixedPtr = &newValue - return o -} - -// IsSpaceEnforcementLogical is a 'getter' method -func (o *VolumeSpaceAttributesType) IsSpaceEnforcementLogical() bool { - r := *o.IsSpaceEnforcementLogicalPtr - return r -} - -// SetIsSpaceEnforcementLogical is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetIsSpaceEnforcementLogical(newValue bool) *VolumeSpaceAttributesType { - o.IsSpaceEnforcementLogicalPtr = &newValue - return o -} - -// IsSpaceGuaranteeEnabled is a 'getter' method -func (o *VolumeSpaceAttributesType) IsSpaceGuaranteeEnabled() bool { - r := *o.IsSpaceGuaranteeEnabledPtr - return r -} - -// SetIsSpaceGuaranteeEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetIsSpaceGuaranteeEnabled(newValue bool) *VolumeSpaceAttributesType { - o.IsSpaceGuaranteeEnabledPtr = &newValue - return o -} - -// IsSpaceReportingLogical is a 'getter' method -func (o *VolumeSpaceAttributesType) IsSpaceReportingLogical() bool { - r := *o.IsSpaceReportingLogicalPtr - return r -} - -// SetIsSpaceReportingLogical is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetIsSpaceReportingLogical(newValue bool) *VolumeSpaceAttributesType { - o.IsSpaceReportingLogicalPtr = &newValue - return o -} - -// IsSpaceSloEnabled is a 'getter' method -func (o *VolumeSpaceAttributesType) IsSpaceSloEnabled() string { - r := *o.IsSpaceSloEnabledPtr - return r -} - -// SetIsSpaceSloEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetIsSpaceSloEnabled(newValue string) *VolumeSpaceAttributesType { - o.IsSpaceSloEnabledPtr = &newValue - return o -} - -// LogicalAvailable is a 'getter' method -func (o *VolumeSpaceAttributesType) LogicalAvailable() int { - r := *o.LogicalAvailablePtr - return r -} - -// SetLogicalAvailable is a fluent style 'setter' method that can be chained 
-func (o *VolumeSpaceAttributesType) SetLogicalAvailable(newValue int) *VolumeSpaceAttributesType { - o.LogicalAvailablePtr = &newValue - return o -} - -// LogicalUsed is a 'getter' method -func (o *VolumeSpaceAttributesType) LogicalUsed() int { - r := *o.LogicalUsedPtr - return r -} - -// SetLogicalUsed is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetLogicalUsed(newValue int) *VolumeSpaceAttributesType { - o.LogicalUsedPtr = &newValue - return o -} - -// LogicalUsedByAfs is a 'getter' method -func (o *VolumeSpaceAttributesType) LogicalUsedByAfs() int { - r := *o.LogicalUsedByAfsPtr - return r -} - -// SetLogicalUsedByAfs is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetLogicalUsedByAfs(newValue int) *VolumeSpaceAttributesType { - o.LogicalUsedByAfsPtr = &newValue - return o -} - -// LogicalUsedBySnapshots is a 'getter' method -func (o *VolumeSpaceAttributesType) LogicalUsedBySnapshots() int { - r := *o.LogicalUsedBySnapshotsPtr - return r -} - -// SetLogicalUsedBySnapshots is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetLogicalUsedBySnapshots(newValue int) *VolumeSpaceAttributesType { - o.LogicalUsedBySnapshotsPtr = &newValue - return o -} - -// LogicalUsedPercent is a 'getter' method -func (o *VolumeSpaceAttributesType) LogicalUsedPercent() int { - r := *o.LogicalUsedPercentPtr - return r -} - -// SetLogicalUsedPercent is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetLogicalUsedPercent(newValue int) *VolumeSpaceAttributesType { - o.LogicalUsedPercentPtr = &newValue - return o -} - -// MaxConstituentSize is a 'getter' method -func (o *VolumeSpaceAttributesType) MaxConstituentSize() SizeType { - r := *o.MaxConstituentSizePtr - return r -} - -// SetMaxConstituentSize is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetMaxConstituentSize(newValue SizeType) *VolumeSpaceAttributesType { - o.MaxConstituentSizePtr = &newValue - return o -} - -// OverProvisioned is a 'getter' method -func (o *VolumeSpaceAttributesType) OverProvisioned() int { - r := *o.OverProvisionedPtr - return r -} - -// SetOverProvisioned is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetOverProvisioned(newValue int) *VolumeSpaceAttributesType { - o.OverProvisionedPtr = &newValue - return o -} - -// OverwriteReserve is a 'getter' method -func (o *VolumeSpaceAttributesType) OverwriteReserve() int { - r := *o.OverwriteReservePtr - return r -} - -// SetOverwriteReserve is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetOverwriteReserve(newValue int) *VolumeSpaceAttributesType { - o.OverwriteReservePtr = &newValue - return o -} - -// OverwriteReserveRequired is a 'getter' method -func (o *VolumeSpaceAttributesType) OverwriteReserveRequired() int { - r := *o.OverwriteReserveRequiredPtr - return r -} - -// SetOverwriteReserveRequired is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetOverwriteReserveRequired(newValue int) *VolumeSpaceAttributesType { - o.OverwriteReserveRequiredPtr = &newValue - return o -} - -// OverwriteReserveUsed is a 'getter' method -func (o *VolumeSpaceAttributesType) OverwriteReserveUsed() int { - r := *o.OverwriteReserveUsedPtr - return r -} - -// SetOverwriteReserveUsed is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) 
SetOverwriteReserveUsed(newValue int) *VolumeSpaceAttributesType { - o.OverwriteReserveUsedPtr = &newValue - return o -} - -// OverwriteReserveUsedActual is a 'getter' method -func (o *VolumeSpaceAttributesType) OverwriteReserveUsedActual() int { - r := *o.OverwriteReserveUsedActualPtr - return r -} - -// SetOverwriteReserveUsedActual is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetOverwriteReserveUsedActual(newValue int) *VolumeSpaceAttributesType { - o.OverwriteReserveUsedActualPtr = &newValue - return o -} - -// PercentageFractionalReserve is a 'getter' method -func (o *VolumeSpaceAttributesType) PercentageFractionalReserve() int { - r := *o.PercentageFractionalReservePtr - return r -} - -// SetPercentageFractionalReserve is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetPercentageFractionalReserve(newValue int) *VolumeSpaceAttributesType { - o.PercentageFractionalReservePtr = &newValue - return o -} - -// PercentageSizeUsed is a 'getter' method -func (o *VolumeSpaceAttributesType) PercentageSizeUsed() int { - r := *o.PercentageSizeUsedPtr - return r -} - -// SetPercentageSizeUsed is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetPercentageSizeUsed(newValue int) *VolumeSpaceAttributesType { - o.PercentageSizeUsedPtr = &newValue - return o -} - -// PercentageSnapshotReserve is a 'getter' method -func (o *VolumeSpaceAttributesType) PercentageSnapshotReserve() int { - r := *o.PercentageSnapshotReservePtr - return r -} - -// SetPercentageSnapshotReserve is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetPercentageSnapshotReserve(newValue int) *VolumeSpaceAttributesType { - o.PercentageSnapshotReservePtr = &newValue - return o -} - -// PercentageSnapshotReserveUsed is a 'getter' method -func (o *VolumeSpaceAttributesType) PercentageSnapshotReserveUsed() int { - r := *o.PercentageSnapshotReserveUsedPtr - return r -} - -// SetPercentageSnapshotReserveUsed is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetPercentageSnapshotReserveUsed(newValue int) *VolumeSpaceAttributesType { - o.PercentageSnapshotReserveUsedPtr = &newValue - return o -} - -// PerformanceTierInactiveUserData is a 'getter' method -func (o *VolumeSpaceAttributesType) PerformanceTierInactiveUserData() int { - r := *o.PerformanceTierInactiveUserDataPtr - return r -} - -// SetPerformanceTierInactiveUserData is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetPerformanceTierInactiveUserData(newValue int) *VolumeSpaceAttributesType { - o.PerformanceTierInactiveUserDataPtr = &newValue - return o -} - -// PerformanceTierInactiveUserDataPercent is a 'getter' method -func (o *VolumeSpaceAttributesType) PerformanceTierInactiveUserDataPercent() int { - r := *o.PerformanceTierInactiveUserDataPercentPtr - return r -} - -// SetPerformanceTierInactiveUserDataPercent is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetPerformanceTierInactiveUserDataPercent(newValue int) *VolumeSpaceAttributesType { - o.PerformanceTierInactiveUserDataPercentPtr = &newValue - return o -} - -// PhysicalUsed is a 'getter' method -func (o *VolumeSpaceAttributesType) PhysicalUsed() int { - r := *o.PhysicalUsedPtr - return r -} - -// SetPhysicalUsed is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetPhysicalUsed(newValue int) 
*VolumeSpaceAttributesType { - o.PhysicalUsedPtr = &newValue - return o -} - -// PhysicalUsedPercent is a 'getter' method -func (o *VolumeSpaceAttributesType) PhysicalUsedPercent() int { - r := *o.PhysicalUsedPercentPtr - return r -} - -// SetPhysicalUsedPercent is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetPhysicalUsedPercent(newValue int) *VolumeSpaceAttributesType { - o.PhysicalUsedPercentPtr = &newValue - return o -} - -// Size is a 'getter' method -func (o *VolumeSpaceAttributesType) Size() int { - r := *o.SizePtr - return r -} - -// SetSize is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSize(newValue int) *VolumeSpaceAttributesType { - o.SizePtr = &newValue - return o -} - -// SizeAvailable is a 'getter' method -func (o *VolumeSpaceAttributesType) SizeAvailable() int { - r := *o.SizeAvailablePtr - return r -} - -// SetSizeAvailable is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSizeAvailable(newValue int) *VolumeSpaceAttributesType { - o.SizeAvailablePtr = &newValue - return o -} - -// SizeAvailableForSnapshots is a 'getter' method -func (o *VolumeSpaceAttributesType) SizeAvailableForSnapshots() int { - r := *o.SizeAvailableForSnapshotsPtr - return r -} - -// SetSizeAvailableForSnapshots is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSizeAvailableForSnapshots(newValue int) *VolumeSpaceAttributesType { - o.SizeAvailableForSnapshotsPtr = &newValue - return o -} - -// SizeTotal is a 'getter' method -func (o *VolumeSpaceAttributesType) SizeTotal() int { - r := *o.SizeTotalPtr - return r -} - -// SetSizeTotal is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSizeTotal(newValue int) *VolumeSpaceAttributesType { - o.SizeTotalPtr = &newValue - return o -} - -// SizeUsed is a 'getter' method -func (o *VolumeSpaceAttributesType) SizeUsed() int { - r := *o.SizeUsedPtr - return r -} - -// SetSizeUsed is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSizeUsed(newValue int) *VolumeSpaceAttributesType { - o.SizeUsedPtr = &newValue - return o -} - -// SizeUsedBySnapshots is a 'getter' method -func (o *VolumeSpaceAttributesType) SizeUsedBySnapshots() int { - r := *o.SizeUsedBySnapshotsPtr - return r -} - -// SetSizeUsedBySnapshots is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSizeUsedBySnapshots(newValue int) *VolumeSpaceAttributesType { - o.SizeUsedBySnapshotsPtr = &newValue - return o -} - -// SnapshotReserveAvailable is a 'getter' method -func (o *VolumeSpaceAttributesType) SnapshotReserveAvailable() int { - r := *o.SnapshotReserveAvailablePtr - return r -} - -// SetSnapshotReserveAvailable is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSnapshotReserveAvailable(newValue int) *VolumeSpaceAttributesType { - o.SnapshotReserveAvailablePtr = &newValue - return o -} - -// SnapshotReserveSize is a 'getter' method -func (o *VolumeSpaceAttributesType) SnapshotReserveSize() int { - r := *o.SnapshotReserveSizePtr - return r -} - -// SetSnapshotReserveSize is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSnapshotReserveSize(newValue int) *VolumeSpaceAttributesType { - o.SnapshotReserveSizePtr = &newValue - return o -} - -// SpaceFullThresholdPercent is a 'getter' method -func (o 
*VolumeSpaceAttributesType) SpaceFullThresholdPercent() int { - r := *o.SpaceFullThresholdPercentPtr - return r -} - -// SetSpaceFullThresholdPercent is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSpaceFullThresholdPercent(newValue int) *VolumeSpaceAttributesType { - o.SpaceFullThresholdPercentPtr = &newValue - return o -} - -// SpaceGuarantee is a 'getter' method -func (o *VolumeSpaceAttributesType) SpaceGuarantee() string { - r := *o.SpaceGuaranteePtr - return r -} - -// SetSpaceGuarantee is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSpaceGuarantee(newValue string) *VolumeSpaceAttributesType { - o.SpaceGuaranteePtr = &newValue - return o -} - -// SpaceMgmtOptionTryFirst is a 'getter' method -func (o *VolumeSpaceAttributesType) SpaceMgmtOptionTryFirst() string { - r := *o.SpaceMgmtOptionTryFirstPtr - return r -} - -// SetSpaceMgmtOptionTryFirst is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSpaceMgmtOptionTryFirst(newValue string) *VolumeSpaceAttributesType { - o.SpaceMgmtOptionTryFirstPtr = &newValue - return o -} - -// SpaceNearlyFullThresholdPercent is a 'getter' method -func (o *VolumeSpaceAttributesType) SpaceNearlyFullThresholdPercent() int { - r := *o.SpaceNearlyFullThresholdPercentPtr - return r -} - -// SetSpaceNearlyFullThresholdPercent is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSpaceNearlyFullThresholdPercent(newValue int) *VolumeSpaceAttributesType { - o.SpaceNearlyFullThresholdPercentPtr = &newValue - return o -} - -// SpaceSlo is a 'getter' method -func (o *VolumeSpaceAttributesType) SpaceSlo() SpaceSloEnumType { - r := *o.SpaceSloPtr - return r -} - -// SetSpaceSlo is a fluent style 'setter' method that can be chained -func (o *VolumeSpaceAttributesType) SetSpaceSlo(newValue SpaceSloEnumType) *VolumeSpaceAttributesType { - o.SpaceSloPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-state-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-state-attributes.go deleted file mode 100644 index 5b741b68a..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-state-attributes.go +++ /dev/null @@ -1,352 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeStateAttributesType is a structure to represent a volume-state-attributes ZAPI object -type VolumeStateAttributesType struct { - XMLName xml.Name `xml:"volume-state-attributes"` - BecomeNodeRootAfterRebootPtr *bool `xml:"become-node-root-after-reboot"` - ForceNvfailOnDrPtr *bool `xml:"force-nvfail-on-dr"` - IgnoreInconsistentPtr *bool `xml:"ignore-inconsistent"` - InNvfailedStatePtr *bool `xml:"in-nvfailed-state"` - IsClusterVolumePtr *bool `xml:"is-cluster-volume"` - IsConstituentPtr *bool `xml:"is-constituent"` - IsFlexgroupPtr *bool `xml:"is-flexgroup"` - IsFlexgroupQtreeEnabledPtr *bool `xml:"is-flexgroup-qtree-enabled"` - IsInconsistentPtr *bool `xml:"is-inconsistent"` - IsInvalidPtr *bool `xml:"is-invalid"` - IsJunctionActivePtr *bool `xml:"is-junction-active"` - IsMoveDestinationInCutoverPtr *bool `xml:"is-move-destination-in-cutover"` - IsMovingPtr *bool `xml:"is-moving"` - IsNodeRootPtr *bool `xml:"is-node-root"` - IsNvfailEnabledPtr *bool `xml:"is-nvfail-enabled"` - IsProtocolAccessFencedPtr *bool 
`xml:"is-protocol-access-fenced"` - IsQuiescedInMemoryPtr *bool `xml:"is-quiesced-in-memory"` - IsQuiescedOnDiskPtr *bool `xml:"is-quiesced-on-disk"` - IsUnrecoverablePtr *bool `xml:"is-unrecoverable"` - IsVolumeInCutoverPtr *bool `xml:"is-volume-in-cutover"` - IsVserverRootPtr *bool `xml:"is-vserver-root"` - StatePtr *string `xml:"state"` - StatusPtr *VolumeStateAttributesTypeStatus `xml:"status"` - // work in progress -} - -// NewVolumeStateAttributesType is a factory method for creating new instances of VolumeStateAttributesType objects -func NewVolumeStateAttributesType() *VolumeStateAttributesType { - return &VolumeStateAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeStateAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeStateAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// BecomeNodeRootAfterReboot is a 'getter' method -func (o *VolumeStateAttributesType) BecomeNodeRootAfterReboot() bool { - r := *o.BecomeNodeRootAfterRebootPtr - return r -} - -// SetBecomeNodeRootAfterReboot is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetBecomeNodeRootAfterReboot(newValue bool) *VolumeStateAttributesType { - o.BecomeNodeRootAfterRebootPtr = &newValue - return o -} - -// ForceNvfailOnDr is a 'getter' method -func (o *VolumeStateAttributesType) ForceNvfailOnDr() bool { - r := *o.ForceNvfailOnDrPtr - return r -} - -// SetForceNvfailOnDr is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetForceNvfailOnDr(newValue bool) *VolumeStateAttributesType { - o.ForceNvfailOnDrPtr = &newValue - return o -} - -// IgnoreInconsistent is a 'getter' method -func (o *VolumeStateAttributesType) IgnoreInconsistent() bool { - r := *o.IgnoreInconsistentPtr - return r -} - -// SetIgnoreInconsistent is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIgnoreInconsistent(newValue bool) *VolumeStateAttributesType { - o.IgnoreInconsistentPtr = &newValue - return o -} - -// InNvfailedState is a 'getter' method -func (o *VolumeStateAttributesType) InNvfailedState() bool { - r := *o.InNvfailedStatePtr - return r -} - -// SetInNvfailedState is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetInNvfailedState(newValue bool) *VolumeStateAttributesType { - o.InNvfailedStatePtr = &newValue - return o -} - -// IsClusterVolume is a 'getter' method -func (o *VolumeStateAttributesType) IsClusterVolume() bool { - r := *o.IsClusterVolumePtr - return r -} - -// SetIsClusterVolume is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsClusterVolume(newValue bool) *VolumeStateAttributesType { - o.IsClusterVolumePtr = &newValue - return o -} - -// IsConstituent is a 'getter' method -func (o *VolumeStateAttributesType) IsConstituent() bool { - r := *o.IsConstituentPtr - return r -} - -// SetIsConstituent is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsConstituent(newValue bool) *VolumeStateAttributesType { - o.IsConstituentPtr = &newValue - return o -} - -// IsFlexgroup is a 'getter' method -func (o *VolumeStateAttributesType) IsFlexgroup() bool { - r := 
*o.IsFlexgroupPtr - return r -} - -// SetIsFlexgroup is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsFlexgroup(newValue bool) *VolumeStateAttributesType { - o.IsFlexgroupPtr = &newValue - return o -} - -// IsFlexgroupQtreeEnabled is a 'getter' method -func (o *VolumeStateAttributesType) IsFlexgroupQtreeEnabled() bool { - r := *o.IsFlexgroupQtreeEnabledPtr - return r -} - -// SetIsFlexgroupQtreeEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsFlexgroupQtreeEnabled(newValue bool) *VolumeStateAttributesType { - o.IsFlexgroupQtreeEnabledPtr = &newValue - return o -} - -// IsInconsistent is a 'getter' method -func (o *VolumeStateAttributesType) IsInconsistent() bool { - r := *o.IsInconsistentPtr - return r -} - -// SetIsInconsistent is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsInconsistent(newValue bool) *VolumeStateAttributesType { - o.IsInconsistentPtr = &newValue - return o -} - -// IsInvalid is a 'getter' method -func (o *VolumeStateAttributesType) IsInvalid() bool { - r := *o.IsInvalidPtr - return r -} - -// SetIsInvalid is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsInvalid(newValue bool) *VolumeStateAttributesType { - o.IsInvalidPtr = &newValue - return o -} - -// IsJunctionActive is a 'getter' method -func (o *VolumeStateAttributesType) IsJunctionActive() bool { - r := *o.IsJunctionActivePtr - return r -} - -// SetIsJunctionActive is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsJunctionActive(newValue bool) *VolumeStateAttributesType { - o.IsJunctionActivePtr = &newValue - return o -} - -// IsMoveDestinationInCutover is a 'getter' method -func (o *VolumeStateAttributesType) IsMoveDestinationInCutover() bool { - r := *o.IsMoveDestinationInCutoverPtr - return r -} - -// SetIsMoveDestinationInCutover is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsMoveDestinationInCutover(newValue bool) *VolumeStateAttributesType { - o.IsMoveDestinationInCutoverPtr = &newValue - return o -} - -// IsMoving is a 'getter' method -func (o *VolumeStateAttributesType) IsMoving() bool { - r := *o.IsMovingPtr - return r -} - -// SetIsMoving is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsMoving(newValue bool) *VolumeStateAttributesType { - o.IsMovingPtr = &newValue - return o -} - -// IsNodeRoot is a 'getter' method -func (o *VolumeStateAttributesType) IsNodeRoot() bool { - r := *o.IsNodeRootPtr - return r -} - -// SetIsNodeRoot is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsNodeRoot(newValue bool) *VolumeStateAttributesType { - o.IsNodeRootPtr = &newValue - return o -} - -// IsNvfailEnabled is a 'getter' method -func (o *VolumeStateAttributesType) IsNvfailEnabled() bool { - r := *o.IsNvfailEnabledPtr - return r -} - -// SetIsNvfailEnabled is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsNvfailEnabled(newValue bool) *VolumeStateAttributesType { - o.IsNvfailEnabledPtr = &newValue - return o -} - -// IsProtocolAccessFenced is a 'getter' method -func (o *VolumeStateAttributesType) IsProtocolAccessFenced() bool { - r := *o.IsProtocolAccessFencedPtr - return r -} - -// SetIsProtocolAccessFenced is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) 
SetIsProtocolAccessFenced(newValue bool) *VolumeStateAttributesType { - o.IsProtocolAccessFencedPtr = &newValue - return o -} - -// IsQuiescedInMemory is a 'getter' method -func (o *VolumeStateAttributesType) IsQuiescedInMemory() bool { - r := *o.IsQuiescedInMemoryPtr - return r -} - -// SetIsQuiescedInMemory is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsQuiescedInMemory(newValue bool) *VolumeStateAttributesType { - o.IsQuiescedInMemoryPtr = &newValue - return o -} - -// IsQuiescedOnDisk is a 'getter' method -func (o *VolumeStateAttributesType) IsQuiescedOnDisk() bool { - r := *o.IsQuiescedOnDiskPtr - return r -} - -// SetIsQuiescedOnDisk is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsQuiescedOnDisk(newValue bool) *VolumeStateAttributesType { - o.IsQuiescedOnDiskPtr = &newValue - return o -} - -// IsUnrecoverable is a 'getter' method -func (o *VolumeStateAttributesType) IsUnrecoverable() bool { - r := *o.IsUnrecoverablePtr - return r -} - -// SetIsUnrecoverable is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsUnrecoverable(newValue bool) *VolumeStateAttributesType { - o.IsUnrecoverablePtr = &newValue - return o -} - -// IsVolumeInCutover is a 'getter' method -func (o *VolumeStateAttributesType) IsVolumeInCutover() bool { - r := *o.IsVolumeInCutoverPtr - return r -} - -// SetIsVolumeInCutover is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsVolumeInCutover(newValue bool) *VolumeStateAttributesType { - o.IsVolumeInCutoverPtr = &newValue - return o -} - -// IsVserverRoot is a 'getter' method -func (o *VolumeStateAttributesType) IsVserverRoot() bool { - r := *o.IsVserverRootPtr - return r -} - -// SetIsVserverRoot is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetIsVserverRoot(newValue bool) *VolumeStateAttributesType { - o.IsVserverRootPtr = &newValue - return o -} - -// State is a 'getter' method -func (o *VolumeStateAttributesType) State() string { - r := *o.StatePtr - return r -} - -// SetState is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetState(newValue string) *VolumeStateAttributesType { - o.StatePtr = &newValue - return o -} - -// VolumeStateAttributesTypeStatus is a wrapper -type VolumeStateAttributesTypeStatus struct { - XMLName xml.Name `xml:"status"` - StringPtr []string `xml:"string"` -} - -// String is a 'getter' method -func (o *VolumeStateAttributesTypeStatus) String() []string { - r := o.StringPtr - return r -} - -// SetString is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesTypeStatus) SetString(newValue []string) *VolumeStateAttributesTypeStatus { - newSlice := make([]string, len(newValue)) - copy(newSlice, newValue) - o.StringPtr = newSlice - return o -} - -// Status is a 'getter' method -func (o *VolumeStateAttributesType) Status() VolumeStateAttributesTypeStatus { - r := *o.StatusPtr - return r -} - -// SetStatus is a fluent style 'setter' method that can be chained -func (o *VolumeStateAttributesType) SetStatus(newValue VolumeStateAttributesTypeStatus) *VolumeStateAttributesType { - o.StatusPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-transition-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-transition-attributes.go deleted file mode 
100644 index 183050f28..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-transition-attributes.go +++ /dev/null @@ -1,84 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeTransitionAttributesType is a structure to represent a volume-transition-attributes ZAPI object -type VolumeTransitionAttributesType struct { - XMLName xml.Name `xml:"volume-transition-attributes"` - IsCftPrecommitPtr *bool `xml:"is-cft-precommit"` - IsCopiedForTransitionPtr *bool `xml:"is-copied-for-transition"` - IsTransitionedPtr *bool `xml:"is-transitioned"` - TransitionBehaviorPtr *string `xml:"transition-behavior"` -} - -// NewVolumeTransitionAttributesType is a factory method for creating new instances of VolumeTransitionAttributesType objects -func NewVolumeTransitionAttributesType() *VolumeTransitionAttributesType { - return &VolumeTransitionAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeTransitionAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeTransitionAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// IsCftPrecommit is a 'getter' method -func (o *VolumeTransitionAttributesType) IsCftPrecommit() bool { - r := *o.IsCftPrecommitPtr - return r -} - -// SetIsCftPrecommit is a fluent style 'setter' method that can be chained -func (o *VolumeTransitionAttributesType) SetIsCftPrecommit(newValue bool) *VolumeTransitionAttributesType { - o.IsCftPrecommitPtr = &newValue - return o -} - -// IsCopiedForTransition is a 'getter' method -func (o *VolumeTransitionAttributesType) IsCopiedForTransition() bool { - r := *o.IsCopiedForTransitionPtr - return r -} - -// SetIsCopiedForTransition is a fluent style 'setter' method that can be chained -func (o *VolumeTransitionAttributesType) SetIsCopiedForTransition(newValue bool) *VolumeTransitionAttributesType { - o.IsCopiedForTransitionPtr = &newValue - return o -} - -// IsTransitioned is a 'getter' method -func (o *VolumeTransitionAttributesType) IsTransitioned() bool { - r := *o.IsTransitionedPtr - return r -} - -// SetIsTransitioned is a fluent style 'setter' method that can be chained -func (o *VolumeTransitionAttributesType) SetIsTransitioned(newValue bool) *VolumeTransitionAttributesType { - o.IsTransitionedPtr = &newValue - return o -} - -// TransitionBehavior is a 'getter' method -func (o *VolumeTransitionAttributesType) TransitionBehavior() string { - r := *o.TransitionBehaviorPtr - return r -} - -// SetTransitionBehavior is a fluent style 'setter' method that can be chained -func (o *VolumeTransitionAttributesType) SetTransitionBehavior(newValue string) *VolumeTransitionAttributesType { - o.TransitionBehaviorPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-vm-align-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-vm-align-attributes.go deleted file mode 100644 index 8f872f413..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-vm-align-attributes.go +++ /dev/null @@ -1,58 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// 
VolumeVmAlignAttributesType is a structure to represent a volume-vm-align-attributes ZAPI object -type VolumeVmAlignAttributesType struct { - XMLName xml.Name `xml:"volume-vm-align-attributes"` - VmAlignSectorPtr *int `xml:"vm-align-sector"` - VmAlignSuffixPtr *string `xml:"vm-align-suffix"` -} - -// NewVolumeVmAlignAttributesType is a factory method for creating new instances of VolumeVmAlignAttributesType objects -func NewVolumeVmAlignAttributesType() *VolumeVmAlignAttributesType { - return &VolumeVmAlignAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeVmAlignAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeVmAlignAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VmAlignSector is a 'getter' method -func (o *VolumeVmAlignAttributesType) VmAlignSector() int { - r := *o.VmAlignSectorPtr - return r -} - -// SetVmAlignSector is a fluent style 'setter' method that can be chained -func (o *VolumeVmAlignAttributesType) SetVmAlignSector(newValue int) *VolumeVmAlignAttributesType { - o.VmAlignSectorPtr = &newValue - return o -} - -// VmAlignSuffix is a 'getter' method -func (o *VolumeVmAlignAttributesType) VmAlignSuffix() string { - r := *o.VmAlignSuffixPtr - return r -} - -// SetVmAlignSuffix is a fluent style 'setter' method that can be chained -func (o *VolumeVmAlignAttributesType) SetVmAlignSuffix(newValue string) *VolumeVmAlignAttributesType { - o.VmAlignSuffixPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-vserver-dr-protection-attributes.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-vserver-dr-protection-attributes.go deleted file mode 100644 index 62d1a5191..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-volume-vserver-dr-protection-attributes.go +++ /dev/null @@ -1,45 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VolumeVserverDrProtectionAttributesType is a structure to represent a volume-vserver-dr-protection-attributes ZAPI object -type VolumeVserverDrProtectionAttributesType struct { - XMLName xml.Name `xml:"volume-vserver-dr-protection-attributes"` - VserverDrProtectionPtr *string `xml:"vserver-dr-protection"` -} - -// NewVolumeVserverDrProtectionAttributesType is a factory method for creating new instances of VolumeVserverDrProtectionAttributesType objects -func NewVolumeVserverDrProtectionAttributesType() *VolumeVserverDrProtectionAttributesType { - return &VolumeVserverDrProtectionAttributesType{} -} - -// ToXML converts this object into an xml string representation -func (o *VolumeVserverDrProtectionAttributesType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VolumeVserverDrProtectionAttributesType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VserverDrProtection is a 'getter' method -func (o *VolumeVserverDrProtectionAttributesType) VserverDrProtection() string { - r := *o.VserverDrProtectionPtr - return 
r -} - -// SetVserverDrProtection is a fluent style 'setter' method that can be chained -func (o *VolumeVserverDrProtectionAttributesType) SetVserverDrProtection(newValue string) *VolumeVserverDrProtectionAttributesType { - o.VserverDrProtectionPtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vsadminstate.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vsadminstate.go deleted file mode 100644 index 40ba23659..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vsadminstate.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// VsadminstateType is a structure to represent a vsadminstate ZAPI object -type VsadminstateType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vserver-aggr-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vserver-aggr-info.go deleted file mode 100644 index ed3c4aa82..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vserver-aggr-info.go +++ /dev/null @@ -1,71 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VserverAggrInfoType is a structure to represent a vserver-aggr-info ZAPI object -type VserverAggrInfoType struct { - XMLName xml.Name `xml:"vserver-aggr-info"` - AggrAvailsizePtr *SizeType `xml:"aggr-availsize"` - AggrIsCftPrecommitPtr *bool `xml:"aggr-is-cft-precommit"` - AggrNamePtr *AggrNameType `xml:"aggr-name"` -} - -// NewVserverAggrInfoType is a factory method for creating new instances of VserverAggrInfoType objects -func NewVserverAggrInfoType() *VserverAggrInfoType { - return &VserverAggrInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *VserverAggrInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverAggrInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// AggrAvailsize is a 'getter' method -func (o *VserverAggrInfoType) AggrAvailsize() SizeType { - r := *o.AggrAvailsizePtr - return r -} - -// SetAggrAvailsize is a fluent style 'setter' method that can be chained -func (o *VserverAggrInfoType) SetAggrAvailsize(newValue SizeType) *VserverAggrInfoType { - o.AggrAvailsizePtr = &newValue - return o -} - -// AggrIsCftPrecommit is a 'getter' method -func (o *VserverAggrInfoType) AggrIsCftPrecommit() bool { - r := *o.AggrIsCftPrecommitPtr - return r -} - -// SetAggrIsCftPrecommit is a fluent style 'setter' method that can be chained -func (o *VserverAggrInfoType) SetAggrIsCftPrecommit(newValue bool) *VserverAggrInfoType { - o.AggrIsCftPrecommitPtr = &newValue - return o -} - -// AggrName is a 'getter' method -func (o *VserverAggrInfoType) AggrName() AggrNameType { - r := *o.AggrNamePtr - return r -} - -// SetAggrName is a fluent style 'setter' method that can be chained -func (o *VserverAggrInfoType) SetAggrName(newValue AggrNameType) *VserverAggrInfoType { - o.AggrNamePtr = &newValue - return o -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vserver-info.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vserver-info.go deleted file mode 100644 index 8f92415b6..000000000 --- 
a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vserver-info.go +++ /dev/null @@ -1,548 +0,0 @@ -package azgo - -import ( - "encoding/xml" - "reflect" - - log "github.com/sirupsen/logrus" -) - -// VserverInfoType is a structure to represent a vserver-info ZAPI object -type VserverInfoType struct { - XMLName xml.Name `xml:"vserver-info"` - AggrListPtr *VserverInfoTypeAggrList `xml:"aggr-list"` - // work in progress - AllowedProtocolsPtr *VserverInfoTypeAllowedProtocols `xml:"allowed-protocols"` - // work in progress - AntivirusOnAccessPolicyPtr *AntivirusPolicyType `xml:"antivirus-on-access-policy"` - CachingPolicyPtr *string `xml:"caching-policy"` - CommentPtr *string `xml:"comment"` - DisallowedProtocolsPtr *VserverInfoTypeDisallowedProtocols `xml:"disallowed-protocols"` - // work in progress - IpspacePtr *string `xml:"ipspace"` - IsConfigLockedForChangesPtr *bool `xml:"is-config-locked-for-changes"` - IsRepositoryVserverPtr *bool `xml:"is-repository-vserver"` - LanguagePtr *LanguageCodeType `xml:"language"` - LdapDomainPtr *string `xml:"ldap-domain"` - MaxVolumesPtr *string `xml:"max-volumes"` - NameMappingSwitchPtr *VserverInfoTypeNameMappingSwitch `xml:"name-mapping-switch"` - // work in progress - NameServerSwitchPtr *VserverInfoTypeNameServerSwitch `xml:"name-server-switch"` - // work in progress - NisDomainPtr *NisDomainType `xml:"nis-domain"` - OperationalStatePtr *VsoperstateType `xml:"operational-state"` - OperationalStateStoppedReasonPtr *VsopstopreasonType `xml:"operational-state-stopped-reason"` - QosPolicyGroupPtr *string `xml:"qos-policy-group"` - QuotaPolicyPtr *string `xml:"quota-policy"` - RootVolumePtr *VolumeNameType `xml:"root-volume"` - RootVolumeAggregatePtr *AggrNameType `xml:"root-volume-aggregate"` - RootVolumeSecurityStylePtr *SecurityStyleEnumType `xml:"root-volume-security-style"` - SnapshotPolicyPtr *SnapshotPolicyType `xml:"snapshot-policy"` - StatePtr *VsadminstateType `xml:"state"` - UuidPtr *UuidType `xml:"uuid"` - VolumeDeleteRetentionHoursPtr *int `xml:"volume-delete-retention-hours"` - VserverAggrInfoListPtr *VserverInfoTypeVserverAggrInfoList `xml:"vserver-aggr-info-list"` - // work in progress - VserverNamePtr *string `xml:"vserver-name"` - VserverSubtypePtr *string `xml:"vserver-subtype"` - VserverTypePtr *string `xml:"vserver-type"` -} - -// NewVserverInfoType is a factory method for creating new instances of VserverInfoType objects -func NewVserverInfoType() *VserverInfoType { - return &VserverInfoType{} -} - -// ToXML converts this object into an xml string representation -func (o *VserverInfoType) ToXML() (string, error) { - output, err := xml.MarshalIndent(o, " ", " ") - if err != nil { - log.Errorf("error: %v", err) - } - return string(output), err -} - -// String returns a string representation of this object's fields and implements the Stringer interface -func (o VserverInfoType) String() string { - return ToString(reflect.ValueOf(o)) -} - -// VserverInfoTypeAggrList is a wrapper -type VserverInfoTypeAggrList struct { - XMLName xml.Name `xml:"aggr-list"` - AggrNamePtr []AggrNameType `xml:"aggr-name"` -} - -// AggrName is a 'getter' method -func (o *VserverInfoTypeAggrList) AggrName() []AggrNameType { - r := o.AggrNamePtr - return r -} - -// SetAggrName is a fluent style 'setter' method that can be chained -func (o *VserverInfoTypeAggrList) SetAggrName(newValue []AggrNameType) *VserverInfoTypeAggrList { - newSlice := make([]AggrNameType, len(newValue)) - copy(newSlice, newValue) - o.AggrNamePtr = newSlice - return o -} - 
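(Editor's note: the vendored azgo types deleted in this diff all follow one generated pattern: every optional ZAPI field is a pointer with an `xml` tag, setters are fluent and take values by copy, and `ToXML` is built on `xml.MarshalIndent`. The sketch below is a minimal, self-contained illustration of that pattern only; `ExampleAttributesType` and its fields are hypothetical names, not part of the removed Trident code.)

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// ExampleAttributesType mirrors the shape of the generated azgo types being
// removed: each optional ZAPI field is a pointer with an xml tag, so absent
// elements stay nil and are omitted when marshalled.
type ExampleAttributesType struct {
	XMLName  xml.Name `xml:"example-attributes"`
	SizePtr  *int     `xml:"size"`
	StatePtr *string  `xml:"state"`
}

// SetSize is a fluent setter in the same style as the generated code: it
// stores the address of the copied argument and returns the receiver so
// calls can be chained.
func (o *ExampleAttributesType) SetSize(newValue int) *ExampleAttributesType {
	o.SizePtr = &newValue
	return o
}

// SetState follows the same pattern for string-valued fields.
func (o *ExampleAttributesType) SetState(newValue string) *ExampleAttributesType {
	o.StatePtr = &newValue
	return o
}

// ToXML marshals the object the same way the removed types do, via
// xml.MarshalIndent with a single-space prefix and indent.
func (o *ExampleAttributesType) ToXML() (string, error) {
	out, err := xml.MarshalIndent(o, " ", " ")
	return string(out), err
}

func main() {
	s, _ := new(ExampleAttributesType).SetSize(1024).SetState("online").ToXML()
	fmt.Println(s)
}
```

Taking `&newValue` inside the setter is safe here because Go passes the argument by value, so each call captures its own copy; that is what lets the chained fluent calls coexist without aliasing.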
-// AggrList is a 'getter' method -func (o *VserverInfoType) AggrList() VserverInfoTypeAggrList { - r := *o.AggrListPtr - return r -} - -// SetAggrList is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetAggrList(newValue VserverInfoTypeAggrList) *VserverInfoType { - o.AggrListPtr = &newValue - return o -} - -// VserverInfoTypeAllowedProtocols is a wrapper -type VserverInfoTypeAllowedProtocols struct { - XMLName xml.Name `xml:"allowed-protocols"` - ProtocolPtr []ProtocolType `xml:"protocol"` -} - -// Protocol is a 'getter' method -func (o *VserverInfoTypeAllowedProtocols) Protocol() []ProtocolType { - r := o.ProtocolPtr - return r -} - -// SetProtocol is a fluent style 'setter' method that can be chained -func (o *VserverInfoTypeAllowedProtocols) SetProtocol(newValue []ProtocolType) *VserverInfoTypeAllowedProtocols { - newSlice := make([]ProtocolType, len(newValue)) - copy(newSlice, newValue) - o.ProtocolPtr = newSlice - return o -} - -// AllowedProtocols is a 'getter' method -func (o *VserverInfoType) AllowedProtocols() VserverInfoTypeAllowedProtocols { - r := *o.AllowedProtocolsPtr - return r -} - -// SetAllowedProtocols is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetAllowedProtocols(newValue VserverInfoTypeAllowedProtocols) *VserverInfoType { - o.AllowedProtocolsPtr = &newValue - return o -} - -// AntivirusOnAccessPolicy is a 'getter' method -func (o *VserverInfoType) AntivirusOnAccessPolicy() AntivirusPolicyType { - r := *o.AntivirusOnAccessPolicyPtr - return r -} - -// SetAntivirusOnAccessPolicy is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetAntivirusOnAccessPolicy(newValue AntivirusPolicyType) *VserverInfoType { - o.AntivirusOnAccessPolicyPtr = &newValue - return o -} - -// CachingPolicy is a 'getter' method -func (o *VserverInfoType) CachingPolicy() string { - r := *o.CachingPolicyPtr - return r -} - -// SetCachingPolicy is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetCachingPolicy(newValue string) *VserverInfoType { - o.CachingPolicyPtr = &newValue - return o -} - -// Comment is a 'getter' method -func (o *VserverInfoType) Comment() string { - r := *o.CommentPtr - return r -} - -// SetComment is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetComment(newValue string) *VserverInfoType { - o.CommentPtr = &newValue - return o -} - -// VserverInfoTypeDisallowedProtocols is a wrapper -type VserverInfoTypeDisallowedProtocols struct { - XMLName xml.Name `xml:"disallowed-protocols"` - ProtocolPtr []ProtocolType `xml:"protocol"` -} - -// Protocol is a 'getter' method -func (o *VserverInfoTypeDisallowedProtocols) Protocol() []ProtocolType { - r := o.ProtocolPtr - return r -} - -// SetProtocol is a fluent style 'setter' method that can be chained -func (o *VserverInfoTypeDisallowedProtocols) SetProtocol(newValue []ProtocolType) *VserverInfoTypeDisallowedProtocols { - newSlice := make([]ProtocolType, len(newValue)) - copy(newSlice, newValue) - o.ProtocolPtr = newSlice - return o -} - -// DisallowedProtocols is a 'getter' method -func (o *VserverInfoType) DisallowedProtocols() VserverInfoTypeDisallowedProtocols { - r := *o.DisallowedProtocolsPtr - return r -} - -// SetDisallowedProtocols is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetDisallowedProtocols(newValue VserverInfoTypeDisallowedProtocols) *VserverInfoType { - o.DisallowedProtocolsPtr = &newValue - return o -} - -// Ipspace is a 
'getter' method -func (o *VserverInfoType) Ipspace() string { - r := *o.IpspacePtr - return r -} - -// SetIpspace is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetIpspace(newValue string) *VserverInfoType { - o.IpspacePtr = &newValue - return o -} - -// IsConfigLockedForChanges is a 'getter' method -func (o *VserverInfoType) IsConfigLockedForChanges() bool { - r := *o.IsConfigLockedForChangesPtr - return r -} - -// SetIsConfigLockedForChanges is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetIsConfigLockedForChanges(newValue bool) *VserverInfoType { - o.IsConfigLockedForChangesPtr = &newValue - return o -} - -// IsRepositoryVserver is a 'getter' method -func (o *VserverInfoType) IsRepositoryVserver() bool { - r := *o.IsRepositoryVserverPtr - return r -} - -// SetIsRepositoryVserver is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetIsRepositoryVserver(newValue bool) *VserverInfoType { - o.IsRepositoryVserverPtr = &newValue - return o -} - -// Language is a 'getter' method -func (o *VserverInfoType) Language() LanguageCodeType { - r := *o.LanguagePtr - return r -} - -// SetLanguage is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetLanguage(newValue LanguageCodeType) *VserverInfoType { - o.LanguagePtr = &newValue - return o -} - -// LdapDomain is a 'getter' method -func (o *VserverInfoType) LdapDomain() string { - r := *o.LdapDomainPtr - return r -} - -// SetLdapDomain is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetLdapDomain(newValue string) *VserverInfoType { - o.LdapDomainPtr = &newValue - return o -} - -// MaxVolumes is a 'getter' method -func (o *VserverInfoType) MaxVolumes() string { - r := *o.MaxVolumesPtr - return r -} - -// SetMaxVolumes is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetMaxVolumes(newValue string) *VserverInfoType { - o.MaxVolumesPtr = &newValue - return o -} - -// VserverInfoTypeNameMappingSwitch is a wrapper -type VserverInfoTypeNameMappingSwitch struct { - XMLName xml.Name `xml:"name-mapping-switch"` - NmswitchPtr []NmswitchType `xml:"nmswitch"` -} - -// Nmswitch is a 'getter' method -func (o *VserverInfoTypeNameMappingSwitch) Nmswitch() []NmswitchType { - r := o.NmswitchPtr - return r -} - -// SetNmswitch is a fluent style 'setter' method that can be chained -func (o *VserverInfoTypeNameMappingSwitch) SetNmswitch(newValue []NmswitchType) *VserverInfoTypeNameMappingSwitch { - newSlice := make([]NmswitchType, len(newValue)) - copy(newSlice, newValue) - o.NmswitchPtr = newSlice - return o -} - -// NameMappingSwitch is a 'getter' method -func (o *VserverInfoType) NameMappingSwitch() VserverInfoTypeNameMappingSwitch { - r := *o.NameMappingSwitchPtr - return r -} - -// SetNameMappingSwitch is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetNameMappingSwitch(newValue VserverInfoTypeNameMappingSwitch) *VserverInfoType { - o.NameMappingSwitchPtr = &newValue - return o -} - -// VserverInfoTypeNameServerSwitch is a wrapper -type VserverInfoTypeNameServerSwitch struct { - XMLName xml.Name `xml:"name-server-switch"` - NsswitchPtr []NsswitchType `xml:"nsswitch"` -} - -// Nsswitch is a 'getter' method -func (o *VserverInfoTypeNameServerSwitch) Nsswitch() []NsswitchType { - r := o.NsswitchPtr - return r -} - -// SetNsswitch is a fluent style 'setter' method that can be chained -func (o *VserverInfoTypeNameServerSwitch) SetNsswitch(newValue 
[]NsswitchType) *VserverInfoTypeNameServerSwitch { - newSlice := make([]NsswitchType, len(newValue)) - copy(newSlice, newValue) - o.NsswitchPtr = newSlice - return o -} - -// NameServerSwitch is a 'getter' method -func (o *VserverInfoType) NameServerSwitch() VserverInfoTypeNameServerSwitch { - r := *o.NameServerSwitchPtr - return r -} - -// SetNameServerSwitch is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetNameServerSwitch(newValue VserverInfoTypeNameServerSwitch) *VserverInfoType { - o.NameServerSwitchPtr = &newValue - return o -} - -// NisDomain is a 'getter' method -func (o *VserverInfoType) NisDomain() NisDomainType { - r := *o.NisDomainPtr - return r -} - -// SetNisDomain is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetNisDomain(newValue NisDomainType) *VserverInfoType { - o.NisDomainPtr = &newValue - return o -} - -// OperationalState is a 'getter' method -func (o *VserverInfoType) OperationalState() VsoperstateType { - r := *o.OperationalStatePtr - return r -} - -// SetOperationalState is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetOperationalState(newValue VsoperstateType) *VserverInfoType { - o.OperationalStatePtr = &newValue - return o -} - -// OperationalStateStoppedReason is a 'getter' method -func (o *VserverInfoType) OperationalStateStoppedReason() VsopstopreasonType { - r := *o.OperationalStateStoppedReasonPtr - return r -} - -// SetOperationalStateStoppedReason is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetOperationalStateStoppedReason(newValue VsopstopreasonType) *VserverInfoType { - o.OperationalStateStoppedReasonPtr = &newValue - return o -} - -// QosPolicyGroup is a 'getter' method -func (o *VserverInfoType) QosPolicyGroup() string { - r := *o.QosPolicyGroupPtr - return r -} - -// SetQosPolicyGroup is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetQosPolicyGroup(newValue string) *VserverInfoType { - o.QosPolicyGroupPtr = &newValue - return o -} - -// QuotaPolicy is a 'getter' method -func (o *VserverInfoType) QuotaPolicy() string { - r := *o.QuotaPolicyPtr - return r -} - -// SetQuotaPolicy is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetQuotaPolicy(newValue string) *VserverInfoType { - o.QuotaPolicyPtr = &newValue - return o -} - -// RootVolume is a 'getter' method -func (o *VserverInfoType) RootVolume() VolumeNameType { - r := *o.RootVolumePtr - return r -} - -// SetRootVolume is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetRootVolume(newValue VolumeNameType) *VserverInfoType { - o.RootVolumePtr = &newValue - return o -} - -// RootVolumeAggregate is a 'getter' method -func (o *VserverInfoType) RootVolumeAggregate() AggrNameType { - r := *o.RootVolumeAggregatePtr - return r -} - -// SetRootVolumeAggregate is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetRootVolumeAggregate(newValue AggrNameType) *VserverInfoType { - o.RootVolumeAggregatePtr = &newValue - return o -} - -// RootVolumeSecurityStyle is a 'getter' method -func (o *VserverInfoType) RootVolumeSecurityStyle() SecurityStyleEnumType { - r := *o.RootVolumeSecurityStylePtr - return r -} - -// SetRootVolumeSecurityStyle is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetRootVolumeSecurityStyle(newValue SecurityStyleEnumType) *VserverInfoType { - o.RootVolumeSecurityStylePtr = &newValue - return o -} 
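(Editor's note: the generated getters in the code removed above dereference their backing pointers unconditionally, e.g. `r := *o.StatePtr`, so a field that was absent in the ZAPI response panics on access. The sketch below shows the kind of nil guard a caller could wrap around such a type; the names are illustrative assumptions, not part of the removed vendor code.)

```go
package main

import "fmt"

// exampleInfo stands in for a generated azgo type whose optional fields are
// pointers; StatePtr may legitimately be nil if the XML element was missing.
type exampleInfo struct {
	StatePtr *string
}

// State copies the generated getter style: it dereferences without checking,
// so calling it on a nil field panics.
func (o *exampleInfo) State() string {
	return *o.StatePtr
}

// stateOrDefault is the guard a caller might add: check the pointer before
// invoking the panicking getter, and fall back to a default otherwise.
func stateOrDefault(o *exampleInfo, def string) string {
	if o == nil || o.StatePtr == nil {
		return def
	}
	return o.State()
}

func main() {
	withState := &exampleInfo{StatePtr: new(string)}
	*withState.StatePtr = "running"

	fmt.Println(stateOrDefault(withState, "unknown"))      // prints "running"
	fmt.Println(stateOrDefault(&exampleInfo{}, "unknown")) // prints "unknown"
}
```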
- -// SnapshotPolicy is a 'getter' method -func (o *VserverInfoType) SnapshotPolicy() SnapshotPolicyType { - r := *o.SnapshotPolicyPtr - return r -} - -// SetSnapshotPolicy is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetSnapshotPolicy(newValue SnapshotPolicyType) *VserverInfoType { - o.SnapshotPolicyPtr = &newValue - return o -} - -// State is a 'getter' method -func (o *VserverInfoType) State() VsadminstateType { - r := *o.StatePtr - return r -} - -// SetState is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetState(newValue VsadminstateType) *VserverInfoType { - o.StatePtr = &newValue - return o -} - -// Uuid is a 'getter' method -func (o *VserverInfoType) Uuid() UuidType { - r := *o.UuidPtr - return r -} - -// SetUuid is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetUuid(newValue UuidType) *VserverInfoType { - o.UuidPtr = &newValue - return o -} - -// VolumeDeleteRetentionHours is a 'getter' method -func (o *VserverInfoType) VolumeDeleteRetentionHours() int { - r := *o.VolumeDeleteRetentionHoursPtr - return r -} - -// SetVolumeDeleteRetentionHours is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetVolumeDeleteRetentionHours(newValue int) *VserverInfoType { - o.VolumeDeleteRetentionHoursPtr = &newValue - return o -} - -// VserverInfoTypeVserverAggrInfoList is a wrapper -type VserverInfoTypeVserverAggrInfoList struct { - XMLName xml.Name `xml:"vserver-aggr-info-list"` - VserverAggrInfoPtr []VserverAggrInfoType `xml:"vserver-aggr-info"` -} - -// VserverAggrInfo is a 'getter' method -func (o *VserverInfoTypeVserverAggrInfoList) VserverAggrInfo() []VserverAggrInfoType { - r := o.VserverAggrInfoPtr - return r -} - -// SetVserverAggrInfo is a fluent style 'setter' method that can be chained -func (o *VserverInfoTypeVserverAggrInfoList) SetVserverAggrInfo(newValue []VserverAggrInfoType) *VserverInfoTypeVserverAggrInfoList { - newSlice := make([]VserverAggrInfoType, len(newValue)) - copy(newSlice, newValue) - o.VserverAggrInfoPtr = newSlice - return o -} - -// VserverAggrInfoList is a 'getter' method -func (o *VserverInfoType) VserverAggrInfoList() VserverInfoTypeVserverAggrInfoList { - r := *o.VserverAggrInfoListPtr - return r -} - -// SetVserverAggrInfoList is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetVserverAggrInfoList(newValue VserverInfoTypeVserverAggrInfoList) *VserverInfoType { - o.VserverAggrInfoListPtr = &newValue - return o -} - -// VserverName is a 'getter' method -func (o *VserverInfoType) VserverName() string { - r := *o.VserverNamePtr - return r -} - -// SetVserverName is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetVserverName(newValue string) *VserverInfoType { - o.VserverNamePtr = &newValue - return o -} - -// VserverSubtype is a 'getter' method -func (o *VserverInfoType) VserverSubtype() string { - r := *o.VserverSubtypePtr - return r -} - -// SetVserverSubtype is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetVserverSubtype(newValue string) *VserverInfoType { - o.VserverSubtypePtr = &newValue - return o -} - -// VserverType is a 'getter' method -func (o *VserverInfoType) VserverType() string { - r := *o.VserverTypePtr - return r -} - -// SetVserverType is a fluent style 'setter' method that can be chained -func (o *VserverInfoType) SetVserverType(newValue string) *VserverInfoType { - o.VserverTypePtr = &newValue - return o -} diff 
--git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vserver-name.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vserver-name.go deleted file mode 100644 index 00e26a04d..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vserver-name.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// VserverNameType is a structure to represent a vserver-name ZAPI object -type VserverNameType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vsoperstate.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vsoperstate.go deleted file mode 100644 index 62fff7ad2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vsoperstate.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// VsoperstateType is a structure to represent a vsoperstate ZAPI object -type VsoperstateType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vsopstopreason.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vsopstopreason.go deleted file mode 100644 index a07db0710..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/azgo/type-vsopstopreason.go +++ /dev/null @@ -1,4 +0,0 @@ -package azgo - -// VsopstopreasonType is a structure to represent a vsopstopreason ZAPI object -type VsopstopreasonType = string diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/ontap.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/api/ontap.go deleted file mode 100644 index 73d0d1f70..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/api/ontap.go +++ /dev/null @@ -1,2171 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package api - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "runtime/debug" - "strings" - "sync" - "time" - - "github.com/cenkalti/backoff" - log "github.com/sirupsen/logrus" - - tridentconfig "github.com/netapp/trident/config" - "github.com/netapp/trident/storage_drivers/ontap/api/azgo" - "github.com/netapp/trident/utils" -) - -const ( - defaultZapiRecords = 100 - maxZapiRecords = 0xfffffffe - NumericalValueNotSet = -1 - maxFlexGroupWait = 30 * time.Second -) - -// ClientConfig holds the configuration data for Client objects -type ClientConfig struct { - ManagementLIF string - SVM string - Username string - Password string - DriverContext tridentconfig.DriverContext - ContextBasedZapiRecords int - DebugTraceFlags map[string]bool -} - -// Client is the object to use for interacting with ONTAP controllers -type Client struct { - config ClientConfig - zr *azgo.ZapiRunner - m *sync.Mutex - SVMUUID string -} - -// NewClient is a factory method for creating a new instance -func NewClient(config ClientConfig) *Client { - - // When running in Docker context we want to request MAX number of records from ZAPI for Volume, LUNs and Qtrees - config.ContextBasedZapiRecords = defaultZapiRecords - if config.DriverContext == tridentconfig.ContextDocker { - config.ContextBasedZapiRecords = maxZapiRecords - } - - d := &Client{ - config: config, - zr: &azgo.ZapiRunner{ - ManagementLIF: config.ManagementLIF, - SVM: config.SVM, - Username: config.Username, - Password: config.Password, - Secure: true, - DebugTraceFlags: config.DebugTraceFlags, - }, - m: &sync.Mutex{}, - } - return d -} - -// GetClonedZapiRunner returns a clone of the ZapiRunner configured on this driver. 
-func (d Client) GetClonedZapiRunner() *azgo.ZapiRunner { - clone := new(azgo.ZapiRunner) - *clone = *d.zr - return clone -} - -// GetNontunneledZapiRunner returns a clone of the ZapiRunner configured on this driver with the SVM field cleared so ZAPI calls -// made with the resulting runner aren't tunneled. Note that the calls could still go directly to either a cluster or -// vserver management LIF. -func (d Client) GetNontunneledZapiRunner() *azgo.ZapiRunner { - clone := new(azgo.ZapiRunner) - *clone = *d.zr - clone.SVM = "" - return clone -} - -// NewZapiError accepts the Response value from any AZGO call, extracts the status, reason, and errno values, -// and returns a ZapiError. The interface passed in may either be a Response object, or the always-embedded -// Result object where the error info exists. -func NewZapiError(zapiResult interface{}) (err ZapiError) { - defer func() { - if r := recover(); r != nil { - err = ZapiError{} - } - }() - - if zapiResult != nil { - val := NewZapiResultValue(zapiResult) - if reflect.TypeOf(zapiResult).Kind() == reflect.Ptr { - val = reflect.Indirect(val) - } - - err = ZapiError{ - val.FieldByName("ResultStatusAttr").String(), - val.FieldByName("ResultReasonAttr").String(), - val.FieldByName("ResultErrnoAttr").String(), - } - } else { - err = ZapiError{} - err.code = azgo.EINTERNALERROR - err.reason = "unexpected nil ZAPI result" - err.status = "failed" - } - - return err -} - -// NewZapiAsyncResult accepts the Response value from any AZGO Async Request, extracts the status, jobId, and -// errorCode values and returns a ZapiAsyncResult. -func NewZapiAsyncResult(zapiResult interface{}) (result ZapiAsyncResult, err error) { - defer func() { - if r := recover(); r != nil { - err = ZapiError{} - } - }() - - var jobId int64 - var status string - var errorCode int64 - - val := NewZapiResultValue(zapiResult) - if reflect.TypeOf(zapiResult).Kind() == reflect.Ptr { - val = reflect.Indirect(val) - } - - switch obj := zapiResult.(type) { - case azgo.VolumeModifyIterAsyncResponse: - log.Debugf("NewZapiAsyncResult - processing VolumeModifyIterAsyncResponse: %v", obj) - // Handle ZAPI result for response object that contains a list of one item with the needed job information. - volModifyResult := val.Interface().(azgo.VolumeModifyIterAsyncResponseResult) - if volModifyResult.NumSucceededPtr != nil && *volModifyResult.NumSucceededPtr > 0 { - if volModifyResult.SuccessListPtr != nil && volModifyResult.SuccessListPtr.VolumeModifyIterAsyncInfoPtr != nil { - volInfoType := volModifyResult.SuccessListPtr.VolumeModifyIterAsyncInfoPtr[0] - if volInfoType.JobidPtr != nil { - jobId = int64(*volInfoType.JobidPtr) - } - if volInfoType.StatusPtr != nil { - status = *volInfoType.StatusPtr - } - if volInfoType.ErrorCodePtr != nil { - errorCode = int64(*volInfoType.ErrorCodePtr) - } - } - } - default: - if s := val.FieldByName("ResultStatusPtr"); !s.IsNil() { - status = s.Elem().String() - } - if j := val.FieldByName("ResultJobidPtr"); !j.IsNil() { - jobId = j.Elem().Int() - } - if e := val.FieldByName("ResultErrorCodePtr"); !e.IsNil() { - errorCode = e.Elem().Int() - } - } - - result = ZapiAsyncResult{ - int(jobId), - status, - int(errorCode), - } - - return result, err -} - -// NewZapiResultValue obtains the Result from an AZGO Response object and returns the Result -func NewZapiResultValue(zapiResult interface{}) reflect.Value { - // A ZAPI Result struct works as-is, but a ZAPI Response struct must have its - // embedded Result struct extracted via reflection. 
- val := reflect.ValueOf(zapiResult) - if reflect.TypeOf(zapiResult).Kind() == reflect.Ptr { - val = reflect.Indirect(val) - } - if testResult := val.FieldByName("Result"); testResult.IsValid() { - zapiResult = testResult.Interface() - val = reflect.ValueOf(zapiResult) - } - return val -} - -// ZapiAsyncResult encap -type ZapiAsyncResult struct { - jobId int - status string - errorCode int -} - -// ZapiError encapsulates the status, reason, and errno values from a ZAPI invocation, and it provides helper methods for detecting -// common error conditions. -type ZapiError struct { - status string - reason string - code string -} - -func (e ZapiError) IsPassed() bool { - return e.status == "passed" -} -func (e ZapiError) Error() string { - if e.IsPassed() { - return "API status: passed" - } - return fmt.Sprintf("API status: %s, Reason: %s, Code: %s", e.status, e.reason, e.code) -} -func (e ZapiError) IsPrivilegeError() bool { - return e.code == azgo.EAPIPRIVILEGE -} -func (e ZapiError) IsScopeError() bool { - return e.code == azgo.EAPIPRIVILEGE || e.code == azgo.EAPINOTFOUND -} -func (e ZapiError) IsFailedToLoadJobError() bool { - return e.code == azgo.EINTERNALERROR && strings.Contains(e.reason, "Failed to load job") -} -func (e ZapiError) Status() string { - return e.status -} -func (e ZapiError) Reason() string { - return e.reason -} -func (e ZapiError) Code() string { - return e.code -} - -// GetError accepts both an error and the Response value from an AZGO invocation. -// If error is non-nil, it is returned as is. Otherwise, the Response value is -// probed for an error returned by ZAPI; if one is found, a ZapiError error object -// is returned. If no failures are detected, the method returns nil. The interface -// passed in may either be a Response object, or the always-embedded Result object -// where the error info exists. -func GetError(zapiResult interface{}, errorIn error) (errorOut error) { - - defer func() { - if r := recover(); r != nil { - log.Errorf("Panic in ontap#GetError. %v\nStack Trace: %v", zapiResult, string(debug.Stack())) - errorOut = ZapiError{} - } - }() - - // A ZAPI Result struct works as-is, but a ZAPI Response struct must have its - // embedded Result struct extracted via reflection. - if zapiResult != nil { - val := reflect.ValueOf(zapiResult) - if reflect.TypeOf(zapiResult).Kind() == reflect.Ptr { - val = reflect.Indirect(val) - if val.IsValid() { - if testResult := val.FieldByName("Result"); testResult.IsValid() { - zapiResult = testResult.Interface() - } - } - } - } - - errorOut = nil - - if errorIn != nil { - errorOut = errorIn - } else if zerr := NewZapiError(zapiResult); !zerr.IsPassed() { - errorOut = zerr - } - - return -} - -///////////////////////////////////////////////////////////////////////////// -// API feature operations BEGIN - -// API functions are named in a NounVerb pattern. This reflects how the azgo -// functions are also named. (i.e. 
VolumeGet instead of GetVolume) - -type feature string - -// Define new version-specific feature constants here -const ( - MinimumONTAPIVersion feature = "MINIMUM_ONTAPI_VERSION" - NetAppFlexGroups feature = "NETAPP_FLEXGROUPS" - NetAppFabricPoolFlexVol feature = "NETAPP_FABRICPOOL_FLEXVOL" - NetAppFabricPoolFlexGroup feature = "NETAPP_FABRICPOOL_FLEXGROUP" - LunGeometrySkip feature = "LUN_GEOMETRY_SKIP" -) - -// Indicate the minimum Ontapi version for each feature here -var features = map[feature]*utils.Version{ - MinimumONTAPIVersion: utils.MustParseSemantic("1.110.0"), // cDOT 9.1.0 - NetAppFlexGroups: utils.MustParseSemantic("1.120.0"), // cDOT 9.2.0 - NetAppFabricPoolFlexVol: utils.MustParseSemantic("1.120.0"), // cDOT 9.2.0 - NetAppFabricPoolFlexGroup: utils.MustParseSemantic("1.150.0"), // cDOT 9.5.0 - LunGeometrySkip: utils.MustParseSemantic("1.150.0"), // cDOT 9.5.0 -} - -// SupportsFeature returns true if the Ontapi version supports the supplied feature -func (d Client) SupportsFeature(feature feature) bool { - - ontapiVersion, err := d.SystemGetOntapiVersion() - if err != nil { - return false - } - - ontapiSemVer, err := utils.ParseSemantic(fmt.Sprintf("%s.0", ontapiVersion)) - if err != nil { - return false - } - - if minVersion, ok := features[feature]; ok { - return ontapiSemVer.AtLeast(minVersion) - } else { - return false - } -} - -// API feature operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// IGROUP operations BEGIN - -// IgroupCreate creates the specified initiator group -// equivalent to filer::> igroup create docker -vserver iscsi_vs -protocol iscsi -ostype linux -func (d Client) IgroupCreate(initiatorGroupName, initiatorGroupType, osType string) (*azgo.IgroupCreateResponse, error) { - response, err := azgo.NewIgroupCreateRequest(). - SetInitiatorGroupName(initiatorGroupName). - SetInitiatorGroupType(initiatorGroupType). - SetOsType(osType). - ExecuteUsing(d.zr) - return response, err -} - -// IgroupAdd adds an initiator to an initiator group -// equivalent to filer::> igroup add -vserver iscsi_vs -igroup docker -initiator iqn.1993-08.org.debian:01:9031309bbebd -func (d Client) IgroupAdd(initiatorGroupName, initiator string) (*azgo.IgroupAddResponse, error) { - response, err := azgo.NewIgroupAddRequest(). - SetInitiatorGroupName(initiatorGroupName). - SetInitiator(initiator). - ExecuteUsing(d.zr) - return response, err -} - -// IgroupRemove removes an initiator from an initiator group -func (d Client) IgroupRemove(initiatorGroupName, initiator string, force bool) (*azgo.IgroupRemoveResponse, error) { - response, err := azgo.NewIgroupRemoveRequest(). - SetInitiatorGroupName(initiatorGroupName). - SetInitiator(initiator). - SetForce(force). - ExecuteUsing(d.zr) - return response, err -} - -// IgroupDestroy destroys an initiator group -func (d Client) IgroupDestroy(initiatorGroupName string) (*azgo.IgroupDestroyResponse, error) { - response, err := azgo.NewIgroupDestroyRequest(). - SetInitiatorGroupName(initiatorGroupName). - ExecuteUsing(d.zr) - return response, err -} - -// IgroupList lists initiator groups -func (d Client) IgroupList() (*azgo.IgroupGetIterResponse, error) { - response, err := azgo.NewIgroupGetIterRequest(). - SetMaxRecords(defaultZapiRecords). 
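For reference, the SupportsFeature gate above reduces to comparing the reported ONTAPI version against a per-feature minimum kept in a map. Below is a dependency-free sketch of that pattern; the version type and feature names are invented for illustration (the removed code uses Trident's utils.Version helpers), while the 1.120.0 minimum for FlexGroups is taken from the table above.

```go
package main

import "fmt"

// ontapiVersion is a tiny stand-in for a parsed "generation.major.minor" version.
type ontapiVersion struct{ gen, major, minor int }

func (v ontapiVersion) atLeast(min ontapiVersion) bool {
	if v.gen != min.gen {
		return v.gen > min.gen
	}
	if v.major != min.major {
		return v.major > min.major
	}
	return v.minor >= min.minor
}

type feature string

const (
	flexGroups        feature = "FLEXGROUPS"
	fabricPoolFlexVol feature = "FABRICPOOL_FLEXVOL"
)

// Minimum ONTAPI version per feature, mirroring the map-of-minimums approach.
var minimums = map[feature]ontapiVersion{
	flexGroups:        {1, 120, 0}, // cDOT 9.2.0
	fabricPoolFlexVol: {1, 120, 0}, // cDOT 9.2.0
}

func supportsFeature(current ontapiVersion, f feature) bool {
	min, ok := minimums[f]
	return ok && current.atLeast(min)
}

func main() {
	fmt.Println(supportsFeature(ontapiVersion{1, 110, 0}, flexGroups)) // false
	fmt.Println(supportsFeature(ontapiVersion{1, 150, 0}, flexGroups)) // true
}
```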
- ExecuteUsing(d.zr) - return response, err -} - -// IGROUP operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// LUN operations BEGIN - -// LunCreate creates a lun with the specified attributes -// equivalent to filer::> lun create -vserver iscsi_vs -path /vol/v/lun1 -size 1g -ostype linux -space-reserve disabled -space-allocation enabled -func (d Client) LunCreate(lunPath string, sizeInBytes int, osType string, spaceReserved bool, spaceAllocated bool) (*azgo.LunCreateBySizeResponse, error) { - response, err := azgo.NewLunCreateBySizeRequest(). - SetPath(lunPath). - SetSize(sizeInBytes). - SetOstype(osType). - SetSpaceReservationEnabled(spaceReserved). - SetSpaceAllocationEnabled(spaceAllocated). - ExecuteUsing(d.zr) - return response, err -} - -// LunCloneCreate clones a LUN from a snapshot -func (d Client) LunCloneCreate(volumeName, sourceLun, destinationLun string) (*azgo.CloneCreateResponse, error) { - response, err := azgo.NewCloneCreateRequest(). - SetVolume(volumeName). - SetSourcePath(sourceLun). - SetDestinationPath(destinationLun). - ExecuteUsing(d.zr) - return response, err -} - -// LunGetSerialNumber returns the serial# for a lun -func (d Client) LunGetSerialNumber(lunPath string) (*azgo.LunGetSerialNumberResponse, error) { - response, err := azgo.NewLunGetSerialNumberRequest(). - SetPath(lunPath). - ExecuteUsing(d.zr) - return response, err -} - -// LunMap maps a lun to an id in an initiator group -// equivalent to filer::> lun map -vserver iscsi_vs -path /vol/v/lun1 -igroup docker -lun-id 0 -func (d Client) LunMap(initiatorGroupName, lunPath string, lunID int) (*azgo.LunMapResponse, error) { - response, err := azgo.NewLunMapRequest(). - SetInitiatorGroup(initiatorGroupName). - SetPath(lunPath). - SetLunId(lunID). - ExecuteUsing(d.zr) - return response, err -} - -// LunMapAutoID maps a LUN in an initiator group, allowing ONTAP to choose an available LUN ID -// equivalent to filer::> lun map -vserver iscsi_vs -path /vol/v/lun1 -igroup docker -func (d Client) LunMapAutoID(initiatorGroupName, lunPath string) (*azgo.LunMapResponse, error) { - response, err := azgo.NewLunMapRequest(). - SetInitiatorGroup(initiatorGroupName). - SetPath(lunPath). 
- ExecuteUsing(d.zr) - return response, err -} - -func (d Client) LunMapIfNotMapped(initiatorGroupName, lunPath string) (int, error) { - - // Read LUN maps to see if the LUN is already mapped to the igroup - lunMapListResponse, err := d.LunMapListInfo(lunPath) - if err != nil { - return -1, fmt.Errorf("problem reading maps for LUN %s: %v", lunPath, err) - } else if lunMapListResponse.Result.ResultStatusAttr != "passed" { - return -1, fmt.Errorf("problem reading maps for LUN %s: %+v", lunPath, lunMapListResponse.Result) - } - - lunID := 0 - alreadyMapped := false - if lunMapListResponse.Result.InitiatorGroupsPtr != nil { - for _, igroup := range lunMapListResponse.Result.InitiatorGroupsPtr.InitiatorGroupInfoPtr { - if igroup.InitiatorGroupName() == initiatorGroupName { - - lunID = igroup.LunId() - alreadyMapped = true - - log.WithFields(log.Fields{ - "lun": lunPath, - "igroup": initiatorGroupName, - "id": lunID, - }).Debug("LUN already mapped.") - - break - } - } - } - - // Map IFF not already mapped - if !alreadyMapped { - lunMapResponse, err := d.LunMapAutoID(initiatorGroupName, lunPath) - if err != nil { - return -1, fmt.Errorf("problem mapping LUN %s: %v", lunPath, err) - } else if lunMapResponse.Result.ResultStatusAttr != "passed" { - return -1, fmt.Errorf("problem mapping LUN %s: %+v", lunPath, lunMapResponse.Result) - } - - lunID = lunMapResponse.Result.LunIdAssigned() - - log.WithFields(log.Fields{ - "lun": lunPath, - "igroup": initiatorGroupName, - "id": lunID, - }).Debug("LUN mapped.") - } - - return lunID, nil -} - -// LunMapListInfo returns lun mapping information for the specified lun -// equivalent to filer::> lun mapped show -vserver iscsi_vs -path /vol/v/lun0 -func (d Client) LunMapListInfo(lunPath string) (*azgo.LunMapListInfoResponse, error) { - response, err := azgo.NewLunMapListInfoRequest(). - SetPath(lunPath). - ExecuteUsing(d.zr) - return response, err -} - -// LunOffline offlines a lun -// equivalent to filer::> lun offline -vserver iscsi_vs -path /vol/v/lun0 -func (d Client) LunOffline(lunPath string) (*azgo.LunOfflineResponse, error) { - response, err := azgo.NewLunOfflineRequest(). - SetPath(lunPath). - ExecuteUsing(d.zr) - return response, err -} - -// LunOnline onlines a lun -// equivalent to filer::> lun online -vserver iscsi_vs -path /vol/v/lun0 -func (d Client) LunOnline(lunPath string) (*azgo.LunOnlineResponse, error) { - response, err := azgo.NewLunOnlineRequest(). - SetPath(lunPath). - ExecuteUsing(d.zr) - return response, err -} - -// LunDestroy destroys a LUN -// equivalent to filer::> lun destroy -vserver iscsi_vs -path /vol/v/lun0 -func (d Client) LunDestroy(lunPath string) (*azgo.LunDestroyResponse, error) { - response, err := azgo.NewLunDestroyRequest(). - SetPath(lunPath). - ExecuteUsing(d.zr) - return response, err -} - -// LunSetAttribute sets a named attribute for a given LUN. -func (d Client) LunSetAttribute(lunPath, name, value string) (*azgo.LunSetAttributeResponse, error) { - response, err := azgo.NewLunSetAttributeRequest(). - SetPath(lunPath). - SetName(name). - SetValue(value). - ExecuteUsing(d.zr) - return response, err -} - -// LunGetAttribute gets a named attribute for a given LUN. -func (d Client) LunGetAttribute(lunPath, name string) (*azgo.LunGetAttributeResponse, error) { - response, err := azgo.NewLunGetAttributeRequest(). - SetPath(lunPath). - SetName(name). 
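LunMapIfNotMapped above applies a simple check-then-map idempotency rule: reuse the existing LUN ID if the igroup already has the LUN mapped, otherwise let ONTAP auto-assign one. The following self-contained sketch shows just that decision; the in-memory map and ID assignment are invented stand-ins for the lun-map-list-info and lun-map ZAPI calls.

```go
package main

import "fmt"

// existingMaps stands in for a lun-map-list-info result: igroup name -> assigned LUN ID.
type existingMaps map[string]int

// mapIfNotMapped returns the current LUN ID if the igroup is already mapped;
// otherwise it "maps" the LUN by assigning the next free ID (a stand-in for
// ONTAP's auto-assignment) and reports whether a new mapping was created.
func mapIfNotMapped(maps existingMaps, igroup string) (id int, created bool) {
	if id, ok := maps[igroup]; ok {
		return id, false // already mapped; reuse the existing ID
	}
	next := 0
	for _, used := range maps {
		if used >= next {
			next = used + 1
		}
	}
	maps[igroup] = next
	return next, true
}

func main() {
	maps := existingMaps{"docker": 0}
	fmt.Println(mapIfNotMapped(maps, "docker")) // 0 false
	fmt.Println(mapIfNotMapped(maps, "k8s"))    // 1 true
}
```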
- ExecuteUsing(d.zr) - return response, err -} - -// LunGet returns all relevant details for a single LUN -// equivalent to filer::> lun show -func (d Client) LunGet(path string) (*azgo.LunInfoType, error) { - - // Limit the LUNs to the one matching the path - query := &azgo.LunGetIterRequestQuery{} - lunInfo := azgo.NewLunInfoType(). - SetPath(path) - query.SetLunInfo(*lunInfo) - - // Limit the returned data to only the data relevant to containers - desiredAttributes := &azgo.LunGetIterRequestDesiredAttributes{} - lunInfo = azgo.NewLunInfoType(). - SetPath(""). - SetVolume(""). - SetSize(0). - SetCreationTimestamp(0) - desiredAttributes.SetLunInfo(*lunInfo) - - response, err := azgo.NewLunGetIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - - if err != nil { - return &azgo.LunInfoType{}, err - } else if response.Result.NumRecords() == 0 { - return &azgo.LunInfoType{}, fmt.Errorf("LUN %s not found", path) - } else if response.Result.NumRecords() > 1 { - return &azgo.LunInfoType{}, fmt.Errorf("more than one LUN %s found", path) - } else if response.Result.AttributesListPtr == nil { - return &azgo.LunInfoType{}, fmt.Errorf("LUN %s not found", path) - } else if response.Result.AttributesListPtr.LunInfoPtr != nil { - return &response.Result.AttributesListPtr.LunInfoPtr[0], nil - } - return &azgo.LunInfoType{}, fmt.Errorf("LUN %s not found", path) -} - -func (d Client) lunGetAllCommon(query *azgo.LunGetIterRequestQuery) (*azgo.LunGetIterResponse, error) { - // Limit the returned data to only the data relevant to containers - desiredAttributes := &azgo.LunGetIterRequestDesiredAttributes{} - lunInfo := azgo.NewLunInfoType(). - SetPath(""). - SetVolume(""). - SetSize(0). - SetCreationTimestamp(0) - desiredAttributes.SetLunInfo(*lunInfo) - - response, err := azgo.NewLunGetIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - return response, err -} - -func (d Client) LunGetGeometry(path string) (*azgo.LunGetGeometryResponse, error) { - response, err := azgo.NewLunGetGeometryRequest(). - SetPath(path). - ExecuteUsing(d.zr) - return response, err -} - -func (d Client) LunResize(path string, sizeBytes int) (uint64, error) { - response, err := azgo.NewLunResizeRequest(). - SetPath(path). - SetSize(sizeBytes). - ExecuteUsing(d.zr) - - var errSize uint64 = 0 - if err != nil { - return errSize, err - } - - if zerr := NewZapiError(response); !zerr.IsPassed() { - return errSize, zerr - } - - result := NewZapiResultValue(response) - if sizePtr := result.FieldByName("ActualSizePtr"); !sizePtr.IsNil() { - size := sizePtr.Elem().Int() - if size < 0 { - return errSize, fmt.Errorf("lun resize operation return an invalid size") - } else { - return uint64(size), nil - } - } else { - return errSize, fmt.Errorf("error parsing result size") - } -} - -// LunGetAll returns all relevant details for all LUNs whose paths match the supplied pattern -// equivalent to filer::> lun show -path /vol/trident_*/* -func (d Client) LunGetAll(pathPattern string) (*azgo.LunGetIterResponse, error) { - - // Limit LUNs to those matching the pathPattern; ex, "/vol/trident_*/*" - query := &azgo.LunGetIterRequestQuery{} - lunInfo := azgo.NewLunInfoType(). 
- SetPath(pathPattern) - query.SetLunInfo(*lunInfo) - - return d.lunGetAllCommon(query) -} - -// LunGetAllForVolume returns all relevant details for all LUNs in the supplied Volume -// equivalent to filer::> lun show -volume trident_CEwDWXQRPz -func (d Client) LunGetAllForVolume(volumeName string) (*azgo.LunGetIterResponse, error) { - - // Limit LUNs to those owned by the volumeName; ex, "trident_trident" - query := &azgo.LunGetIterRequestQuery{} - lunInfo := azgo.NewLunInfoType(). - SetVolume(volumeName) - query.SetLunInfo(*lunInfo) - - return d.lunGetAllCommon(query) -} - -// LunCount returns the number of LUNs that exist in a given volume -func (d Client) LunCount(volume string) (int, error) { - - // Limit the LUNs to those in the specified Flexvol - query := &azgo.LunGetIterRequestQuery{} - lunInfo := azgo.NewLunInfoType().SetVolume(volume) - query.SetLunInfo(*lunInfo) - - // Limit the returned data to only the Flexvol and LUN names - desiredAttributes := &azgo.LunGetIterRequestDesiredAttributes{} - desiredInfo := azgo.NewLunInfoType().SetPath("").SetVolume("") - desiredAttributes.SetLunInfo(*desiredInfo) - - response, err := azgo.NewLunGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - if err = GetError(response, err); err != nil { - return 0, err - } - - return response.Result.NumRecords(), nil -} - -// LUN operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// FlexGroup operations BEGIN - -// FlexGroupCreate creates a FlexGroup with the specified options -// equivalent to filer::> volume create -vserver svm_name -volume fg_vol_name –auto-provision-as flexgroup -size fg_size -state online -type RW -policy default -unix-permissions ---rwxr-xr-x -space-guarantee none -snapshot-policy none -security-style unix -encrypt false -func (d Client) FlexGroupCreate( - name string, size int, aggrs []azgo.AggrNameType, spaceReserve, snapshotPolicy, unixPermissions, - exportPolicy, securityStyle string, encrypt bool, snapshotReserve int, -) (*azgo.VolumeCreateAsyncResponse, error) { - - junctionPath := fmt.Sprintf("/%s", name) - - aggrList := azgo.VolumeCreateAsyncRequestAggrList{} - aggrList.SetAggrName(aggrs) - - request := azgo.NewVolumeCreateAsyncRequest(). - SetVolumeName(name). - SetSize(size). - SetSnapshotPolicy(snapshotPolicy). - SetSpaceReserve(spaceReserve). - SetUnixPermissions(unixPermissions). - SetExportPolicy(exportPolicy). - SetVolumeSecurityStyle(securityStyle). - SetEncrypt(encrypt). - SetAggrList(aggrList). - SetJunctionPath(junctionPath) - - if snapshotReserve != NumericalValueNotSet { - request.SetPercentageSnapshotReserve(snapshotReserve) - } - - if d.SupportsFeature(NetAppFabricPoolFlexGroup) { - request.SetTieringPolicy("none") - } - - response, err := request.ExecuteUsing(d.zr) - if zerr := GetError(*response, err); zerr != nil { - return response, zerr - } - - err = d.waitForAsyncResponse(*response, time.Duration(maxFlexGroupWait)) - if err != nil { - return response, fmt.Errorf("error waiting for response: %v", err) - } - - return response, err -} - -// FlexGroupDestroy destroys a FlexGroup -func (d Client) FlexGroupDestroy(name string, force bool) (*azgo.VolumeDestroyAsyncResponse, error) { - response, err := azgo.NewVolumeDestroyAsyncRequest(). - SetVolumeName(name). - SetUnmountAndOffline(force). 
- ExecuteUsing(d.zr) - - if zerr := NewZapiError(*response); !zerr.IsPassed() { - // It's not an error if the volume no longer exists - if zerr.Code() == azgo.EVOLUMEDOESNOTEXIST { - log.WithField("volume", name).Warn("FlexGroup already deleted.") - return response, nil - } - } - - if gerr := GetError(response, err); gerr != nil { - return response, gerr - } - - err = d.waitForAsyncResponse(*response, time.Duration(maxFlexGroupWait)) - if err != nil { - return response, fmt.Errorf("error waiting for response: %v", err) - } - - return response, err -} - -// FlexGroupExists tests for the existence of a FlexGroup -func (d Client) FlexGroupExists(name string) (bool, error) { - response, err := azgo.NewVolumeSizeAsyncRequest(). - SetVolumeName(name). - ExecuteUsing(d.zr) - - if zerr := NewZapiError(response); !zerr.IsPassed() { - switch zerr.Code() { - case azgo.EOBJECTNOTFOUND, azgo.EVOLUMEDOESNOTEXIST: - return false, nil - default: - return false, zerr - } - } - - if gerr := GetError(response, err); gerr != nil { - return false, gerr - } - - // Wait for Async Job to complete - err = d.waitForAsyncResponse(response, time.Duration(maxFlexGroupWait)) - if err != nil { - return false, fmt.Errorf("error waiting for response: %v", err) - } - - return true, nil -} - -// FlexGroupSize retrieves the size of the specified volume -func (d Client) FlexGroupSize(name string) (int, error) { - volAttrs, err := d.FlexGroupGet(name) - if err != nil { - return 0, err - } - if volAttrs == nil { - return 0, fmt.Errorf("error getting size for FlexGroup: %v", name) - } - - volSpaceAttrs := volAttrs.VolumeSpaceAttributes() - return volSpaceAttrs.Size(), nil -} - -// FlexGroupSetSize sets the size of the specified FlexGroup -func (d Client) FlexGroupSetSize(name, newSize string) (*azgo.VolumeSizeAsyncResponse, error) { - response, err := azgo.NewVolumeSizeAsyncRequest(). - SetVolumeName(name). - SetNewSize(newSize). - ExecuteUsing(d.zr) - - if zerr := GetError(*response, err); zerr != nil { - return response, zerr - } - - err = d.waitForAsyncResponse(*response, time.Duration(maxFlexGroupWait)) - if err != nil { - return response, fmt.Errorf("error waiting for response: %v", err) - } - - return response, err -} - -// FlexGroupVolumeDisableSnapshotDirectoryAccess disables access to the ".snapshot" directory -// Disable '.snapshot' to allow official mysql container's chmod-in-init to work -func (d Client) FlexGroupVolumeDisableSnapshotDirectoryAccess(name string) (*azgo.VolumeModifyIterAsyncResponse, error) { - - volattr := &azgo.VolumeModifyIterAsyncRequestAttributes{} - ssattr := azgo.NewVolumeSnapshotAttributesType().SetSnapdirAccessEnabled(false) - volSnapshotAttrs := azgo.NewVolumeAttributesType().SetVolumeSnapshotAttributes(*ssattr) - volattr.SetVolumeAttributes(*volSnapshotAttrs) - - queryattr := &azgo.VolumeModifyIterAsyncRequestQuery{} - volidattr := azgo.NewVolumeIdAttributesType().SetName(azgo.VolumeNameType(name)) - volIdAttrs := azgo.NewVolumeAttributesType().SetVolumeIdAttributes(*volidattr) - queryattr.SetVolumeAttributes(*volIdAttrs) - - response, err := azgo.NewVolumeModifyIterAsyncRequest(). - SetQuery(*queryattr). - SetAttributes(*volattr). 
- ExecuteUsing(d.zr) - - if zerr := GetError(response, err); zerr != nil { - return response, zerr - } - - err = d.waitForAsyncResponse(*response, time.Duration(maxFlexGroupWait)) - if err != nil { - return response, fmt.Errorf("error waiting for response: %v", err) - } - - return response, err -} - -// FlexGroupGet returns all relevant details for a single FlexGroup -func (d Client) FlexGroupGet(name string) (*azgo.VolumeAttributesType, error) { - // Limit the FlexGroups to the one matching the name - queryVolIDAttrs := azgo.NewVolumeIdAttributesType().SetName(azgo.VolumeNameType(name)) - queryVolIDAttrs.SetStyleExtended("flexgroup") - return d.volumeGetIterCommon(name, queryVolIDAttrs) -} - -// FlexGroupGetAll returns all relevant details for all FlexGroups whose names match the supplied prefix -func (d Client) FlexGroupGetAll(prefix string) (*azgo.VolumeGetIterResponse, error) { - // Limit the FlexGroups to those matching the name prefix - queryVolIDAttrs := azgo.NewVolumeIdAttributesType().SetName(azgo.VolumeNameType(prefix + "*")) - queryVolStateAttrs := azgo.NewVolumeStateAttributesType().SetState("online") - queryVolIDAttrs.SetStyleExtended("flexgroup") - return d.volumeGetIterAll(prefix, queryVolIDAttrs, queryVolStateAttrs) -} - -// waitForAsyncResponse handles waiting for an AsyncResponse to return successfully or return an error. -func (d Client) waitForAsyncResponse(zapiResult interface{}, maxWaitTime time.Duration) error { - - asyncResult, err := NewZapiAsyncResult(zapiResult) - if err != nil { - return err - } - - // Possible values: "succeeded", "in_progress", "failed". Returns nil if succeeded - if asyncResult.status == "in_progress" { - // handle zapi response - jobId := int(asyncResult.jobId) - if asyncResponseError := d.checkForJobCompletion(jobId, maxWaitTime); asyncResponseError != nil { - return asyncResponseError - } - } else if asyncResult.status == "failed" { - return fmt.Errorf("result status is failed with errorCode %d", asyncResult.errorCode) - } - - return nil -} - -// checkForJobCompletion polls for the ONTAP job status success with backoff retry logic -func (d *Client) checkForJobCompletion(jobId int, maxWaitTime time.Duration) error { - - checkJobFinished := func() error { - jobResponse, err := d.JobGetIterStatus(jobId) - if err != nil { - return fmt.Errorf("error occurred getting job status for job ID %d: %v", jobId, jobResponse.Result) - } - if jobResponse.Result.ResultStatusAttr != "passed" { - return fmt.Errorf("failed to get job status for job ID %d: %v ", jobId, jobResponse.Result) - } - - if jobResponse.Result.AttributesListPtr == nil { - return fmt.Errorf("failed to get job status for job ID %d: %v ", jobId, jobResponse.Result) - } - - jobState := jobResponse.Result.AttributesListPtr.JobInfoPtr[0].JobState() - log.WithFields(log.Fields{ - "jobId": jobId, - "jobState": jobState, - }).Debug("Job status for job ID") - // Check for an error with the job. If found return Permanent error to halt backoff. - if jobState == "failure" || jobState == "error" || jobState == "quit" || jobState == "dead" { - err = fmt.Errorf("job %d failed to complete. job state: %v", jobId, jobState) - return backoff.Permanent(err) - } - if jobState != "success" { - return fmt.Errorf("job %d is not yet completed. job state: %v", jobId, jobState) - } - return nil - } - - jobCompletedNotify := func(err error, duration time.Duration) { - log.WithField("duration", duration). 
- Debug("Job not yet completed, waiting.") - } - - inProgressBackoff := asyncResponseBackoff(maxWaitTime) - - // Run the job completion check using an exponential backoff - if err := backoff.RetryNotify(checkJobFinished, inProgressBackoff, jobCompletedNotify); err != nil { - log.Warnf("Job not completed after %v seconds.", inProgressBackoff.MaxElapsedTime.Seconds()) - return fmt.Errorf("job Id %d failed to complete successfully", jobId) - } else { - //log.WithField("volume", name).Debug("Volume found.") - log.WithField("jobId", jobId).Debug("Job completed successfully.") - return nil - } -} - -func asyncResponseBackoff(maxWaitTime time.Duration) *backoff.ExponentialBackOff { - inProgressBackoff := backoff.NewExponentialBackOff() - inProgressBackoff.InitialInterval = 1 * time.Second - inProgressBackoff.Multiplier = 2 - inProgressBackoff.RandomizationFactor = 0.1 - - inProgressBackoff.MaxElapsedTime = maxWaitTime - return inProgressBackoff -} - -// JobGetIterStatus returns the current job status for Async requests. -func (d Client) JobGetIterStatus(jobId int) (*azgo.JobGetIterResponse, error) { - jobInfo := azgo.NewJobInfoType().SetJobId(jobId) - queryAttr := &azgo.JobGetIterRequestQuery{} - queryAttr.SetJobInfo(*jobInfo) - - response, err := azgo.NewJobGetIterRequest(). - SetQuery(*queryAttr). - ExecuteUsing(d.GetNontunneledZapiRunner()) - return response, err -} - -// FlexGroup operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// VOLUME operations BEGIN - -// VolumeCreate creates a volume with the specified options -// equivalent to filer::> volume create -vserver iscsi_vs -volume v -aggregate aggr1 -size 1g -state online -type RW -policy default -unix-permissions ---rwxr-xr-x -space-guarantee none -snapshot-policy none -security-style unix -encrypt false -func (d Client) VolumeCreate( - name, aggregateName, size, spaceReserve, snapshotPolicy, unixPermissions, - exportPolicy, securityStyle string, encrypt bool, snapshotReserve int, -) (*azgo.VolumeCreateResponse, error) { - request := azgo.NewVolumeCreateRequest(). - SetVolume(name). - SetContainingAggrName(aggregateName). - SetSize(size). - SetSpaceReserve(spaceReserve). - SetSnapshotPolicy(snapshotPolicy). - SetUnixPermissions(unixPermissions). - SetExportPolicy(exportPolicy). - SetVolumeSecurityStyle(securityStyle). - SetEncrypt(encrypt) - - if snapshotReserve != NumericalValueNotSet { - request.SetPercentageSnapshotReserve(snapshotReserve) - } - - if d.SupportsFeature(NetAppFabricPoolFlexVol) { - request.SetTieringPolicy("none") - } - - response, err := request.ExecuteUsing(d.zr) - return response, err -} - -// VolumeCloneCreate clones a volume from a snapshot -func (d Client) VolumeCloneCreate(name, source, snapshot string) (*azgo.VolumeCloneCreateResponse, error) { - response, err := azgo.NewVolumeCloneCreateRequest(). - SetVolume(name). - SetParentVolume(source). - SetParentSnapshot(snapshot). - ExecuteUsing(d.zr) - return response, err -} - -// VolumeCloneSplitStart splits a cloned volume from its parent -func (d Client) VolumeCloneSplitStart(name string) (*azgo.VolumeCloneSplitStartResponse, error) { - response, err := azgo.NewVolumeCloneSplitStartRequest(). - SetVolume(name). 
- ExecuteUsing(d.zr) - return response, err -} - -// VolumeDisableSnapshotDirectoryAccess disables access to the ".snapshot" directory -// Disable '.snapshot' to allow official mysql container's chmod-in-init to work -func (d Client) VolumeDisableSnapshotDirectoryAccess(name string) (*azgo.VolumeModifyIterResponse, error) { - volattr := &azgo.VolumeModifyIterRequestAttributes{} - ssattr := azgo.NewVolumeSnapshotAttributesType().SetSnapdirAccessEnabled(false) - volSnapshotAttrs := azgo.NewVolumeAttributesType().SetVolumeSnapshotAttributes(*ssattr) - volattr.SetVolumeAttributes(*volSnapshotAttrs) - - queryattr := &azgo.VolumeModifyIterRequestQuery{} - volidattr := azgo.NewVolumeIdAttributesType().SetName(azgo.VolumeNameType(name)) - volIdAttrs := azgo.NewVolumeAttributesType().SetVolumeIdAttributes(*volidattr) - queryattr.SetVolumeAttributes(*volIdAttrs) - - response, err := azgo.NewVolumeModifyIterRequest(). - SetQuery(*queryattr). - SetAttributes(*volattr). - ExecuteUsing(d.zr) - return response, err -} - -// VolumeExists tests for the existence of a Flexvol -func (d Client) VolumeExists(name string) (bool, error) { - response, err := azgo.NewVolumeSizeRequest(). - SetVolume(name). - ExecuteUsing(d.zr) - - if err != nil { - return false, err - } - - if zerr := NewZapiError(response); !zerr.IsPassed() { - switch zerr.Code() { - case azgo.EOBJECTNOTFOUND, azgo.EVOLUMEDOESNOTEXIST: - return false, nil - default: - return false, zerr - } - } - - return true, nil -} - -// VolumeSize retrieves the size of the specified volume -func (d Client) VolumeSize(name string) (int, error) { - - volAttrs, err := d.VolumeGet(name) - if err != nil { - return 0, err - } - volSpaceAttrs := volAttrs.VolumeSpaceAttributes() - - return volSpaceAttrs.Size(), nil -} - -// VolumeSetSize sets the size of the specified volume -func (d Client) VolumeSetSize(name, newSize string) (*azgo.VolumeSizeResponse, error) { - response, err := azgo.NewVolumeSizeRequest(). - SetVolume(name). - SetNewSize(newSize). - ExecuteUsing(d.zr) - return response, err -} - -// VolumeMount mounts a volume at the specified junction -func (d Client) VolumeMount(name, junctionPath string) (*azgo.VolumeMountResponse, error) { - response, err := azgo.NewVolumeMountRequest(). - SetVolumeName(name). - SetJunctionPath(junctionPath). - ExecuteUsing(d.zr) - return response, err -} - -// VolumeUnmount unmounts a volume from the specified junction -func (d Client) VolumeUnmount(name string, force bool) (*azgo.VolumeUnmountResponse, error) { - response, err := azgo.NewVolumeUnmountRequest(). - SetVolumeName(name). - SetForce(force). - ExecuteUsing(d.zr) - return response, err -} - -// VolumeOffline offlines a volume -func (d Client) VolumeOffline(name string) (*azgo.VolumeOfflineResponse, error) { - response, err := azgo.NewVolumeOfflineRequest(). - SetName(name). - ExecuteUsing(d.zr) - return response, err -} - -// VolumeDestroy destroys a volume -func (d Client) VolumeDestroy(name string, force bool) (*azgo.VolumeDestroyResponse, error) { - response, err := azgo.NewVolumeDestroyRequest(). - SetName(name). - SetUnmountAndOffline(force). - ExecuteUsing(d.zr) - return response, err -} - -// VolumeGet returns all relevant details for a single Flexvol -// equivalent to filer::> volume show -func (d Client) VolumeGet(name string) (*azgo.VolumeAttributesType, error) { - - // Limit the Flexvols to the one matching the name - queryVolIDAttrs := azgo.NewVolumeIdAttributesType(). - SetName(azgo.VolumeNameType(name)). 
- SetStyleExtended("flexvol") - return d.volumeGetIterCommon(name, queryVolIDAttrs) -} - -func (d Client) volumeGetIterCommon(name string, - queryVolIDAttrs *azgo.VolumeIdAttributesType) (*azgo.VolumeAttributesType, error) { - - queryVolStateAttrs := azgo.NewVolumeStateAttributesType().SetState("online") - - query := &azgo.VolumeGetIterRequestQuery{} - volAttrs := azgo.NewVolumeAttributesType(). - SetVolumeIdAttributes(*queryVolIDAttrs). - SetVolumeStateAttributes(*queryVolStateAttrs) - query.SetVolumeAttributes(*volAttrs) - - response, err := azgo.NewVolumeGetIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - ExecuteUsing(d.zr) - - if err != nil { - return &azgo.VolumeAttributesType{}, err - } else if response.Result.NumRecords() == 0 { - return &azgo.VolumeAttributesType{}, fmt.Errorf("flexvol %s not found", name) - } else if response.Result.NumRecords() > 1 { - return &azgo.VolumeAttributesType{}, fmt.Errorf("more than one Flexvol %s found", name) - } else if response.Result.AttributesListPtr == nil { - return &azgo.VolumeAttributesType{}, fmt.Errorf("flexvol %s not found", name) - } else if response.Result.AttributesListPtr.VolumeAttributesPtr != nil { - return &response.Result.AttributesListPtr.VolumeAttributesPtr[0], nil - } - return &azgo.VolumeAttributesType{}, fmt.Errorf("flexvol %s not found", name) -} - -// VolumeGetAll returns all relevant details for all FlexVols whose names match the supplied prefix -// equivalent to filer::> volume show -func (d Client) VolumeGetAll(prefix string) (response *azgo.VolumeGetIterResponse, err error) { - - // Limit the Flexvols to those matching the name prefix - queryVolIDAttrs := azgo.NewVolumeIdAttributesType(). - SetName(azgo.VolumeNameType(prefix + "*")). - SetStyleExtended("flexvol") - queryVolStateAttrs := azgo.NewVolumeStateAttributesType().SetState("online") - - return d.volumeGetIterAll(prefix, queryVolIDAttrs, queryVolStateAttrs) -} - -func (d Client) volumeGetIterAll(prefix string, queryVolIDAttrs *azgo.VolumeIdAttributesType, - queryVolStateAttrs *azgo.VolumeStateAttributesType) (*azgo.VolumeGetIterResponse, error) { - - query := &azgo.VolumeGetIterRequestQuery{} - volumeAttributes := azgo.NewVolumeAttributesType(). - SetVolumeIdAttributes(*queryVolIDAttrs). - SetVolumeStateAttributes(*queryVolStateAttrs) - query.SetVolumeAttributes(*volumeAttributes) - - // Limit the returned data to only the data relevant to containers - desiredVolExportAttrs := azgo.NewVolumeExportAttributesType(). - SetPolicy("") - desiredVolIDAttrs := azgo.NewVolumeIdAttributesType(). - SetName(""). - SetContainingAggregateName("") - desiredVolSecurityUnixAttrs := azgo.NewVolumeSecurityUnixAttributesType(). - SetPermissions("") - desiredVolSecurityAttrs := azgo.NewVolumeSecurityAttributesType(). - SetVolumeSecurityUnixAttributes(*desiredVolSecurityUnixAttrs) - desiredVolSpaceAttrs := azgo.NewVolumeSpaceAttributesType(). - SetSize(0). - SetSpaceGuarantee("") - desiredVolSnapshotAttrs := azgo.NewVolumeSnapshotAttributesType(). - SetSnapdirAccessEnabled(true). - SetSnapshotPolicy("") - - desiredAttributes := &azgo.VolumeGetIterRequestDesiredAttributes{} - desiredVolumeAttributes := azgo.NewVolumeAttributesType(). - SetVolumeExportAttributes(*desiredVolExportAttrs). - SetVolumeIdAttributes(*desiredVolIDAttrs). - SetVolumeSecurityAttributes(*desiredVolSecurityAttrs). - SetVolumeSpaceAttributes(*desiredVolSpaceAttrs). 
- SetVolumeSnapshotAttributes(*desiredVolSnapshotAttrs) - desiredAttributes.SetVolumeAttributes(*desiredVolumeAttributes) - - response, err := azgo.NewVolumeGetIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - return response, err -} - -// VolumeList returns the names of all Flexvols whose names match the supplied prefix -func (d Client) VolumeList(prefix string) (*azgo.VolumeGetIterResponse, error) { - - // Limit the Flexvols to those matching the name prefix - query := &azgo.VolumeGetIterRequestQuery{} - queryVolIDAttrs := azgo.NewVolumeIdAttributesType(). - SetName(azgo.VolumeNameType(prefix + "*")). - SetStyleExtended("flexvol") - queryVolStateAttrs := azgo.NewVolumeStateAttributesType().SetState("online") - volumeAttributes := azgo.NewVolumeAttributesType(). - SetVolumeIdAttributes(*queryVolIDAttrs). - SetVolumeStateAttributes(*queryVolStateAttrs) - query.SetVolumeAttributes(*volumeAttributes) - - // Limit the returned data to only the Flexvol names - desiredAttributes := &azgo.VolumeGetIterRequestDesiredAttributes{} - desiredVolIDAttrs := azgo.NewVolumeIdAttributesType().SetName("") - desiredVolumeAttributes := azgo.NewVolumeAttributesType().SetVolumeIdAttributes(*desiredVolIDAttrs) - desiredAttributes.SetVolumeAttributes(*desiredVolumeAttributes) - - response, err := azgo.NewVolumeGetIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - return response, err -} - -// VolumeListByAttrs returns the names of all Flexvols matching the specified attributes -func (d Client) VolumeListByAttrs( - prefix, aggregate, spaceReserve, snapshotPolicy string, snapshotDir bool, encrypt bool, -) (*azgo.VolumeGetIterResponse, error) { - - // Limit the Flexvols to those matching the specified attributes - query := &azgo.VolumeGetIterRequestQuery{} - queryVolIDAttrs := azgo.NewVolumeIdAttributesType(). - SetName(azgo.VolumeNameType(prefix + "*")). - SetContainingAggregateName(aggregate). - SetStyleExtended("flexvol") - queryVolSpaceAttrs := azgo.NewVolumeSpaceAttributesType(). - SetSpaceGuarantee(spaceReserve) - queryVolSnapshotAttrs := azgo.NewVolumeSnapshotAttributesType(). - SetSnapshotPolicy(snapshotPolicy). - SetSnapdirAccessEnabled(snapshotDir) - queryVolStateAttrs := azgo.NewVolumeStateAttributesType(). - SetState("online") - volumeAttributes := azgo.NewVolumeAttributesType(). - SetVolumeIdAttributes(*queryVolIDAttrs). - SetVolumeSpaceAttributes(*queryVolSpaceAttrs). - SetVolumeSnapshotAttributes(*queryVolSnapshotAttrs). - SetVolumeStateAttributes(*queryVolStateAttrs). - SetEncrypt(encrypt) - query.SetVolumeAttributes(*volumeAttributes) - - // Limit the returned data to only the Flexvol names - desiredAttributes := &azgo.VolumeGetIterRequestDesiredAttributes{} - desiredVolIDAttrs := azgo.NewVolumeIdAttributesType().SetName("") - desiredVolumeAttributes := azgo.NewVolumeAttributesType().SetVolumeIdAttributes(*desiredVolIDAttrs) - desiredAttributes.SetVolumeAttributes(*desiredVolumeAttributes) - - response, err := azgo.NewVolumeGetIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). 
- ExecuteUsing(d.zr) - return response, err -} - -// VolumeListAllBackedBySnapshot returns the names of all FlexVols backed by the specified snapshot -func (d Client) VolumeListAllBackedBySnapshot(volumeName, snapshotName string) ([]string, error) { - - // Limit the Flexvols to those matching the specified attributes - query := &azgo.VolumeGetIterRequestQuery{} - queryVolCloneParentAttrs := azgo.NewVolumeCloneParentAttributesType(). - SetName(volumeName). - SetSnapshotName(snapshotName) - queryVolCloneAttrs := azgo.NewVolumeCloneAttributesType(). - SetVolumeCloneParentAttributes(*queryVolCloneParentAttrs) - volumeAttributes := azgo.NewVolumeAttributesType(). - SetVolumeCloneAttributes(*queryVolCloneAttrs) - query.SetVolumeAttributes(*volumeAttributes) - - // Limit the returned data to only the Flexvol names - desiredAttributes := &azgo.VolumeGetIterRequestDesiredAttributes{} - desiredVolIDAttrs := azgo.NewVolumeIdAttributesType().SetName("") - desiredVolumeAttributes := azgo.NewVolumeAttributesType().SetVolumeIdAttributes(*desiredVolIDAttrs) - desiredAttributes.SetVolumeAttributes(*desiredVolumeAttributes) - - response, err := azgo.NewVolumeGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - - if err = GetError(response, err); err != nil { - return nil, fmt.Errorf("error enumerating volumes backed by snapshot: %v", err) - } - - volumeNames := make([]string, 0) - - if response.Result.AttributesListPtr != nil { - for _, volAttrs := range response.Result.AttributesListPtr.VolumeAttributesPtr { - volIDAttrs := volAttrs.VolumeIdAttributes() - volumeNames = append(volumeNames, string(volIDAttrs.Name())) - } - } - - return volumeNames, nil -} - -// VolumeRename changes the name of a FlexVol (but not a FlexGroup!) -func (d Client) VolumeRename(volumeName, newVolumeName string) (*azgo.VolumeRenameResponse, error) { - response, err := azgo.NewVolumeRenameRequest(). - SetVolume(volumeName). - SetNewVolumeName(newVolumeName). - ExecuteUsing(d.zr) - return response, err -} - -// VOLUME operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// QTREE operations BEGIN - -// QtreeCreate creates a qtree with the specified options -// equivalent to filer::> qtree create -vserver ndvp_vs -volume v -qtree q -export-policy default -unix-permissions ---rwxr-xr-x -security-style unix -func (d Client) QtreeCreate(name, volumeName, unixPermissions, exportPolicy, - securityStyle string) (*azgo.QtreeCreateResponse, error) { - response, err := azgo.NewQtreeCreateRequest(). - SetQtree(name). - SetVolume(volumeName). - SetMode(unixPermissions). - SetSecurityStyle(securityStyle). - SetExportPolicy(exportPolicy). - ExecuteUsing(d.zr) - return response, err -} - -// QtreeRename renames a qtree -// equivalent to filer::> volume qtree rename -func (d Client) QtreeRename(path, newPath string) (*azgo.QtreeRenameResponse, error) { - response, err := azgo.NewQtreeRenameRequest(). - SetQtree(path). - SetNewQtreeName(newPath). - ExecuteUsing(d.zr) - return response, err -} - -// QtreeDestroyAsync destroys a qtree in the background -// equivalent to filer::> volume qtree delete -foreground false -func (d Client) QtreeDestroyAsync(path string, force bool) (*azgo.QtreeDeleteAsyncResponse, error) { - response, err := azgo.NewQtreeDeleteAsyncRequest(). - SetQtree(path). - SetForce(force). 
- ExecuteUsing(d.zr) - return response, err -} - -// QtreeList returns the names of all Qtrees whose names match the supplied prefix -// equivalent to filer::> volume qtree show -func (d Client) QtreeList(prefix, volumePrefix string) (*azgo.QtreeListIterResponse, error) { - - // Limit the qtrees to those matching the Flexvol and Qtree name prefixes - query := &azgo.QtreeListIterRequestQuery{} - queryInfo := azgo.NewQtreeInfoType().SetVolume(volumePrefix + "*").SetQtree(prefix + "*") - query.SetQtreeInfo(*queryInfo) - - // Limit the returned data to only the Flexvol and Qtree names - desiredAttributes := &azgo.QtreeListIterRequestDesiredAttributes{} - desiredInfo := azgo.NewQtreeInfoType().SetVolume("").SetQtree("") - desiredAttributes.SetQtreeInfo(*desiredInfo) - - response, err := azgo.NewQtreeListIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - return response, err -} - -// QtreeCount returns the number of Qtrees in the specified Flexvol, not including the Flexvol itself -func (d Client) QtreeCount(volume string) (int, error) { - - // Limit the qtrees to those in the specified Flexvol - query := &azgo.QtreeListIterRequestQuery{} - queryInfo := azgo.NewQtreeInfoType().SetVolume(volume) - query.SetQtreeInfo(*queryInfo) - - // Limit the returned data to only the Flexvol and Qtree names - desiredAttributes := &azgo.QtreeListIterRequestDesiredAttributes{} - desiredInfo := azgo.NewQtreeInfoType().SetVolume("").SetQtree("") - desiredAttributes.SetQtreeInfo(*desiredInfo) - - response, err := azgo.NewQtreeListIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - - if err = GetError(response, err); err != nil { - return 0, err - } - - // There will always be one qtree for the Flexvol, so decrement by 1 - switch response.Result.NumRecords() { - case 0: - fallthrough - case 1: - return 0, nil - default: - return response.Result.NumRecords() - 1, nil - } -} - -// QtreeExists returns true if the named Qtree exists (and is unique in the matching Flexvols) -func (d Client) QtreeExists(name, volumePrefix string) (bool, string, error) { - - // Limit the qtrees to those matching the Flexvol and Qtree name prefixes - query := &azgo.QtreeListIterRequestQuery{} - queryInfo := azgo.NewQtreeInfoType().SetVolume(volumePrefix + "*").SetQtree(name) - query.SetQtreeInfo(*queryInfo) - - // Limit the returned data to only the Flexvol and Qtree names - desiredAttributes := &azgo.QtreeListIterRequestDesiredAttributes{} - desiredInfo := azgo.NewQtreeInfoType().SetVolume("").SetQtree("") - desiredAttributes.SetQtreeInfo(*desiredInfo) - - response, err := azgo.NewQtreeListIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). 
- ExecuteUsing(d.zr) - - // Ensure the API call succeeded - if err = GetError(response, err); err != nil { - return false, "", err - } - - // Ensure qtree is unique - if response.Result.NumRecords() != 1 { - return false, "", nil - } - - if response.Result.AttributesListPtr == nil { - return false, "", nil - } - - // Get containing Flexvol - flexvol := response.Result.AttributesListPtr.QtreeInfoPtr[0].Volume() - - return true, flexvol, nil -} - -// QtreeGet returns all relevant details for a single qtree -// equivalent to filer::> volume qtree show -func (d Client) QtreeGet(name, volumePrefix string) (*azgo.QtreeInfoType, error) { - - // Limit the qtrees to those matching the Flexvol and Qtree name prefixes - query := &azgo.QtreeListIterRequestQuery{} - info := azgo.NewQtreeInfoType().SetVolume(volumePrefix + "*").SetQtree(name) - query.SetQtreeInfo(*info) - - response, err := azgo.NewQtreeListIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - ExecuteUsing(d.zr) - - if err != nil { - return &azgo.QtreeInfoType{}, err - } else if response.Result.NumRecords() == 0 { - return &azgo.QtreeInfoType{}, fmt.Errorf("qtree %s not found", name) - } else if response.Result.NumRecords() > 1 { - return &azgo.QtreeInfoType{}, fmt.Errorf("more than one qtree %s found", name) - } else if response.Result.AttributesListPtr == nil { - return &azgo.QtreeInfoType{}, fmt.Errorf("qtree %s not found", name) - } else if response.Result.AttributesListPtr.QtreeInfoPtr != nil { - return &response.Result.AttributesListPtr.QtreeInfoPtr[0], nil - } - return &azgo.QtreeInfoType{}, fmt.Errorf("qtree %s not found", name) -} - -// QtreeGetAll returns all relevant details for all qtrees whose Flexvol names match the supplied prefix -// equivalent to filer::> volume qtree show -func (d Client) QtreeGetAll(volumePrefix string) (*azgo.QtreeListIterResponse, error) { - - // Limit the qtrees to those matching the Flexvol name prefix - query := &azgo.QtreeListIterRequestQuery{} - info := azgo.NewQtreeInfoType().SetVolume(volumePrefix + "*") - query.SetQtreeInfo(*info) - - // Limit the returned data to only the data relevant to containers - desiredAttributes := &azgo.QtreeListIterRequestDesiredAttributes{} - desiredInfo := azgo.NewQtreeInfoType(). - SetVolume(""). - SetQtree(""). - SetSecurityStyle(""). - SetMode(""). - SetExportPolicy("") - desiredAttributes.SetQtreeInfo(*desiredInfo) - - response, err := azgo.NewQtreeListIterRequest(). - SetMaxRecords(d.config.ContextBasedZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - return response, err -} - -// QuotaOn enables quotas on a Flexvol -// equivalent to filer::> volume quota on -func (d Client) QuotaOn(volume string) (*azgo.QuotaOnResponse, error) { - response, err := azgo.NewQuotaOnRequest(). - SetVolume(volume). - ExecuteUsing(d.zr) - return response, err -} - -// QuotaOff disables quotas on a Flexvol -// equivalent to filer::> volume quota off -func (d Client) QuotaOff(volume string) (*azgo.QuotaOffResponse, error) { - response, err := azgo.NewQuotaOffRequest(). - SetVolume(volume). - ExecuteUsing(d.zr) - return response, err -} - -// QuotaResize resizes quotas on a Flexvol -// equivalent to filer::> volume quota resize -func (d Client) QuotaResize(volume string) (*azgo.QuotaResizeResponse, error) { - response, err := azgo.NewQuotaResizeRequest(). - SetVolume(volume). 
- ExecuteUsing(d.zr) - return response, err -} - -// QuotaStatus returns the quota status for a Flexvol -// equivalent to filer::> volume quota show -func (d Client) QuotaStatus(volume string) (*azgo.QuotaStatusResponse, error) { - response, err := azgo.NewQuotaStatusRequest(). - SetVolume(volume). - ExecuteUsing(d.zr) - return response, err -} - -// QuotaSetEntry creates a new quota rule with an optional hard disk limit -// equivalent to filer::> volume quota policy rule create -func (d Client) QuotaSetEntry(qtreeName, volumeName, quotaTarget, quotaType, diskLimit string) (*azgo.QuotaSetEntryResponse, error) { - - request := azgo.NewQuotaSetEntryRequest(). - SetQtree(qtreeName). - SetVolume(volumeName). - SetQuotaTarget(quotaTarget). - SetQuotaType(quotaType) - - // To create a default quota rule, pass an empty disk limit - if diskLimit != "" { - request.SetDiskLimit(diskLimit) - } - - response, err := request.ExecuteUsing(d.zr) - return response, err -} - -// QuotaEntryGet returns the disk limit for a single qtree -// equivalent to filer::> volume quota policy rule show -func (d Client) QuotaGetEntry(target string) (*azgo.QuotaEntryType, error) { - - query := &azgo.QuotaListEntriesIterRequestQuery{} - quotaEntry := azgo.NewQuotaEntryType().SetQuotaType("tree").SetQuotaTarget(target) - query.SetQuotaEntry(*quotaEntry) - - // Limit the returned data to only the disk limit - desiredAttributes := &azgo.QuotaListEntriesIterRequestDesiredAttributes{} - desiredQuotaEntryFields := azgo.NewQuotaEntryType().SetDiskLimit("").SetQuotaTarget("") - desiredAttributes.SetQuotaEntry(*desiredQuotaEntryFields) - - response, err := azgo.NewQuotaListEntriesIterRequest(). - SetMaxRecords(defaultZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.zr) - - if err != nil { - return &azgo.QuotaEntryType{}, err - } else if response.Result.NumRecords() == 0 { - return &azgo.QuotaEntryType{}, fmt.Errorf("tree quota for %s not found", target) - } else if response.Result.NumRecords() > 1 { - return &azgo.QuotaEntryType{}, fmt.Errorf("more than one tree quota for %s found", target) - } else if response.Result.AttributesListPtr == nil { - return &azgo.QuotaEntryType{}, fmt.Errorf("tree quota for %s not found", target) - } else if response.Result.AttributesListPtr.QuotaEntryPtr != nil { - return &response.Result.AttributesListPtr.QuotaEntryPtr[0], nil - } - return &azgo.QuotaEntryType{}, fmt.Errorf("tree quota for %s not found", target) -} - -// QuotaEntryList returns the disk limit quotas for a Flexvol -// equivalent to filer::> volume quota policy rule show -func (d Client) QuotaEntryList(volume string) (*azgo.QuotaListEntriesIterResponse, error) { - query := &azgo.QuotaListEntriesIterRequestQuery{} - quotaEntry := azgo.NewQuotaEntryType().SetVolume(volume).SetQuotaType("tree") - query.SetQuotaEntry(*quotaEntry) - - // Limit the returned data to only the disk limit - desiredAttributes := &azgo.QuotaListEntriesIterRequestDesiredAttributes{} - desiredQuotaEntryFields := azgo.NewQuotaEntryType().SetDiskLimit("").SetQuotaTarget("") - desiredAttributes.SetQuotaEntry(*desiredQuotaEntryFields) - - response, err := azgo.NewQuotaListEntriesIterRequest(). - SetMaxRecords(defaultZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). 
- ExecuteUsing(d.zr) - return response, err -} - -// QTREE operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// EXPORT POLICY operations BEGIN - -// ExportPolicyCreate creates an export policy -// equivalent to filer::> vserver export-policy create -func (d Client) ExportPolicyCreate(policy string) (*azgo.ExportPolicyCreateResponse, error) { - response, err := azgo.NewExportPolicyCreateRequest(). - SetPolicyName(azgo.ExportPolicyNameType(policy)). - ExecuteUsing(d.zr) - return response, err -} - -// ExportRuleCreate creates a rule in an export policy -// equivalent to filer::> vserver export-policy rule create -func (d Client) ExportRuleCreate( - policy, clientMatch string, - protocols, roSecFlavors, rwSecFlavors, suSecFlavors []string, -) (*azgo.ExportRuleCreateResponse, error) { - - protocolTypes := &azgo.ExportRuleCreateRequestProtocol{} - var protocolTypesToUse []azgo.AccessProtocolType - for _, p := range protocols { - protocolTypesToUse = append(protocolTypesToUse, azgo.AccessProtocolType(p)) - } - protocolTypes.AccessProtocolPtr = protocolTypesToUse - - roSecFlavorTypes := &azgo.ExportRuleCreateRequestRoRule{} - var roSecFlavorTypesToUse []azgo.SecurityFlavorType - for _, f := range roSecFlavors { - roSecFlavorTypesToUse = append(roSecFlavorTypesToUse, azgo.SecurityFlavorType(f)) - } - roSecFlavorTypes.SecurityFlavorPtr = roSecFlavorTypesToUse - - rwSecFlavorTypes := &azgo.ExportRuleCreateRequestRwRule{} - var rwSecFlavorTypesToUse []azgo.SecurityFlavorType - for _, f := range rwSecFlavors { - rwSecFlavorTypesToUse = append(rwSecFlavorTypesToUse, azgo.SecurityFlavorType(f)) - } - rwSecFlavorTypes.SecurityFlavorPtr = rwSecFlavorTypesToUse - - suSecFlavorTypes := &azgo.ExportRuleCreateRequestSuperUserSecurity{} - var suSecFlavorTypesToUse []azgo.SecurityFlavorType - for _, f := range suSecFlavors { - suSecFlavorTypesToUse = append(suSecFlavorTypesToUse, azgo.SecurityFlavorType(f)) - } - suSecFlavorTypes.SecurityFlavorPtr = suSecFlavorTypesToUse - - response, err := azgo.NewExportRuleCreateRequest(). - SetPolicyName(azgo.ExportPolicyNameType(policy)). - SetClientMatch(clientMatch). - SetProtocol(*protocolTypes). - SetRoRule(*roSecFlavorTypes). - SetRwRule(*rwSecFlavorTypes). - SetSuperUserSecurity(*suSecFlavorTypes). - ExecuteUsing(d.zr) - return response, err -} - -// ExportRuleGetIterRequest returns the export rules in an export policy -// equivalent to filer::> vserver export-policy rule show -func (d Client) ExportRuleGetIterRequest(policy string) (*azgo.ExportRuleGetIterResponse, error) { - - // Limit the qtrees to those matching the Flexvol and Qtree name prefixes - query := &azgo.ExportRuleGetIterRequestQuery{} - exportRuleInfo := azgo.NewExportRuleInfoType().SetPolicyName(azgo.ExportPolicyNameType(policy)) - query.SetExportRuleInfo(*exportRuleInfo) - - response, err := azgo.NewExportRuleGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - SetQuery(*query). - ExecuteUsing(d.zr) - return response, err -} - -// EXPORT POLICY operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// SNAPSHOT operations BEGIN - -// SnapshotCreate creates a snapshot of a volume -func (d Client) SnapshotCreate(snapshotName, volumeName string) (*azgo.SnapshotCreateResponse, error) { - response, err := azgo.NewSnapshotCreateRequest(). 
- SetSnapshot(snapshotName). - SetVolume(volumeName). - ExecuteUsing(d.zr) - return response, err -} - -// SnapshotList returns the list of snapshots associated with a volume -func (d Client) SnapshotList(volumeName string) (*azgo.SnapshotGetIterResponse, error) { - query := &azgo.SnapshotGetIterRequestQuery{} - snapshotInfo := azgo.NewSnapshotInfoType().SetVolume(volumeName) - query.SetSnapshotInfo(*snapshotInfo) - - response, err := azgo.NewSnapshotGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - SetQuery(*query). - ExecuteUsing(d.zr) - return response, err -} - -// SnapshotRestoreVolume restores a volume to a snapshot -func (d Client) SnapshotRestoreVolume(snapshotName, volumeName string) (*azgo.SnapshotRestoreVolumeResponse, error) { - response, err := azgo.NewSnapshotRestoreVolumeRequest(). - SetVolume(volumeName). - SetSnapshot(snapshotName). - SetPreserveLunIds(true). - ExecuteUsing(d.zr) - return response, err -} - -// DeleteSnapshot deletes a snapshot of a volume -func (d Client) SnapshotDelete(snapshotName, volumeName string) (*azgo.SnapshotDeleteResponse, error) { - response, err := azgo.NewSnapshotDeleteRequest(). - SetVolume(volumeName). - SetSnapshot(snapshotName). - SetIgnoreOwners(true). - ExecuteUsing(d.zr) - return response, err -} - -// SNAPSHOT operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// ISCSI operations BEGIN - -// IscsiServiceGetIterRequest returns information about an iSCSI target -func (d Client) IscsiServiceGetIterRequest() (*azgo.IscsiServiceGetIterResponse, error) { - response, err := azgo.NewIscsiServiceGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - ExecuteUsing(d.zr) - return response, err -} - -// IscsiNodeGetNameRequest gets the IQN of the vserver -func (d Client) IscsiNodeGetNameRequest() (*azgo.IscsiNodeGetNameResponse, error) { - response, err := azgo.NewIscsiNodeGetNameRequest().ExecuteUsing(d.zr) - return response, err -} - -// IscsiInterfaceGetIterRequest returns information about the vserver's iSCSI interfaces -func (d Client) IscsiInterfaceGetIterRequest() (*azgo.IscsiInterfaceGetIterResponse, error) { - response, err := azgo.NewIscsiInterfaceGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - ExecuteUsing(d.zr) - return response, err -} - -// ISCSI operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// VSERVER operations BEGIN - -// VserverGetIterRequest returns the vservers on the system -// equivalent to filer::> vserver show -func (d Client) VserverGetIterRequest() (*azgo.VserverGetIterResponse, error) { - response, err := azgo.NewVserverGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - ExecuteUsing(d.zr) - return response, err -} - -// VserverGetIterAdminRequest returns vservers of type "admin" on the system. -// equivalent to filer::> vserver show -type admin -func (d Client) VserverGetIterAdminRequest() (*azgo.VserverGetIterResponse, error) { - query := &azgo.VserverGetIterRequestQuery{} - info := azgo.NewVserverInfoType().SetVserverType("admin") - query.SetVserverInfo(*info) - - desiredAttributes := &azgo.VserverGetIterRequestDesiredAttributes{} - desiredInfo := azgo.NewVserverInfoType(). - SetVserverName(""). - SetVserverType("") - desiredAttributes.SetVserverInfo(*desiredInfo) - - response, err := azgo.NewVserverGetIterRequest(). 
- SetMaxRecords(defaultZapiRecords). - SetQuery(*query). - SetDesiredAttributes(*desiredAttributes). - ExecuteUsing(d.GetNontunneledZapiRunner()) - return response, err -} - -// VserverGetRequest returns vserver to which it is sent -// equivalent to filer::> vserver show -func (d Client) VserverGetRequest() (*azgo.VserverGetResponse, error) { - response, err := azgo.NewVserverGetRequest().ExecuteUsing(d.zr) - return response, err -} - -// VserverGetAggregateNames returns an array of names of the aggregates assigned to the configured vserver. -// The vserver-get-iter API works with either cluster or vserver scope, so the ZAPI runner may or may not -// be configured for tunneling; using the query parameter ensures we address only the configured vserver. -func (d Client) VserverGetAggregateNames() ([]string, error) { - - // Get just the SVM of interest - query := &azgo.VserverGetIterRequestQuery{} - info := azgo.NewVserverInfoType().SetVserverName(d.config.SVM) - query.SetVserverInfo(*info) - - response, err := azgo.NewVserverGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - SetQuery(*query). - ExecuteUsing(d.zr) - - if err != nil { - return nil, err - } - if response.Result.NumRecords() != 1 { - return nil, fmt.Errorf("could not find SVM %s", d.config.SVM) - } - - // Get the aggregates assigned to the SVM - aggrNames := make([]string, 0, 10) - if response.Result.AttributesListPtr != nil { - for _, vserver := range response.Result.AttributesListPtr.VserverInfoPtr { - if vserver.VserverAggrInfoListPtr != nil { - for _, aggr := range vserver.VserverAggrInfoList().VserverAggrInfoPtr { - aggrNames = append(aggrNames, string(aggr.AggrName())) - } - } - } - } - - return aggrNames, nil -} - -// VserverShowAggrGetIterRequest returns the aggregates on the vserver. Requires ONTAP 9 or later. -// equivalent to filer::> vserver show-aggregates -func (d Client) VserverShowAggrGetIterRequest() (*azgo.VserverShowAggrGetIterResponse, error) { - - response, err := azgo.NewVserverShowAggrGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - ExecuteUsing(d.zr) - return response, err -} - -// VSERVER operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// AGGREGATE operations BEGIN - -// AggrSpaceGetIterRequest returns the aggregates on the system -// equivalent to filer::> storage aggregate show-space -aggregate-name aggregate -func (d Client) AggrSpaceGetIterRequest(aggregateName string) (*azgo.AggrSpaceGetIterResponse, error) { - zr := d.GetNontunneledZapiRunner() - - query := &azgo.AggrSpaceGetIterRequestQuery{} - querySpaceInformation := azgo.NewSpaceInformationType() - if aggregateName != "" { - querySpaceInformation.SetAggregate(aggregateName) - } - query.SetSpaceInformation(*querySpaceInformation) - - responseAggrSpace, err := azgo.NewAggrSpaceGetIterRequest(). - SetQuery(*query). 
- ExecuteUsing(zr) - return responseAggrSpace, err -} - -func (d Client) getAggregateSize(aggregateName string) (int, error) { - // First, lookup the aggregate and it's space used - aggregateSizeTotal := NumericalValueNotSet - - responseAggrSpace, err := d.AggrSpaceGetIterRequest(aggregateName) - if err = GetError(responseAggrSpace, err); err != nil { - return NumericalValueNotSet, fmt.Errorf("error getting size for aggregate %v: %v", aggregateName, err) - } - - if responseAggrSpace.Result.AttributesListPtr != nil { - for _, aggrSpace := range responseAggrSpace.Result.AttributesListPtr.SpaceInformationPtr { - aggregateSizeTotal = aggrSpace.AggregateSize() - return aggregateSizeTotal, nil - } - } - - return aggregateSizeTotal, fmt.Errorf("error getting size for aggregate %v", aggregateName) -} - -type AggregateCommitment struct { - AggregateSize float64 - TotalAllocated float64 -} - -func (o *AggregateCommitment) Percent() float64 { - committedPercent := (o.TotalAllocated / float64(o.AggregateSize)) * 100.0 - return committedPercent -} - -func (o *AggregateCommitment) PercentWithRequestedSize(requestedSize float64) float64 { - committedPercent := ((o.TotalAllocated + requestedSize) / float64(o.AggregateSize)) * 100.0 - return committedPercent -} - -func (o AggregateCommitment) String() string { - var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("%s: %1.f ", "AggregateSize", o.AggregateSize)) - buffer.WriteString(fmt.Sprintf("%s: %1.f ", "TotalAllocated", o.TotalAllocated)) - buffer.WriteString(fmt.Sprintf("%s: %.2f %%", "Percent", o.Percent())) - return buffer.String() -} - -// AggregateCommitmentPercentage returns the allocated capacity percentage for an aggregate -// See also; https://practical-admin.com/blog/netapp-powershell-toolkit-aggregate-overcommitment-report/ -func (d Client) AggregateCommitment(aggregate string) (*AggregateCommitment, error) { - - zr := d.GetNontunneledZapiRunner() - - // first, get the aggregate's size - aggregateSize, err := d.getAggregateSize(aggregate) - if err != nil { - return nil, err - } - - // now, get all of the aggregate's volumes - query := &azgo.VolumeGetIterRequestQuery{} - queryVolIDAttrs := azgo.NewVolumeIdAttributesType(). - SetContainingAggregateName(aggregate) - queryVolSpaceAttrs := azgo.NewVolumeSpaceAttributesType() - volumeAttributes := azgo.NewVolumeAttributesType(). - SetVolumeIdAttributes(*queryVolIDAttrs). - SetVolumeSpaceAttributes(*queryVolSpaceAttrs) - query.SetVolumeAttributes(*volumeAttributes) - - response, err := azgo.NewVolumeGetIterRequest(). - SetMaxRecords(defaultZapiRecords). - SetQuery(*query). 
- ExecuteUsing(zr) - - if err != nil { - return nil, err - } - if err = GetError(response, err); err != nil { - return nil, fmt.Errorf("error enumerating Flexvols: %v", err) - } - - totalAllocated := 0.0 - - // for each of the aggregate's volumes, compute its potential storage usage - if response.Result.AttributesListPtr != nil { - for _, volAttrs := range response.Result.AttributesListPtr.VolumeAttributesPtr { - volIDAttrs := volAttrs.VolumeIdAttributes() - volName := string(volIDAttrs.Name()) - volSpaceAttrs := volAttrs.VolumeSpaceAttributes() - volSisAttrs := volAttrs.VolumeSisAttributes() - volAllocated := float64(volSpaceAttrs.SizeTotal()) - - log.WithFields(log.Fields{ - "volName": volName, - "SizeTotal": volSpaceAttrs.SizeTotal(), - "TotalSpaceSaved": volSisAttrs.TotalSpaceSaved(), - "volAllocated": volAllocated, - }).Info("Dumping volume") - - lunAllocated := 0.0 - lunsResponse, lunsResponseErr := d.LunGetAllForVolume(volName) - if lunsResponseErr != nil { - return nil, lunsResponseErr - } - if lunsResponseErr = GetError(lunsResponse, lunsResponseErr); lunsResponseErr != nil { - return nil, fmt.Errorf("error enumerating LUNs for volume %v: %v", volName, lunsResponseErr) - } - - if lunsResponse.Result.AttributesListPtr != nil && - lunsResponse.Result.AttributesListPtr.LunInfoPtr != nil { - for _, lun := range lunsResponse.Result.AttributesListPtr.LunInfoPtr { - lunPath := lun.Path() - lunSize := lun.Size() - log.WithFields(log.Fields{ - "lunPath": lunPath, - "lunSize": lunSize, - }).Info("Dumping LUN") - lunAllocated += float64(lunSize) - } - } - - if lunAllocated > volAllocated { - totalAllocated += float64(lunAllocated) - } else { - totalAllocated += float64(volAllocated) - } - } - } - - ac := &AggregateCommitment{ - TotalAllocated: totalAllocated, - AggregateSize: float64(aggregateSize), - } - - return ac, nil -} - -// AGGREGATE operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// SNAPMIRROR operations BEGIN - -// SNAPMIRROR operations END -///////////////////////////////////////////////////////////////////////////// - -///////////////////////////////////////////////////////////////////////////// -// MISC operations BEGIN - -// NetInterfaceGet returns the list of network interfaces with associated metadata -// equivalent to filer::> net interface list -func (d Client) NetInterfaceGet() (*azgo.NetInterfaceGetIterResponse, error) { - response, err := azgo.NewNetInterfaceGetIterRequest(). - SetMaxRecords(defaultZapiRecords). 
- ExecuteUsing(d.zr) - return response, err -} - -func (d Client) NetInterfaceGetDataLIFs(protocol string) ([]string, error) { - lifResponse, err := d.NetInterfaceGet() - if err = GetError(lifResponse, err); err != nil { - return nil, fmt.Errorf("error checking network interfaces: %v", err) - } - - dataLIFs := make([]string, 0) - if lifResponse.Result.AttributesListPtr != nil { - for _, attrs := range lifResponse.Result.AttributesListPtr.NetInterfaceInfoPtr { - for _, proto := range attrs.DataProtocols().DataProtocolPtr { - if proto == azgo.DataProtocolType(protocol) { - dataLIFs = append(dataLIFs, string(attrs.Address())) - } - } - } - } - - log.WithField("dataLIFs", dataLIFs).Debug("Data LIFs") - return dataLIFs, nil -} - -// SystemGetVersion returns the system version -// equivalent to filer::> version -func (d Client) SystemGetVersion() (*azgo.SystemGetVersionResponse, error) { - response, err := azgo.NewSystemGetVersionRequest().ExecuteUsing(d.zr) - return response, err -} - -// SystemGetOntapiVersion gets the ONTAPI version using the credentials, and caches & returns the result. -func (d Client) SystemGetOntapiVersion() (string, error) { - - if d.zr.OntapiVersion == "" { - result, err := azgo.NewSystemGetOntapiVersionRequest().ExecuteUsing(d.zr) - if err = GetError(result, err); err != nil { - return "", fmt.Errorf("could not read ONTAPI version: %v", err) - } - - major := result.Result.MajorVersion() - minor := result.Result.MinorVersion() - d.zr.OntapiVersion = fmt.Sprintf("%d.%d", major, minor) - } - - return d.zr.OntapiVersion, nil -} - -func (d Client) NodeListSerialNumbers() ([]string, error) { - - serialNumbers := make([]string, 0, 0) - zr := d.GetNontunneledZapiRunner() - - // Limit the returned data to only the serial numbers - desiredAttributes := &azgo.SystemNodeGetIterRequestDesiredAttributes{} - info := azgo.NewNodeDetailsInfoType().SetNodeSerialNumber("") - desiredAttributes.SetNodeDetailsInfo(*info) - - response, err := azgo.NewSystemNodeGetIterRequest(). - SetDesiredAttributes(*desiredAttributes). - SetMaxRecords(defaultZapiRecords). - ExecuteUsing(zr) - - log.WithFields(log.Fields{ - "response": response, - "info": info, - "desiredAttributes": desiredAttributes, - "err": err, - }).Debug("NodeListSerialNumbers") - - if err = GetError(response, err); err != nil { - return serialNumbers, err - } - - if response.Result.NumRecords() == 0 { - return serialNumbers, errors.New("could not get node info") - } - - // Get the serial numbers - if response.Result.AttributesListPtr != nil { - for _, node := range response.Result.AttributesListPtr.NodeDetailsInfo() { - serialNumber := node.NodeSerialNumber() - if serialNumber != "" { - serialNumbers = append(serialNumbers, serialNumber) - } - } - } - - if len(serialNumbers) == 0 { - return serialNumbers, errors.New("could not get node serial numbers") - } - - log.WithFields(log.Fields{ - "Count": len(serialNumbers), - "SerialNumbers": strings.Join(serialNumbers, ","), - }).Debug("Read serial numbers.") - - return serialNumbers, nil -} - -// EmsAutosupportLog generates an auto support message with the supplied parameters -func (d Client) EmsAutosupportLog( - appVersion string, - autoSupport bool, - category string, - computerName string, - eventDescription string, - eventID int, - eventSource string, - logLevel int) (*azgo.EmsAutosupportLogResponse, error) { - - response, err := azgo.NewEmsAutosupportLogRequest(). - SetAutoSupport(autoSupport). - SetAppVersion(appVersion). - SetCategory(category). - SetComputerName(computerName). 
- SetEventDescription(eventDescription). - SetEventId(eventID). - SetEventSource(eventSource). - SetLogLevel(logLevel). - ExecuteUsing(d.zr) - return response, err -} - -// MISC operations END -///////////////////////////////////////////////////////////////////////////// diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_common.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_common.go deleted file mode 100644 index 602623288..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_common.go +++ /dev/null @@ -1,1622 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package ontap - -import ( - "encoding/json" - "errors" - "fmt" - "net" - "os" - "runtime/debug" - "sort" - "strconv" - "strings" - "time" - - "github.com/cenkalti/backoff" - log "github.com/sirupsen/logrus" - - tridentconfig "github.com/netapp/trident/config" - "github.com/netapp/trident/storage" - sa "github.com/netapp/trident/storage_attribute" - drivers "github.com/netapp/trident/storage_drivers" - "github.com/netapp/trident/storage_drivers/ontap/api" - "github.com/netapp/trident/storage_drivers/ontap/api/azgo" - "github.com/netapp/trident/utils" -) - -const ( - MinimumVolumeSizeBytes = 20971520 // 20 MiB - HousekeepingStartupDelaySecs = 10 -) - -//For legacy reasons, these strings mustn't change -const ( - artifactPrefixDocker = "ndvp" - artifactPrefixKubernetes = "trident" - LUNAttributeFSType = "com.netapp.ndvp.fstype" -) - -type Telemetry struct { - tridentconfig.Telemetry - Plugin string `json:"plugin"` - SVM string `json:"svm"` - StoragePrefix string `json:"storagePrefix"` - Driver StorageDriver `json:"-"` - done chan struct{} - ticker *time.Ticker - stopped bool -} - -type StorageDriver interface { - GetConfig() *drivers.OntapStorageDriverConfig - GetAPI() *api.Client - GetTelemetry() *Telemetry - Name() string -} - -// InitializeOntapConfig parses the ONTAP config, mixing in the specified common config. -func InitializeOntapConfig( - context tridentconfig.DriverContext, configJSON string, commonConfig *drivers.CommonStorageDriverConfig, -) (*drivers.OntapStorageDriverConfig, error) { - - if commonConfig.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "InitializeOntapConfig", "Type": "ontap_common"} - log.WithFields(fields).Debug(">>>> InitializeOntapConfig") - defer log.WithFields(fields).Debug("<<<< InitializeOntapConfig") - } - - commonConfig.DriverContext = context - - config := &drivers.OntapStorageDriverConfig{} - config.CommonStorageDriverConfig = commonConfig - - // decode configJSON into OntapStorageDriverConfig object - err := json.Unmarshal([]byte(configJSON), &config) - if err != nil { - return nil, fmt.Errorf("could not decode JSON configuration: %v", err) - } - - return config, nil -} - -func NewOntapTelemetry(d StorageDriver) *Telemetry { - t := &Telemetry{ - Plugin: d.Name(), - SVM: d.GetConfig().SVM, - StoragePrefix: *d.GetConfig().StoragePrefix, - Driver: d, - done: make(chan struct{}), - } - - usageHeartbeat := d.GetConfig().UsageHeartbeat - heartbeatIntervalInHours := 24.0 // default to 24 hours - if usageHeartbeat != "" { - f, err := strconv.ParseFloat(usageHeartbeat, 64) - if err != nil { - log.WithField("interval", usageHeartbeat).Warnf("Invalid heartbeat interval. 
%v", err) - } else { - heartbeatIntervalInHours = f - } - } - log.WithField("intervalHours", heartbeatIntervalInHours).Debug("Configured EMS heartbeat.") - - durationInHours := time.Millisecond * time.Duration(MSecPerHour*heartbeatIntervalInHours) - if durationInHours > 0 { - t.ticker = time.NewTicker(durationInHours) - } - return t -} - -// Start starts the flow of ASUP messages for the driver -// These messages can be viewed via filer::> event log show -severity NOTICE. -func (t *Telemetry) Start() { - go func() { - time.Sleep(HousekeepingStartupDelaySecs * time.Second) - EMSHeartbeat(t.Driver) - for { - select { - case tick := <-t.ticker.C: - log.WithFields(log.Fields{ - "tick": tick, - "driver": t.Driver.Name(), - }).Debug("Sending EMS heartbeat.") - EMSHeartbeat(t.Driver) - case <-t.done: - log.WithFields(log.Fields{ - "driver": t.Driver.Name(), - }).Debugf("Shut down EMS logs for the driver.") - return - } - } - }() -} - -func (t *Telemetry) Stop() { - if t.ticker != nil { - t.ticker.Stop() - } - if !t.stopped { - // calling close on an already closed channel causes a panic, guard against that - close(t.done) - t.stopped = true - } -} - -// GetISCSITargetInfo returns the iSCSI node name and iSCSI interfaces using the provided client's SVM. -func GetISCSITargetInfo( - clientAPI *api.Client, config *drivers.OntapStorageDriverConfig, -) (iSCSINodeName string, iSCSIInterfaces []string, returnError error) { - - // Get the SVM iSCSI IQN - nodeNameResponse, err := clientAPI.IscsiNodeGetNameRequest() - if err != nil { - returnError = fmt.Errorf("could not get SVM iSCSI node name: %v", err) - return - } - iSCSINodeName = nodeNameResponse.Result.NodeName() - - // Get the SVM iSCSI interfaces - interfaceResponse, err := clientAPI.IscsiInterfaceGetIterRequest() - if err != nil { - returnError = fmt.Errorf("could not get SVM iSCSI interfaces: %v", err) - return - } - if interfaceResponse.Result.AttributesListPtr != nil { - for _, iscsiAttrs := range interfaceResponse.Result.AttributesListPtr.IscsiInterfaceListEntryInfoPtr { - if !iscsiAttrs.IsInterfaceEnabled() { - continue - } - iSCSIInterface := fmt.Sprintf("%s:%d", iscsiAttrs.IpAddress(), iscsiAttrs.IpPort()) - iSCSIInterfaces = append(iSCSIInterfaces, iSCSIInterface) - } - } - if len(iSCSIInterfaces) == 0 { - returnError = fmt.Errorf("SVM %s has no active iSCSI interfaces", config.SVM) - return - } - - return -} - -// PopulateOntapLunMapping helper function to fill in volConfig with its LUN mapping values. 
-func PopulateOntapLunMapping( - clientAPI *api.Client, config *drivers.OntapStorageDriverConfig, - ips []string, volConfig *storage.VolumeConfig, lunID int, -) error { - - var ( - targetIQN string - ) - response, err := clientAPI.IscsiServiceGetIterRequest() - if response.Result.ResultStatusAttr != "passed" || err != nil { - return fmt.Errorf("problem retrieving iSCSI services: %v, %v", - err, response.Result.ResultErrnoAttr) - } - if response.Result.AttributesListPtr != nil { - for _, serviceInfo := range response.Result.AttributesListPtr.IscsiServiceInfoPtr { - if serviceInfo.Vserver() == config.SVM { - targetIQN = serviceInfo.NodeName() - log.WithFields(log.Fields{ - "volume": volConfig.Name, - "targetIQN": targetIQN, - }).Debug("Discovered target IQN for volume.") - break - } - } - } - - volConfig.AccessInfo.IscsiTargetPortal = ips[0] - volConfig.AccessInfo.IscsiPortals = ips[1:] - volConfig.AccessInfo.IscsiTargetIQN = targetIQN - volConfig.AccessInfo.IscsiLunNumber = int32(lunID) - volConfig.AccessInfo.IscsiIgroup = config.IgroupName - log.WithFields(log.Fields{ - "volume": volConfig.Name, - "volume_internal": volConfig.InternalName, - "targetIQN": volConfig.AccessInfo.IscsiTargetIQN, - "lunNumber": volConfig.AccessInfo.IscsiLunNumber, - "igroup": volConfig.AccessInfo.IscsiIgroup, - }).Debug("Mapped ONTAP LUN.") - - return nil -} - -// PublishLUN publishes the volume to the host specified in publishInfo from ontap-san or -// ontap-san-economy. This method may or may not be running on the host where the volume will be -// mounted, so it should limit itself to updating access rules, initiator groups, etc. that require -// some host identity (but not locality) as well as storage controller API access. -func PublishLUN( - clientAPI *api.Client, config *drivers.OntapStorageDriverConfig, ips []string, - publishInfo *utils.VolumePublishInfo, lunPath, igroupName string, iSCSINodeName string, -) error { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "PublishLUN", - "Type": "ontap_common", - "lunPath": lunPath, - } - log.WithFields(fields).Debug(">>>> PublishLUN") - defer log.WithFields(fields).Debug("<<<< PublishLUN") - } - - var iqn string - var err error - - if publishInfo.Localhost { - - // Lookup local host IQNs - iqns, err := utils.GetInitiatorIqns() - if err != nil { - return fmt.Errorf("error determining host initiator IQN: %v", err) - } else if len(iqns) == 0 { - return errors.New("could not determine host initiator IQN") - } - iqn = iqns[0] - - } else { - - // Host IQN must have been passed in - if len(publishInfo.HostIQN) == 0 { - return errors.New("host initiator IQN not specified") - } - iqn = publishInfo.HostIQN[0] - } - - // Get the fstype - fstype := drivers.DefaultFileSystemType - attrResponse, err := clientAPI.LunGetAttribute(lunPath, LUNAttributeFSType) - if err = api.GetError(attrResponse, err); err != nil { - log.WithFields(log.Fields{ - "LUN": lunPath, - "fstype": fstype, - }).Warn("LUN attribute fstype not found, using default.") - } else { - fstype = attrResponse.Result.Value() - log.WithFields(log.Fields{"LUN": lunPath, "fstype": fstype}).Debug("Found LUN attribute fstype.") - } - - // Add IQN to igroup - igroupAddResponse, err := clientAPI.IgroupAdd(igroupName, iqn) - err = api.GetError(igroupAddResponse, err) - zerr, zerrOK := err.(api.ZapiError) - if err == nil || (zerrOK && zerr.Code() == azgo.EVDISK_ERROR_INITGROUP_HAS_NODE) { - log.WithFields(log.Fields{ - "IQN": iqn, - "igroup": igroupName, - }).Debug("Host IQN already in igroup.") 
- } else { - return fmt.Errorf("error adding IQN %v to igroup %v: %v", iqn, igroupName, err) - } - - // Map LUN (it may already be mapped) - lunID, err := clientAPI.LunMapIfNotMapped(igroupName, lunPath) - if err != nil { - return err - } - - // Add fields needed by Attach - publishInfo.IscsiLunNumber = int32(lunID) - publishInfo.IscsiTargetPortal = ips[0] - publishInfo.IscsiPortals = ips[1:] - publishInfo.IscsiTargetIQN = iSCSINodeName - publishInfo.IscsiIgroup = igroupName - publishInfo.FilesystemType = fstype - publishInfo.UseCHAP = false - publishInfo.SharedTarget = true - - return nil -} - -// InitializeSANDriver performs common ONTAP SAN driver initialization. -func InitializeSANDriver(context tridentconfig.DriverContext, clientAPI *api.Client, - config *drivers.OntapStorageDriverConfig, validate func() error) error { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "InitializeSANDriver", "Type": "ontap_common"} - log.WithFields(fields).Debug(">>>> InitializeSANDriver") - defer log.WithFields(fields).Debug("<<<< InitializeSANDriver") - } - - if config.IgroupName == "" { - config.IgroupName = drivers.GetDefaultIgroupName(context) - } - - // Defer validation to the driver's validate method - if err := validate(); err != nil { - return err - } - - // Create igroup - igroupResponse, err := clientAPI.IgroupCreate(config.IgroupName, "iscsi", "linux") - if err != nil { - return fmt.Errorf("error creating igroup: %v", err) - } - if zerr := api.NewZapiError(igroupResponse); !zerr.IsPassed() { - // Handle case where the igroup already exists - if zerr.Code() != azgo.EVDISK_ERROR_INITGROUP_EXISTS { - return fmt.Errorf("error creating igroup %v: %v", config.IgroupName, zerr) - } - } - if context == tridentconfig.ContextKubernetes { - log.WithFields(log.Fields{ - "driver": drivers.OntapSANStorageDriverName, - "SVM": config.SVM, - "igroup": config.IgroupName, - }).Warn("Please ensure all relevant hosts are added to the initiator group.") - } - - return nil -} - -// InitializeOntapDriver sets up the API client and performs all other initialization tasks -// that are common to all the ONTAP drivers. -func InitializeOntapDriver(config *drivers.OntapStorageDriverConfig) (*api.Client, error) { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "InitializeOntapDriver", "Type": "ontap_common"} - log.WithFields(fields).Debug(">>>> InitializeOntapDriver") - defer log.WithFields(fields).Debug("<<<< InitializeOntapDriver") - } - - // Splitting config.ManagementLIF with colon allows to provide managementLIF value as address:port format - mgmtLIF := strings.Split(config.ManagementLIF, ":")[0] - - addressesFromHostname, err := net.LookupHost(mgmtLIF) - if err != nil { - log.WithField("ManagementLIF", mgmtLIF).Error("Host lookup failed for ManagementLIF. 
", err) - return nil, err - } - - log.WithFields(log.Fields{ - "hostname": mgmtLIF, - "addresses": addressesFromHostname, - }).Debug("Addresses found from ManagementLIF lookup.") - - // Get the API client - client, err := InitializeOntapAPI(config) - if err != nil { - return nil, fmt.Errorf("could not create Data ONTAP API client: %v", err) - } - - // Make sure we're using a valid ONTAP version - ontapi, err := client.SystemGetOntapiVersion() - if err != nil { - return nil, fmt.Errorf("could not determine Data ONTAP API version: %v", err) - } - if !client.SupportsFeature(api.MinimumONTAPIVersion) { - return nil, errors.New("ONTAP 9.1 or later is required") - } - log.WithField("Ontapi", ontapi).Debug("ONTAP API version.") - - // Log cluster node serial numbers if we can get them - config.SerialNumbers, err = client.NodeListSerialNumbers() - if err != nil { - log.Warnf("Could not determine controller serial numbers. %v", err) - } else { - log.WithFields(log.Fields{ - "serialNumbers": strings.Join(config.SerialNumbers, ","), - }).Info("Controller serial numbers.") - } - - // Load default config parameters - err = PopulateConfigurationDefaults(config) - if err != nil { - return nil, fmt.Errorf("could not populate configuration defaults: %v", err) - } - - return client, nil -} - -// InitializeOntapAPI returns an ontap.Client ZAPI client. If the SVM isn't specified in the config -// file, this method attempts to derive the one to use. -func InitializeOntapAPI(config *drivers.OntapStorageDriverConfig) (*api.Client, error) { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "InitializeOntapAPI", "Type": "ontap_common"} - log.WithFields(fields).Debug(">>>> InitializeOntapAPI") - defer log.WithFields(fields).Debug("<<<< InitializeOntapAPI") - } - - client := api.NewClient(api.ClientConfig{ - ManagementLIF: config.ManagementLIF, - SVM: config.SVM, - Username: config.Username, - Password: config.Password, - DriverContext: config.DriverContext, - DebugTraceFlags: config.DebugTraceFlags, - }) - - if config.SVM != "" { - - vserverResponse, err := client.VserverGetRequest() - if err = api.GetError(vserverResponse, err); err != nil { - return nil, fmt.Errorf("error reading SVM details: %v", err) - } - - client.SVMUUID = string(vserverResponse.Result.AttributesPtr.VserverInfoPtr.Uuid()) - - log.WithField("SVM", config.SVM).Debug("Using specified SVM.") - return client, nil - } - - // Use VserverGetIterRequest to populate config.SVM if it wasn't specified and we can derive it - vserverResponse, err := client.VserverGetIterRequest() - if err = api.GetError(vserverResponse, err); err != nil { - return nil, fmt.Errorf("error enumerating SVMs: %v", err) - } - - if vserverResponse.Result.NumRecords() != 1 { - return nil, errors.New("cannot derive SVM to use; please specify SVM in config file") - } - - // Update everything to use our derived SVM - config.SVM = vserverResponse.Result.AttributesListPtr.VserverInfoPtr[0].VserverName() - svmUUID := string(vserverResponse.Result.AttributesListPtr.VserverInfoPtr[0].Uuid()) - - client = api.NewClient(api.ClientConfig{ - ManagementLIF: config.ManagementLIF, - SVM: config.SVM, - Username: config.Username, - Password: config.Password, - DriverContext: config.DriverContext, - DebugTraceFlags: config.DebugTraceFlags, - }) - client.SVMUUID = svmUUID - - log.WithField("SVM", config.SVM).Debug("Using derived SVM.") - return client, nil -} - -// ValidateSANDriver contains the validation logic shared between ontap-san and ontap-san-economy. 
-func ValidateSANDriver(api *api.Client, config *drivers.OntapStorageDriverConfig, ips []string) error { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "ValidateSANDriver", "Type": "ontap_common"} - log.WithFields(fields).Debug(">>>> ValidateSANDriver") - defer log.WithFields(fields).Debug("<<<< ValidateSANDriver") - } - - // If the user sets the LIF to use in the config, disable multipathing and use just the one IP address - if config.DataLIF != "" { - // Make sure it's actually a valid address - if ip := net.ParseIP(config.DataLIF); nil == ip { - return fmt.Errorf("data LIF is not a valid IP: %s", config.DataLIF) - } - // Make sure the IP matches one of the LIFs - found := false - for _, ip := range ips { - if config.DataLIF == ip { - found = true - break - } - } - if found { - log.WithField("ip", config.DataLIF).Debug("Found matching Data LIF.") - } else { - log.WithField("ip", config.DataLIF).Debug("Could not find matching Data LIF.") - return fmt.Errorf("could not find Data LIF for %s", config.DataLIF) - } - // Replace the IPs with a singleton list - ips = []string{config.DataLIF} - } - - if config.DriverContext == tridentconfig.ContextDocker { - // Make sure this host is logged into the ONTAP iSCSI target - err := utils.EnsureISCSISessions(ips) - if err != nil { - return fmt.Errorf("error establishing iSCSI session: %v", err) - } - } - - return nil -} - -// ValidateNASDriver contains the validation logic shared between ontap-nas and ontap-nas-economy. -func ValidateNASDriver(api *api.Client, config *drivers.OntapStorageDriverConfig) error { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "ValidateNASDriver", "Type": "ontap_common"} - log.WithFields(fields).Debug(">>>> ValidateNASDriver") - defer log.WithFields(fields).Debug("<<<< ValidateNASDriver") - } - - dataLIFs, err := api.NetInterfaceGetDataLIFs("nfs") - if err != nil { - return err - } - - if len(dataLIFs) == 0 { - return fmt.Errorf("no NAS data LIFs found on SVM %s", config.SVM) - } else { - log.WithField("dataLIFs", dataLIFs).Debug("Found NAS LIFs.") - } - - // If they didn't set a LIF to use in the config, we'll set it to the first nfs LIF we happen to find - if config.DataLIF == "" { - config.DataLIF = dataLIFs[0] - } else { - _, err := ValidateDataLIF(config.DataLIF, dataLIFs) - if err != nil { - return fmt.Errorf("data LIF validation failed: %v", err) - } - } - - return nil -} - -func ValidateDataLIF(dataLIF string, dataLIFs []string) ([]string, error) { - - addressesFromHostname, err := net.LookupHost(dataLIF) - if err != nil { - log.Error("Host lookup failed. ", err) - return nil, err - } - - log.WithFields(log.Fields{ - "hostname": dataLIF, - "addresses": addressesFromHostname, - }).Debug("Addresses found from hostname lookup.") - - for _, hostNameAddress := range addressesFromHostname { - foundValidLIFAddress := false - - loop: - for _, lifAddress := range dataLIFs { - if lifAddress == hostNameAddress { - foundValidLIFAddress = true - break loop - } - } - if foundValidLIFAddress { - log.WithField("hostNameAddress", hostNameAddress).Debug("Found matching Data LIF.") - } else { - log.WithField("hostNameAddress", hostNameAddress).Debug("Could not find matching Data LIF.") - return nil, fmt.Errorf("could not find Data LIF for %s", hostNameAddress) - } - - } - - return addressesFromHostname, nil -} - -// Enable space-allocation by default. If not enabled, Data ONTAP takes the LUNs offline -// when they're seen as full. 
-// see: https://github.com/NetApp/trident/issues/135 -const DefaultSpaceAllocation = "true" -const DefaultSpaceReserve = "none" -const DefaultSnapshotPolicy = "none" -const DefaultSnapshotReserve = "" -const DefaultUnixPermissions = "---rwxrwxrwx" -const DefaultSnapshotDir = "false" -const DefaultExportPolicy = "default" -const DefaultSecurityStyle = "unix" -const DefaultNfsMountOptionsDocker = "-o nfsvers=3" -const DefaultNfsMountOptionsKubernetes = "" -const DefaultSplitOnClone = "false" -const DefaultEncryption = "false" -const DefaultLimitAggregateUsage = "" -const DefaultLimitVolumeSize = "" - -// PopulateConfigurationDefaults fills in default values for configuration settings if not supplied in the config file -func PopulateConfigurationDefaults(config *drivers.OntapStorageDriverConfig) error { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "PopulateConfigurationDefaults", "Type": "ontap_common"} - log.WithFields(fields).Debug(">>>> PopulateConfigurationDefaults") - defer log.WithFields(fields).Debug("<<<< PopulateConfigurationDefaults") - } - - // Ensure the default volume size is valid, using a "default default" of 1G if not set - if config.Size == "" { - config.Size = drivers.DefaultVolumeSize - } else { - _, err := utils.ConvertSizeToBytes(config.Size) - if err != nil { - return fmt.Errorf("invalid config value for default volume size: %v", err) - } - } - - if config.StoragePrefix == nil { - prefix := drivers.GetDefaultStoragePrefix(config.DriverContext) - config.StoragePrefix = &prefix - } - - if config.SpaceAllocation == "" { - config.SpaceAllocation = DefaultSpaceAllocation - } - - if config.SpaceReserve == "" { - config.SpaceReserve = DefaultSpaceReserve - } - - if config.SnapshotPolicy == "" { - config.SnapshotPolicy = DefaultSnapshotPolicy - } - - if config.SnapshotReserve == "" { - config.SnapshotReserve = DefaultSnapshotReserve - } - - if config.UnixPermissions == "" { - config.UnixPermissions = DefaultUnixPermissions - } - - if config.SnapshotDir == "" { - config.SnapshotDir = DefaultSnapshotDir - } - - if config.ExportPolicy == "" { - config.ExportPolicy = DefaultExportPolicy - } - - if config.SecurityStyle == "" { - config.SecurityStyle = DefaultSecurityStyle - } - - if config.NfsMountOptions == "" { - switch config.DriverContext { - case tridentconfig.ContextDocker: - config.NfsMountOptions = DefaultNfsMountOptionsDocker - default: - config.NfsMountOptions = DefaultNfsMountOptionsKubernetes - } - } - - if config.SplitOnClone == "" { - config.SplitOnClone = DefaultSplitOnClone - } else { - _, err := strconv.ParseBool(config.SplitOnClone) - if err != nil { - return fmt.Errorf("invalid boolean value for splitOnClone: %v", err) - } - } - - if config.FileSystemType == "" { - config.FileSystemType = drivers.DefaultFileSystemType - } - - if config.Encryption == "" { - config.Encryption = DefaultEncryption - } - - if config.LimitAggregateUsage == "" { - config.LimitAggregateUsage = DefaultLimitAggregateUsage - } - - if config.LimitVolumeSize == "" { - config.LimitVolumeSize = DefaultLimitVolumeSize - } - - log.WithFields(log.Fields{ - "StoragePrefix": *config.StoragePrefix, - "SpaceAllocation": config.SpaceAllocation, - "SpaceReserve": config.SpaceReserve, - "SnapshotPolicy": config.SnapshotPolicy, - "SnapshotReserve": config.SnapshotReserve, - "UnixPermissions": config.UnixPermissions, - "SnapshotDir": config.SnapshotDir, - "ExportPolicy": config.ExportPolicy, - "SecurityStyle": config.SecurityStyle, - "NfsMountOptions": config.NfsMountOptions, - 
"SplitOnClone": config.SplitOnClone, - "FileSystemType": config.FileSystemType, - "Encryption": config.Encryption, - "LimitAggregateUsage": config.LimitAggregateUsage, - "LimitVolumeSize": config.LimitVolumeSize, - "Size": config.Size, - }).Debugf("Configuration defaults") - - return nil -} - -func checkAggregateLimitsForFlexvol( - flexvol string, requestedSizeInt uint64, config drivers.OntapStorageDriverConfig, client *api.Client, -) error { - - var aggregate, spaceReserve string - - volInfo, err := client.VolumeGet(flexvol) - if err != nil { - return err - } - if volInfo.VolumeIdAttributesPtr != nil { - aggregate = volInfo.VolumeIdAttributesPtr.ContainingAggregateName() - } else { - return fmt.Errorf("aggregate info not available from Flexvol %s", flexvol) - } - if volInfo.VolumeSpaceAttributesPtr != nil { - spaceReserve = volInfo.VolumeSpaceAttributesPtr.SpaceGuarantee() - } else { - return fmt.Errorf("spaceReserve info not available from Flexvol %s", flexvol) - } - - return checkAggregateLimits(aggregate, spaceReserve, requestedSizeInt, config, client) -} - -func checkAggregateLimits( - aggregate, spaceReserve string, requestedSizeInt uint64, - config drivers.OntapStorageDriverConfig, client *api.Client, -) error { - - requestedSize := float64(requestedSizeInt) - - limitAggregateUsage := config.LimitAggregateUsage - limitAggregateUsage = strings.Replace(limitAggregateUsage, "%", "", -1) // strip off any % - - log.WithFields(log.Fields{ - "aggregate": aggregate, - "requestedSize": requestedSize, - "limitAggregateUsage": limitAggregateUsage, - }).Debugf("Checking aggregate limits") - - if limitAggregateUsage == "" { - log.Debugf("No limits specified") - return nil - } - - if aggregate == "" { - return errors.New("aggregate not provided, cannot check aggregate provisioning limits") - } - - // lookup aggregate - aggrSpaceResponse, aggrSpaceErr := client.AggrSpaceGetIterRequest(aggregate) - if aggrSpaceErr != nil { - return aggrSpaceErr - } - - // iterate over results - if aggrSpaceResponse.Result.AttributesListPtr != nil { - for _, aggrSpace := range aggrSpaceResponse.Result.AttributesListPtr.SpaceInformationPtr { - aggrName := aggrSpace.Aggregate() - if aggregate != aggrName { - log.Debugf("Skipping " + aggrName) - continue - } - - log.WithFields(log.Fields{ - "aggrName": aggrName, - "size": aggrSpace.AggregateSize(), - "volumeFootprints": aggrSpace.VolumeFootprints(), - "volumeFootprintsPercent": aggrSpace.VolumeFootprintsPercent(), - "usedIncludingSnapshotReserve": aggrSpace.UsedIncludingSnapshotReserve(), - "usedIncludingSnapshotReservePercent": aggrSpace.UsedIncludingSnapshotReservePercent(), - }).Info("Dumping aggregate space") - - if limitAggregateUsage != "" { - percentLimit, parseErr := strconv.ParseFloat(limitAggregateUsage, 64) - if parseErr != nil { - return parseErr - } - - usedIncludingSnapshotReserve := float64(aggrSpace.UsedIncludingSnapshotReserve()) - aggregateSize := float64(aggrSpace.AggregateSize()) - - spaceReserveIsThick := false - if spaceReserve == "volume" { - spaceReserveIsThick = true - } - - if spaceReserveIsThick { - // we SHOULD include the requestedSize in our computation - percentUsedWithRequest := ((usedIncludingSnapshotReserve + requestedSize) / aggregateSize) * 100.0 - log.WithFields(log.Fields{ - "percentUsedWithRequest": percentUsedWithRequest, - "percentLimit": percentLimit, - "spaceReserve": spaceReserve, - }).Debugf("Checking usage percentage limits") - - if percentUsedWithRequest >= percentLimit { - errorMessage := fmt.Sprintf("aggregate usage of 
%.2f %% would exceed the limit of %.2f %%", - percentUsedWithRequest, percentLimit) - return errors.New(errorMessage) - } - } else { - // we should NOT include the requestedSize in our computation - percentUsedWithoutRequest := ((usedIncludingSnapshotReserve) / aggregateSize) * 100.0 - log.WithFields(log.Fields{ - "percentUsedWithoutRequest": percentUsedWithoutRequest, - "percentLimit": percentLimit, - "spaceReserve": spaceReserve, - }).Debugf("Checking usage percentage limits") - - if percentUsedWithoutRequest >= percentLimit { - errorMessage := fmt.Sprintf("aggregate usage of %.2f %% exceeds the limit of %.2f %%", - percentUsedWithoutRequest, percentLimit) - return errors.New(errorMessage) - } - } - } - - log.Debugf("Request within specicifed limits, going to create.") - return nil - } - } - - return errors.New("could not find aggregate, cannot check aggregate provisioning limits for " + aggregate) -} - -func GetVolumeSize(sizeBytes uint64, config drivers.OntapStorageDriverConfig) (uint64, error) { - - if sizeBytes == 0 { - defaultSize, _ := utils.ConvertSizeToBytes(config.Size) - sizeBytes, _ = strconv.ParseUint(defaultSize, 10, 64) - } - if sizeBytes < MinimumVolumeSizeBytes { - return 0, fmt.Errorf("requested volume size (%d bytes) is too small; "+ - "the minimum volume size is %d bytes", sizeBytes, MinimumVolumeSizeBytes) - } - return sizeBytes, nil -} - -func GetSnapshotReserve(snapshotPolicy, snapshotReserve string) (int, error) { - - if snapshotReserve != "" { - // snapshotReserve defaults to "", so if it is explicitly set - // (either in config or create options), honor the value. - snapshotReserveInt64, err := strconv.ParseInt(snapshotReserve, 10, 64) - if err != nil { - return api.NumericalValueNotSet, err - } - return int(snapshotReserveInt64), nil - } else { - // If snapshotReserve isn't set, then look at snapshotPolicy. If the policy is "none", - // return 0. Otherwise return -1, indicating that ONTAP should use its own default value. - if snapshotPolicy == "none" { - return 0, nil - } else { - return api.NumericalValueNotSet, nil - } - } -} - -// EMSHeartbeat logs an ASUP message on a timer -// view them via filer::> event log show -severity NOTICE -func EMSHeartbeat(driver StorageDriver) { - - // log an informational message on a timer - hostname, err := os.Hostname() - if err != nil { - log.Warnf("Could not determine hostname. 
%v", err) - hostname = "unknown" - } - - message, _ := json.Marshal(driver.GetTelemetry()) - - emsResponse, err := driver.GetAPI().EmsAutosupportLog( - strconv.Itoa(drivers.ConfigVersion), false, "heartbeat", hostname, - string(message), 1, tridentconfig.OrchestratorName, 5) - - if err = api.GetError(emsResponse, err); err != nil { - log.WithFields(log.Fields{ - "driver": driver.Name(), - "error": err, - }).Error("Error logging EMS message.") - } else { - log.WithField("driver", driver.Name()).Debug("Logged EMS message.") - } -} - -const MSecPerHour = 1000 * 60 * 60 // millis * seconds * minutes - -// probeForVolume polls for the ONTAP volume to appear, with backoff retry logic -func probeForVolume(name string, client *api.Client) error { - checkVolumeExists := func() error { - volExists, err := client.VolumeExists(name) - if err != nil { - return err - } - if !volExists { - return fmt.Errorf("volume %v does not yet exist", name) - } - return nil - } - volumeExistsNotify := func(err error, duration time.Duration) { - log.WithField("increment", duration).Debug("Volume not yet present, waiting.") - } - volumeBackoff := backoff.NewExponentialBackOff() - volumeBackoff.InitialInterval = 1 * time.Second - volumeBackoff.Multiplier = 2 - volumeBackoff.RandomizationFactor = 0.1 - volumeBackoff.MaxElapsedTime = 30 * time.Second - - // Run the volume check using an exponential backoff - if err := backoff.RetryNotify(checkVolumeExists, volumeBackoff, volumeExistsNotify); err != nil { - log.WithField("volume", name).Warnf("Could not find volume after %3.2f seconds.", volumeBackoff.MaxElapsedTime.Seconds()) - return fmt.Errorf("volume %v does not exist", name) - } else { - log.WithField("volume", name).Debug("Volume found.") - return nil - } -} - -// Create a volume clone -func CreateOntapClone( - name, source, snapshot string, split bool, config *drivers.OntapStorageDriverConfig, client *api.Client, -) error { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateOntapClone", - "Type": "ontap_common", - "name": name, - "source": source, - "snapshot": snapshot, - "split": split, - } - log.WithFields(fields).Debug(">>>> CreateOntapClone") - defer log.WithFields(fields).Debug("<<<< CreateOntapClone") - } - - // If the specified volume already exists, return an error - volExists, err := client.VolumeExists(name) - if err != nil { - return fmt.Errorf("error checking for existing volume: %v", err) - } - if volExists { - return fmt.Errorf("volume %s already exists", name) - } - - // If no specific snapshot was requested, create one - if snapshot == "" { - snapshot = time.Now().UTC().Format(storage.SnapshotNameFormat) - snapResponse, err := client.SnapshotCreate(snapshot, source) - if err = api.GetError(snapResponse, err); err != nil { - return fmt.Errorf("error creating snapshot: %v", err) - } - } - - // Create the clone based on a snapshot - cloneResponse, err := client.VolumeCloneCreate(name, source, snapshot) - if err != nil { - return fmt.Errorf("error creating clone: %v", err) - } - if zerr := api.NewZapiError(cloneResponse); !zerr.IsPassed() { - if zerr.Code() == azgo.EOBJECTNOTFOUND { - return fmt.Errorf("snapshot %s does not exist in volume %s", snapshot, source) - } else if zerr.IsFailedToLoadJobError() { - fields := log.Fields{ - "zerr": zerr, - } - log.WithFields(fields).Warn("Problem encountered during the clone create operation, attempting to verify the clone was actually created") - if volumeLookupError := probeForVolume(name, client); volumeLookupError != nil { - 
return volumeLookupError - } - } else { - return fmt.Errorf("error creating clone: %v", zerr) - } - } - - if config.StorageDriverName == drivers.OntapNASStorageDriverName { - // Mount the new volume - mountResponse, err := client.VolumeMount(name, "/"+name) - if err = api.GetError(mountResponse, err); err != nil { - return fmt.Errorf("error mounting volume to junction: %v", err) - } - } - - // Split the clone if requested - if split { - splitResponse, err := client.VolumeCloneSplitStart(name) - if err = api.GetError(splitResponse, err); err != nil { - return fmt.Errorf("error splitting clone: %v", err) - } - } - - return nil -} - -// GetSnapshot gets a snapshot. To distinguish between an API error reading the snapshot -// and a non-existent snapshot, this method may return (nil, nil). -func GetSnapshot( - snapConfig *storage.SnapshotConfig, config *drivers.OntapStorageDriverConfig, client *api.Client, - sizeGetter func(string) (int, error), -) (*storage.Snapshot, error) { - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshot", - "Type": "ontap_common", - "snapshotName": internalSnapName, - "volumeName": internalVolName, - } - log.WithFields(fields).Debug(">>>> GetSnapshot") - defer log.WithFields(fields).Debug("<<<< GetSnapshot") - } - - size, err := sizeGetter(internalVolName) - if err != nil { - return nil, fmt.Errorf("error reading volume size: %v", err) - } - - snapListResponse, err := client.SnapshotList(internalVolName) - if err = api.GetError(snapListResponse, err); err != nil { - return nil, fmt.Errorf("error enumerating snapshots: %v", err) - } - - if snapListResponse.Result.AttributesListPtr != nil { - for _, snap := range snapListResponse.Result.AttributesListPtr.SnapshotInfoPtr { - if snap.Name() == internalSnapName { - - log.WithFields(log.Fields{ - "snapshotName": internalSnapName, - "volumeName": internalVolName, - "created": snap.AccessTime(), - }).Debug("Found snapshot.") - - return &storage.Snapshot{ - Config: snapConfig, - Created: time.Unix(int64(snap.AccessTime()), 0).UTC().Format(storage.SnapshotTimestampFormat), - SizeBytes: int64(size), - }, nil - } - } - } - - log.WithFields(log.Fields{ - "snapshotName": internalSnapName, - "volumeName": internalVolName, - }).Warning("Snapshot not found.") - - return nil, nil -} - -// GetSnapshots returns the list of snapshots associated with the named volume. 
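
Editor's note: GetSnapshot above (and GetSnapshots below) turns the ZAPI snapshot access time, an epoch value, into the orchestrator's timestamp string via time.Unix(...).UTC().Format(...). The sketch below shows just that conversion, substituting RFC 3339 for the project's SnapshotTimestampFormat constant.

package main

import (
	"fmt"
	"time"
)

// snapshotCreatedAt converts an epoch access time into a UTC timestamp string,
// the same shape of conversion used when building storage.Snapshot values.
func snapshotCreatedAt(accessTime int64) string {
	return time.Unix(accessTime, 0).UTC().Format(time.RFC3339)
}

func main() {
	fmt.Println(snapshotCreatedAt(1136214245)) // 2006-01-02T15:04:05Z
}
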
-func GetSnapshots( - volConfig *storage.VolumeConfig, config *drivers.OntapStorageDriverConfig, client *api.Client, - sizeGetter func(string) (int, error), -) ([]*storage.Snapshot, error) { - - internalVolName := volConfig.InternalName - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshotList", - "Type": "ontap_common", - "volumeName": internalVolName, - } - log.WithFields(fields).Debug(">>>> GetSnapshotList") - defer log.WithFields(fields).Debug("<<<< GetSnapshotList") - } - - size, err := sizeGetter(internalVolName) - if err != nil { - return nil, fmt.Errorf("error reading volume size: %v", err) - } - - snapListResponse, err := client.SnapshotList(internalVolName) - if err = api.GetError(snapListResponse, err); err != nil { - return nil, fmt.Errorf("error enumerating snapshots: %v", err) - } - - log.Debugf("Returned %v snapshots.", snapListResponse.Result.NumRecords()) - snapshots := make([]*storage.Snapshot, 0) - - if snapListResponse.Result.AttributesListPtr != nil { - for _, snap := range snapListResponse.Result.AttributesListPtr.SnapshotInfoPtr { - - log.WithFields(log.Fields{ - "name": snap.Name(), - "accessTime": snap.AccessTime(), - }).Debug("Snapshot") - - snapshot := &storage.Snapshot{ - Config: &storage.SnapshotConfig{ - Version: tridentconfig.OrchestratorAPIVersion, - Name: snap.Name(), - InternalName: snap.Name(), - VolumeName: volConfig.Name, - VolumeInternalName: volConfig.InternalName, - }, - Created: time.Unix(int64(snap.AccessTime()), 0).UTC().Format(storage.SnapshotTimestampFormat), - SizeBytes: int64(size), - } - - snapshots = append(snapshots, snapshot) - } - } - - return snapshots, nil -} - -// CreateSnapshot creates a snapshot for the given volume. -func CreateSnapshot( - snapConfig *storage.SnapshotConfig, config *drivers.OntapStorageDriverConfig, client *api.Client, - sizeGetter func(string) (int, error), -) (*storage.Snapshot, error) { - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateSnapshot", - "Type": "ontap_common", - "snapshotName": internalSnapName, - "volumeName": internalVolName, - } - log.WithFields(fields).Debug(">>>> CreateSnapshot") - defer log.WithFields(fields).Debug("<<<< CreateSnapshot") - } - - // If the specified volume doesn't exist, return error - volExists, err := client.VolumeExists(internalVolName) - if err != nil { - return nil, fmt.Errorf("error checking for existing volume: %v", err) - } - if !volExists { - return nil, fmt.Errorf("volume %s does not exist", internalVolName) - } - - size, err := sizeGetter(internalVolName) - if err != nil { - return nil, fmt.Errorf("error reading volume size: %v", err) - } - - snapResponse, err := client.SnapshotCreate(internalSnapName, internalVolName) - if err = api.GetError(snapResponse, err); err != nil { - return nil, fmt.Errorf("could not create snapshot: %v", err) - } - - // Fetching list of snapshots to get snapshot access time - snapListResponse, err := client.SnapshotList(internalVolName) - if err = api.GetError(snapListResponse, err); err != nil { - return nil, fmt.Errorf("error enumerating snapshots: %v", err) - } - if snapListResponse.Result.AttributesListPtr != nil { - for _, snap := range snapListResponse.Result.AttributesListPtr.SnapshotInfoPtr { - if snap.Name() == internalSnapName { - return &storage.Snapshot{ - Config: snapConfig, - Created: time.Unix(int64(snap.AccessTime()), 
0).UTC().Format(storage.SnapshotTimestampFormat), - SizeBytes: int64(size), - }, nil - } - } - } - return nil, fmt.Errorf("could not find snapshot %s for souce volume %s", internalSnapName, internalVolName) -} - -// Restore a volume (in place) from a snapshot. -func RestoreSnapshot( - snapConfig *storage.SnapshotConfig, config *drivers.OntapStorageDriverConfig, client *api.Client, -) error { - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "RestoreSnapshot", - "Type": "ontap_common", - "snapshotName": internalSnapName, - "volumeName": internalVolName, - } - log.WithFields(fields).Debug(">>>> RestoreSnapshot") - defer log.WithFields(fields).Debug("<<<< RestoreSnapshot") - } - - snapResponse, err := client.SnapshotRestoreVolume(internalSnapName, internalVolName) - if err = api.GetError(snapResponse, err); err != nil { - return fmt.Errorf("error restoring snapshot: %v", err) - } - - log.WithFields(log.Fields{ - "snapshotName": internalSnapName, - "volumeName": internalVolName, - }).Debug("Restored snapshot.") - - return nil -} - -// DeleteSnapshot deletes a single snapshot. -func DeleteSnapshot( - snapConfig *storage.SnapshotConfig, config *drivers.OntapStorageDriverConfig, client *api.Client, -) error { - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "DeleteSnapshot", - "Type": "ontap_common", - "snapshotName": internalSnapName, - "volumeName": internalVolName, - } - log.WithFields(fields).Debug(">>>> DeleteSnapshot") - defer log.WithFields(fields).Debug("<<<< DeleteSnapshot") - } - - snapResponse, err := client.SnapshotDelete(internalSnapName, internalVolName) - if err != nil { - return fmt.Errorf("error deleting snapshot: %v", err) - } - if zerr := api.NewZapiError(snapResponse); !zerr.IsPassed() { - if zerr.Code() == azgo.ESNAPSHOTBUSY { - // Start a split here before returning the error so a subsequent delete attempt may succeed. - _ = SplitVolumeFromBusySnapshot(snapConfig, config, client) - } - return fmt.Errorf("error deleting snapshot: %v", zerr) - } - - log.WithField("snapshotName", internalSnapName).Debug("Deleted snapshot.") - return nil -} - -// SplitVolumeFromBusySnapshot gets the list of volumes backed by a busy snapshot and starts -// a split operation on the first one (sorted by volume name). 
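
Editor's note: DeleteSnapshot above treats a busy snapshot as retryable: it kicks off a clone split and still returns the error, so a later delete can succeed once the split finishes. SplitVolumeFromBusySnapshot, whose body follows, chooses a single child deterministically by sorting the clone names. A small sketch of that selection step, with made-up clone names:

package main

import (
	"fmt"
	"sort"
)

// pickCloneToSplit sorts the clones backed by a busy snapshot and returns the one
// to split first, so repeated calls never start more than one split at a time.
func pickCloneToSplit(childVolumes []string) (string, bool) {
	if len(childVolumes) == 0 {
		return "", false
	}
	sort.Strings(childVolumes)
	return childVolumes[0], true
}

func main() {
	clone, ok := pickCloneToSplit([]string{"trident_clone_b", "trident_clone_a"})
	fmt.Println(clone, ok) // trident_clone_a true
}
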
-func SplitVolumeFromBusySnapshot( - snapConfig *storage.SnapshotConfig, config *drivers.OntapStorageDriverConfig, client *api.Client, -) error { - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "SplitVolumeFromBusySnapshot", - "Type": "ontap_common", - "snapshotName": internalSnapName, - "volumeName": internalVolName, - } - log.WithFields(fields).Debug(">>>> SplitVolumeFromBusySnapshot") - defer log.WithFields(fields).Debug("<<<< SplitVolumeFromBusySnapshot") - } - - childVolumes, err := client.VolumeListAllBackedBySnapshot(internalVolName, internalSnapName) - if err != nil { - log.WithFields(log.Fields{ - "snapshotName": internalSnapName, - "parentVolumeName": internalVolName, - "error": err, - }).Error("Could not list volumes backed by snapshot.") - return err - } else if len(childVolumes) == 0 { - return nil - } - - // We're going to start a single split operation, but there could be multiple children, so we - // sort the volumes by name to not have more than one split operation running at a time. - sort.Strings(childVolumes) - - splitResponse, err := client.VolumeCloneSplitStart(childVolumes[0]) - if err = api.GetError(splitResponse, err); err != nil { - log.WithFields(log.Fields{ - "snapshotName": internalSnapName, - "parentVolumeName": internalVolName, - "cloneVolumeName": childVolumes[0], - "error": err, - }).Error("Could not begin splitting clone from snapshot.") - return fmt.Errorf("error splitting clone: %v", err) - } - - log.WithFields(log.Fields{ - "snapshotName": internalSnapName, - "parentVolumeName": internalVolName, - "cloneVolumeName": childVolumes[0], - }).Info("Began splitting clone from snapshot.") - - return nil -} - -// GetVolume checks for the existence of a volume. It returns nil if the volume -// exists and an error if it does not (or the API call fails). -func GetVolume(name string, client *api.Client, config *drivers.OntapStorageDriverConfig) error { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "GetVolume", "Type": "ontap_common"} - log.WithFields(fields).Debug(">>>> GetVolume") - defer log.WithFields(fields).Debug("<<<< GetVolume") - } - - volExists, err := client.VolumeExists(name) - if err != nil { - return fmt.Errorf("error checking for existing volume: %v", err) - } - if !volExists { - log.WithField("flexvol", name).Debug("Flexvol not found.") - return fmt.Errorf("volume %s does not exist", name) - } - - return nil -} - -type ontapPerformanceClass string - -const ( - ontapHDD ontapPerformanceClass = "hdd" - ontapHybrid ontapPerformanceClass = "hybrid" - ontapSSD ontapPerformanceClass = "ssd" -) - -var ontapPerformanceClasses = map[ontapPerformanceClass]map[string]sa.Offer{ - ontapHDD: {sa.Media: sa.NewStringOffer(sa.HDD)}, - ontapHybrid: {sa.Media: sa.NewStringOffer(sa.Hybrid)}, - ontapSSD: {sa.Media: sa.NewStringOffer(sa.SSD)}, -} - -// getStorageBackendSpecsCommon discovers the aggregates assigned to the configured SVM, and it updates the specified Backend -// object with StoragePools and their associated attributes. 
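
Editor's note: getStorageBackendSpecsCommon, whose body follows, creates one pool per aggregate assigned to the SVM and, when the config names a specific aggregate, narrows the set to that one entry after confirming it is actually assigned. The sketch below reduces that pool-selection logic to plain string sets; `selectPools` and its arguments are illustrative stand-ins for the storage.Pool plumbing.

package main

import "fmt"

// selectPools mirrors the aggregate filtering in getStorageBackendSpecsCommon:
// every assigned aggregate becomes a pool unless a single aggregate is configured,
// in which case it must be one of the assigned ones.
func selectPools(assigned []string, configured string) (map[string]bool, error) {
	pools := make(map[string]bool, len(assigned))
	for _, aggr := range assigned {
		pools[aggr] = true
	}
	if configured == "" {
		return pools, nil
	}
	if !pools[configured] {
		return nil, fmt.Errorf("aggregate %s is not assigned to the SVM", configured)
	}
	return map[string]bool{configured: true}, nil
}

func main() {
	pools, err := selectPools([]string{"aggr1", "aggr2"}, "aggr1")
	fmt.Println(pools, err)
}
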
-func getStorageBackendSpecsCommon( - d StorageDriver, backend *storage.Backend, poolAttributes map[string]sa.Offer, -) (err error) { - - client := d.GetAPI() - config := d.GetConfig() - driverName := d.Name() - - // Handle panics from the API layer - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("unable to inspect ONTAP backend: %v\nStack trace:\n%s", r, debug.Stack()) - } - }() - - // Get the aggregates assigned to the SVM. There must be at least one! - vserverAggrs, err := client.VserverGetAggregateNames() - if err != nil { - return - } - if len(vserverAggrs) == 0 { - err = fmt.Errorf("SVM %s has no assigned aggregates", config.SVM) - return - } - - log.WithFields(log.Fields{ - "svm": config.SVM, - "pools": vserverAggrs, - }).Debug("Read storage pools assigned to SVM.") - - // Define a storage pool for each of the SVM's aggregates - storagePools := make(map[string]*storage.Pool) - for _, aggrName := range vserverAggrs { - storagePools[aggrName] = storage.NewStoragePool(backend, aggrName) - } - - // Use all assigned aggregates unless 'aggregate' is set in the config - if config.Aggregate != "" { - - // Make sure the configured aggregate is available to the SVM - if _, ok := storagePools[config.Aggregate]; !ok { - err = fmt.Errorf("the assigned aggregates for SVM %s do not include the configured aggregate %s", - config.SVM, config.Aggregate) - return - } - - log.WithFields(log.Fields{ - "driverName": driverName, - "aggregate": config.Aggregate, - }).Debug("Provisioning will be restricted to the aggregate set in the backend config.") - - storagePools = make(map[string]*storage.Pool) - storagePools[config.Aggregate] = storage.NewStoragePool(backend, config.Aggregate) - } - - // Update pools with aggregate info (i.e. MediaType) - aggrErr := getVserverAggregateAttributes(d, &storagePools) - - if zerr, ok := aggrErr.(api.ZapiError); ok && zerr.IsScopeError() { - log.WithFields(log.Fields{ - "username": config.Username, - }).Warn("User has insufficient privileges to obtain aggregate info. " + - "Storage classes with physical attributes such as 'media' will not match pools on this backend.") - } else if aggrErr != nil { - log.Errorf("Could not obtain aggregate info; storage classes with physical attributes such as 'media' will"+ - " not match pools on this backend: %v.", aggrErr) - } - - // Add attributes common to each pool and register pools with backend - for _, pool := range storagePools { - - for attrName, offer := range poolAttributes { - pool.Attributes[attrName] = offer - } - - backend.AddStoragePool(pool) - } - - return -} - -// getVserverAggregateAttributes gets pool attributes using vserver-show-aggr-get-iter, which will only succeed on Data ONTAP 9 and later. -// If the aggregate attributes are read successfully, the pools passed to this function are updated accordingly. -func getVserverAggregateAttributes(d StorageDriver, storagePools *map[string]*storage.Pool) error { - - result, err := d.GetAPI().VserverShowAggrGetIterRequest() - if err != nil { - return err - } - if zerr := api.NewZapiError(result.Result); !zerr.IsPassed() { - return zerr - } - - if result.Result.AttributesListPtr != nil { - for _, aggr := range result.Result.AttributesListPtr.ShowAggregatesPtr { - aggrName := string(aggr.AggregateName()) - aggrType := aggr.AggregateType() - - // Find matching pool. There are likely more aggregates in the cluster than those assigned to this backend's SVM. - pool, ok := (*storagePools)[aggrName] - if !ok { - continue - } - - // Get the storage attributes (i.e. 
MediaType) corresponding to the aggregate type - storageAttrs, ok := ontapPerformanceClasses[ontapPerformanceClass(aggrType)] - if !ok { - log.WithFields(log.Fields{ - "aggregate": aggrName, - "mediaType": aggrType, - }).Debug("Aggregate has unknown performance characteristics.") - - continue - } - - log.WithFields(log.Fields{ - "aggregate": aggrName, - "mediaType": aggrType, - }).Debug("Read aggregate attributes.") - - // Update the pool with the aggregate storage attributes - for attrName, attr := range storageAttrs { - pool.Attributes[attrName] = attr - } - } - } - - return nil -} - -func getVolumeOptsCommon( - volConfig *storage.VolumeConfig, - pool *storage.Pool, - requests map[string]sa.Request, -) map[string]string { - opts := make(map[string]string) - if pool != nil { - opts["aggregate"] = pool.Name - } - if provisioningTypeReq, ok := requests[sa.ProvisioningType]; ok { - if p, ok := provisioningTypeReq.Value().(string); ok { - if p == "thin" { - opts["spaceReserve"] = "none" - } else if p == "thick" { - // p will equal "thick" here - opts["spaceReserve"] = "volume" - } else { - log.WithFields(log.Fields{ - "provisioner": "ONTAP", - "method": "getVolumeOptsCommon", - "provisioningType": provisioningTypeReq.Value(), - }).Warnf("Expected 'thick' or 'thin' for %s; ignoring.", - sa.ProvisioningType) - } - } else { - log.WithFields(log.Fields{ - "provisioner": "ONTAP", - "method": "getVolumeOptsCommon", - "provisioningType": provisioningTypeReq.Value(), - }).Warnf("Expected string for %s; ignoring.", sa.ProvisioningType) - } - } - if encryptionReq, ok := requests[sa.Encryption]; ok { - if encryption, ok := encryptionReq.Value().(bool); ok { - if encryption { - opts["encryption"] = "true" - } - } else { - log.WithFields(log.Fields{ - "provisioner": "ONTAP", - "method": "getVolumeOptsCommon", - "encryption": encryptionReq.Value(), - }).Warnf("Expected bool for %s; ignoring.", sa.Encryption) - } - } - if volConfig.SnapshotPolicy != "" { - opts["snapshotPolicy"] = volConfig.SnapshotPolicy - } - if volConfig.SnapshotReserve != "" { - opts["snapshotReserve"] = volConfig.SnapshotReserve - } - if volConfig.UnixPermissions != "" { - opts["unixPermissions"] = volConfig.UnixPermissions - } - if volConfig.SnapshotDir != "" { - opts["snapshotDir"] = volConfig.SnapshotDir - } - if volConfig.ExportPolicy != "" { - opts["exportPolicy"] = volConfig.ExportPolicy - } - if volConfig.SpaceReserve != "" { - opts["spaceReserve"] = volConfig.SpaceReserve - } - if volConfig.SecurityStyle != "" { - opts["securityStyle"] = volConfig.SecurityStyle - } - if volConfig.SplitOnClone != "" { - opts["splitOnClone"] = volConfig.SplitOnClone - } - if volConfig.FileSystem != "" { - opts["fileSystemType"] = volConfig.FileSystem - } - if volConfig.Encryption != "" { - opts["encryption"] = volConfig.Encryption - } - - return opts -} - -func getInternalVolumeNameCommon(commonConfig *drivers.CommonStorageDriverConfig, name string) string { - - if tridentconfig.UsingPassthroughStore { - // With a passthrough store, the name mapping must remain reversible - return *commonConfig.StoragePrefix + name - } else { - // With an external store, any transformation of the name is fine - internal := drivers.GetCommonInternalVolumeName(commonConfig, name) - internal = strings.Replace(internal, "-", "_", -1) // ONTAP disallows hyphens - internal = strings.Replace(internal, ".", "_", -1) // ONTAP disallows periods - internal = strings.Replace(internal, "__", "_", -1) // Remove any double underscores - return internal - } -} - -func 
createPrepareCommon(d storage.Driver, volConfig *storage.VolumeConfig) error { - - volConfig.InternalName = d.GetInternalVolumeName(volConfig.Name) - - if volConfig.CloneSourceVolume != "" { - volConfig.CloneSourceVolumeInternal = - d.GetInternalVolumeName(volConfig.CloneSourceVolume) - } - - return nil -} - -func getExternalConfig(config drivers.OntapStorageDriverConfig) interface{} { - - // Clone the config so we don't risk altering the original - var cloneConfig drivers.OntapStorageDriverConfig - drivers.Clone(config, &cloneConfig) - - drivers.SanitizeCommonStorageDriverConfig(cloneConfig.CommonStorageDriverConfig) - cloneConfig.Username = "" // redact the username - cloneConfig.Password = "" // redact the password - return cloneConfig -} - -// resizeValidation performs needed validation checks prior to the resize operation. -func resizeValidation(name string, sizeBytes uint64, - volumeExists func(string) (bool, error), - volumeSize func(string) (int, error)) (uint64, error) { - - // Check that volume exists - volExists, err := volumeExists(name) - if err != nil { - log.WithField("error", err).Errorf("Error checking for existing volume.") - return 0, fmt.Errorf("error occurred checking for existing volume") - } - if !volExists { - return 0, fmt.Errorf("volume %s does not exist", name) - } - - // Check that current size is smaller than requested size - volSize, err := volumeSize(name) - if err != nil { - log.WithField("error", err).Errorf("Error checking volume size.") - return 0, fmt.Errorf("error occurred when checking volume size") - } - volSizeBytes := uint64(volSize) - - if sizeBytes < volSizeBytes { - return 0, fmt.Errorf("requested size %d is less than existing volume size %d", sizeBytes, volSize) - } - - return volSizeBytes, nil -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_nas.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_nas.go deleted file mode 100644 index 33af02278..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_nas.go +++ /dev/null @@ -1,760 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. 
- -package ontap - -import ( - "fmt" - "strconv" - "strings" - - "github.com/RoaringBitmap/roaring" - log "github.com/sirupsen/logrus" - - tridentconfig "github.com/netapp/trident/config" - "github.com/netapp/trident/storage" - sa "github.com/netapp/trident/storage_attribute" - drivers "github.com/netapp/trident/storage_drivers" - "github.com/netapp/trident/storage_drivers/ontap/api" - "github.com/netapp/trident/storage_drivers/ontap/api/azgo" - "github.com/netapp/trident/utils" -) - -// NASStorageDriver is for NFS storage provisioning -type NASStorageDriver struct { - initialized bool - Config drivers.OntapStorageDriverConfig - API *api.Client - Telemetry *Telemetry -} - -func (d *NASStorageDriver) GetConfig() *drivers.OntapStorageDriverConfig { - return &d.Config -} - -func (d *NASStorageDriver) GetAPI() *api.Client { - return d.API -} - -func (d *NASStorageDriver) GetTelemetry() *Telemetry { - d.Telemetry.Telemetry = tridentconfig.OrchestratorTelemetry - return d.Telemetry -} - -// Name is for returning the name of this driver -func (d *NASStorageDriver) Name() string { - return drivers.OntapNASStorageDriverName -} - -// Initialize from the provided config -func (d *NASStorageDriver) Initialize( - context tridentconfig.DriverContext, configJSON string, commonConfig *drivers.CommonStorageDriverConfig, -) error { - - if commonConfig.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Initialize", "Type": "NASStorageDriver"} - log.WithFields(fields).Debug(">>>> Initialize") - defer log.WithFields(fields).Debug("<<<< Initialize") - } - - // Parse the config - config, err := InitializeOntapConfig(context, configJSON, commonConfig) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - - d.API, err = InitializeOntapDriver(config) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - - err = d.validate() - if err != nil { - return fmt.Errorf("error validating %s driver: %v", d.Name(), err) - } - - // Set up the autosupport heartbeat - d.Telemetry = NewOntapTelemetry(d) - d.Telemetry.Start() - - d.initialized = true - return nil -} - -func (d *NASStorageDriver) Initialized() bool { - return d.initialized -} - -func (d *NASStorageDriver) Terminate() { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Terminate", "Type": "NASStorageDriver"} - log.WithFields(fields).Debug(">>>> Terminate") - defer log.WithFields(fields).Debug("<<<< Terminate") - } - if d.Telemetry != nil { - d.Telemetry.Stop() - } - d.initialized = false -} - -// Validate the driver configuration and execution environment -func (d *NASStorageDriver) validate() error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "validate", "Type": "NASStorageDriver"} - log.WithFields(fields).Debug(">>>> validate") - defer log.WithFields(fields).Debug("<<<< validate") - } - - err := ValidateNASDriver(d.API, &d.Config) - if err != nil { - return fmt.Errorf("driver validation failed: %v", err) - } - - return nil -} - -// Create a volume with the specified options -func (d *NASStorageDriver) Create( - volConfig *storage.VolumeConfig, storagePool *storage.Pool, volAttributes map[string]sa.Request, -) error { - - name := volConfig.InternalName - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Create", - "Type": "NASStorageDriver", - "name": name, - "attrs": volAttributes, - } - log.WithFields(fields).Debug(">>>> Create") - defer 
log.WithFields(fields).Debug("<<<< Create") - } - - // If the volume already exists, bail out - volExists, err := d.API.VolumeExists(name) - if err != nil { - return fmt.Errorf("error checking for existing volume: %v", err) - } - if volExists { - return drivers.NewVolumeExistsError(name) - } - - // Determine volume size in bytes - requestedSize, err := utils.ConvertSizeToBytes(volConfig.Size) - if err != nil { - return fmt.Errorf("could not convert volume size %s: %v", volConfig.Size, err) - } - sizeBytes, err := strconv.ParseUint(requestedSize, 10, 64) - if err != nil { - return fmt.Errorf("%v is an invalid volume size: %v", volConfig.Size, err) - } - sizeBytes, err = GetVolumeSize(sizeBytes, d.Config) - if err != nil { - return err - } - - // Get options - opts, err := d.GetVolumeOpts(volConfig, storagePool, volAttributes) - if err != nil { - return err - } - - // get options with default fallback values - // see also: ontap_common.go#PopulateConfigurationDefaults - size := strconv.FormatUint(sizeBytes, 10) - spaceReserve := utils.GetV(opts, "spaceReserve", d.Config.SpaceReserve) - snapshotPolicy := utils.GetV(opts, "snapshotPolicy", d.Config.SnapshotPolicy) - snapshotReserve := utils.GetV(opts, "snapshotReserve", d.Config.SnapshotReserve) - unixPermissions := utils.GetV(opts, "unixPermissions", d.Config.UnixPermissions) - snapshotDir := utils.GetV(opts, "snapshotDir", d.Config.SnapshotDir) - exportPolicy := utils.GetV(opts, "exportPolicy", d.Config.ExportPolicy) - aggregate := utils.GetV(opts, "aggregate", d.Config.Aggregate) - securityStyle := utils.GetV(opts, "securityStyle", d.Config.SecurityStyle) - encryption := utils.GetV(opts, "encryption", d.Config.Encryption) - - if aggrLimitsErr := checkAggregateLimits(aggregate, spaceReserve, sizeBytes, d.Config, d.GetAPI()); aggrLimitsErr != nil { - return aggrLimitsErr - } - - if _, _, checkVolumeSizeLimitsError := drivers.CheckVolumeSizeLimits(sizeBytes, d.Config.CommonStorageDriverConfig); checkVolumeSizeLimitsError != nil { - return checkVolumeSizeLimitsError - } - - enableSnapshotDir, err := strconv.ParseBool(snapshotDir) - if err != nil { - return fmt.Errorf("invalid boolean value for snapshotDir: %v", err) - } - - enableEncryption, err := strconv.ParseBool(encryption) - if err != nil { - return fmt.Errorf("invalid boolean value for encryption: %v", err) - } - - snapshotReserveInt, err := GetSnapshotReserve(snapshotPolicy, snapshotReserve) - if err != nil { - return fmt.Errorf("invalid value for snapshotReserve: %v", err) - } - - log.WithFields(log.Fields{ - "name": name, - "size": size, - "spaceReserve": spaceReserve, - "snapshotPolicy": snapshotPolicy, - "snapshotReserve": snapshotReserveInt, - "unixPermissions": unixPermissions, - "snapshotDir": enableSnapshotDir, - "exportPolicy": exportPolicy, - "aggregate": aggregate, - "securityStyle": securityStyle, - "encryption": enableEncryption, - }).Debug("Creating Flexvol.") - - // Create the volume - volCreateResponse, err := d.API.VolumeCreate( - name, aggregate, size, spaceReserve, snapshotPolicy, unixPermissions, - exportPolicy, securityStyle, enableEncryption, snapshotReserveInt) - - if err = api.GetError(volCreateResponse, err); err != nil { - if zerr, ok := err.(api.ZapiError); ok { - // Handle case where the Create is passed to every Docker Swarm node - if zerr.Code() == azgo.EAPIERROR && strings.HasSuffix(strings.TrimSpace(zerr.Reason()), "Job exists") { - log.WithField("volume", name).Warn("Volume create job already exists, skipping volume create on this node.") - return nil - } 
- } - return fmt.Errorf("error creating volume: %v", err) - } - - // Disable '.snapshot' to allow official mysql container's chmod-in-init to work - if !enableSnapshotDir { - snapDirResponse, err := d.API.VolumeDisableSnapshotDirectoryAccess(name) - if err = api.GetError(snapDirResponse, err); err != nil { - return fmt.Errorf("error disabling snapshot directory access: %v", err) - } - } - - // Mount the volume at the specified junction - mountResponse, err := d.API.VolumeMount(name, "/"+name) - if err = api.GetError(mountResponse, err); err != nil { - return fmt.Errorf("error mounting volume to junction: %v", err) - } - - return nil -} - -// Create a volume clone -func (d *NASStorageDriver) CreateClone(volConfig *storage.VolumeConfig) error { - - name := volConfig.InternalName - source := volConfig.CloneSourceVolumeInternal - snapshot := volConfig.CloneSourceSnapshot - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateClone", - "Type": "NASStorageDriver", - "name": name, - "source": source, - "snapshot": snapshot, - } - log.WithFields(fields).Debug(">>>> CreateClone") - defer log.WithFields(fields).Debug("<<<< CreateClone") - } - - opts, err := d.GetVolumeOpts(volConfig, nil, make(map[string]sa.Request)) - if err != nil { - return err - } - - split, err := strconv.ParseBool(utils.GetV(opts, "splitOnClone", d.Config.SplitOnClone)) - if err != nil { - return fmt.Errorf("invalid boolean value for splitOnClone: %v", err) - } - - log.WithField("splitOnClone", split).Debug("Creating volume clone.") - return CreateOntapClone(name, source, snapshot, split, &d.Config, d.API) -} - -// Destroy the volume -func (d *NASStorageDriver) Destroy(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Destroy", - "Type": "NASStorageDriver", - "name": name, - } - log.WithFields(fields).Debug(">>>> Destroy") - defer log.WithFields(fields).Debug("<<<< Destroy") - } - - // TODO: If this is the parent of one or more clones, those clones have to split from this - // volume before it can be deleted, which means separate copies of those volumes. - // If there are a lot of clones on this volume, that could seriously balloon the amount of - // utilized space. Is that what we want? Or should we just deny the delete, and force the - // user to keep the volume around until all of the clones are gone? If we do that, need a - // way to list the clones. Maybe volume inspect. 
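// --- Illustrative aside (not part of the vendored Trident code) ---
// The Destroy path below treats ONTAP's "volume does not exist" error as a
// success so that repeated delete requests stay idempotent. A minimal,
// self-contained sketch of that pattern, using a hypothetical in-memory
// backend instead of the real ZAPI client:
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the EVOLUMEDOESNOTEXIST ZAPI code.
var errNotFound = errors.New("volume does not exist")

// fakeBackend is a stand-in for the storage API; it only tracks volume names.
type fakeBackend struct{ volumes map[string]bool }

func (b *fakeBackend) destroy(name string) error {
	if !b.volumes[name] {
		return errNotFound
	}
	delete(b.volumes, name)
	return nil
}

// destroyIdempotent deletes a volume and treats "not found" as success,
// mirroring the behavior of the driver code that follows.
func destroyIdempotent(b *fakeBackend, name string) error {
	if err := b.destroy(name); err != nil {
		if errors.Is(err, errNotFound) {
			fmt.Printf("volume %s already deleted\n", name)
			return nil
		}
		return fmt.Errorf("error destroying volume %s: %w", name, err)
	}
	return nil
}

func main() {
	b := &fakeBackend{volumes: map[string]bool{"trident_vol1": true}}
	fmt.Println(destroyIdempotent(b, "trident_vol1")) // <nil>
	fmt.Println(destroyIdempotent(b, "trident_vol1")) // <nil> (second call is a no-op)
}
// --- end aside ---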
- - volDestroyResponse, err := d.API.VolumeDestroy(name, true) - if err != nil { - return fmt.Errorf("error destroying volume %v: %v", name, err) - } - if zerr := api.NewZapiError(volDestroyResponse); !zerr.IsPassed() { - - // It's not an error if the volume no longer exists - if zerr.Code() == azgo.EVOLUMEDOESNOTEXIST { - log.WithField("volume", name).Warn("Volume already deleted.") - } else { - return fmt.Errorf("error destroying volume %v: %v", name, zerr) - } - } - - return nil -} - -func (d *NASStorageDriver) Import(volConfig *storage.VolumeConfig, originalName string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Import", - "Type": "NASStorageDriver", - "originalName": originalName, - "newName": volConfig.InternalName, - "notManaged": volConfig.ImportNotManaged, - } - log.WithFields(fields).Debug(">>>> Import") - defer log.WithFields(fields).Debug("<<<< Import") - } - - // Ensure the volume exists - flexvol, err := d.API.VolumeGet(originalName) - if err != nil { - return err - } else if flexvol == nil { - return fmt.Errorf("volume %s not found", originalName) - } - - // Validate the volume is what it should be - if flexvol.VolumeIdAttributesPtr != nil { - volumeIdAttrs := flexvol.VolumeIdAttributes() - if volumeIdAttrs.TypePtr != nil && volumeIdAttrs.Type() != "rw" { - log.WithField("originalName", originalName).Error("Could not import volume, type is not rw.") - return fmt.Errorf("volume %s type is %s, not rw", originalName, volumeIdAttrs.Type()) - } - } - - // Get the volume size - if flexvol.VolumeSpaceAttributesPtr == nil || flexvol.VolumeSpaceAttributesPtr.SizePtr == nil { - log.WithField("originalName", originalName).Errorf("Could not import volume, size not available") - return fmt.Errorf("volume %s size not available", originalName) - } - volConfig.Size = strconv.FormatInt(int64(flexvol.VolumeSpaceAttributesPtr.Size()), 10) - - // Rename the volume if Trident will manage its lifecycle - if !volConfig.ImportNotManaged { - renameResponse, err := d.API.VolumeRename(originalName, volConfig.InternalName) - if err = api.GetError(renameResponse, err); err != nil { - log.WithField("originalName", originalName).Errorf("Could not import volume, rename failed: %v", err) - return fmt.Errorf("volume %s rename failed: %v", originalName, err) - } - } - - // Make sure we're not importing a volume without a junction path when not managed - if volConfig.ImportNotManaged { - if flexvol.VolumeIdAttributesPtr == nil { - return fmt.Errorf("unable to read volume id attributes of volume %s", originalName) - } else if flexvol.VolumeIdAttributesPtr.JunctionPathPtr == nil || flexvol.VolumeIdAttributesPtr.JunctionPath() == "" { - return fmt.Errorf("junction path is not set for volume %s", originalName) - } - } - - return nil -} - -// Rename changes the name of a volume -func (d *NASStorageDriver) Rename(name string, new_name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Rename", - "Type": "NASStorageDriver", - "name": name, - } - log.WithFields(fields).Debug(">>>> Rename") - defer log.WithFields(fields).Debug("<<<< Rename") - } - - renameResponse, err := d.API.VolumeRename(name, new_name) - if err = api.GetError(renameResponse, err); err != nil { - log.WithField("name", name).Warnf("Could not rename volume: %v", err) - return fmt.Errorf("could not rename volume %s: %v", name, err) - } - - return nil -} - -// Publish the volume to the host specified in publishInfo. 
This method may or may not be running on the host -// where the volume will be mounted, so it should limit itself to updating access rules, initiator groups, etc. -// that require some host identity (but not locality) as well as storage controller API access. -func (d *NASStorageDriver) Publish(name string, publishInfo *utils.VolumePublishInfo) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Publish", - "Type": "NASStorageDriver", - "name": name, - } - log.WithFields(fields).Debug(">>>> Publish") - defer log.WithFields(fields).Debug("<<<< Publish") - } - - // Add fields needed by Attach - publishInfo.NfsPath = fmt.Sprintf("/%s", name) - publishInfo.NfsServerIP = d.Config.DataLIF - publishInfo.FilesystemType = "nfs" - publishInfo.MountOptions = d.Config.NfsMountOptions - - return nil -} - -// GetSnapshot gets a snapshot. To distinguish between an API error reading the snapshot -// and a non-existent snapshot, this method may return (nil, nil). -func (d *NASStorageDriver) GetSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshot", - "Type": "NASStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> GetSnapshot") - defer log.WithFields(fields).Debug("<<<< GetSnapshot") - } - - return GetSnapshot(snapConfig, &d.Config, d.API, d.API.VolumeSize) -} - -// Return the list of snapshots associated with the specified volume -func (d *NASStorageDriver) GetSnapshots(volConfig *storage.VolumeConfig) ([]*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshots", - "Type": "NASStorageDriver", - "volumeName": volConfig.InternalName, - } - log.WithFields(fields).Debug(">>>> GetSnapshots") - defer log.WithFields(fields).Debug("<<<< GetSnapshots") - } - - return GetSnapshots(volConfig, &d.Config, d.API, d.API.VolumeSize) -} - -// CreateSnapshot creates a snapshot for the given volume -func (d *NASStorageDriver) CreateSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateSnapshot", - "Type": "NASStorageDriver", - "snapshotName": internalSnapName, - "sourceVolume": internalVolName, - } - log.WithFields(fields).Debug(">>>> CreateSnapshot") - defer log.WithFields(fields).Debug("<<<< CreateSnapshot") - } - - return CreateSnapshot(snapConfig, &d.Config, d.API, d.API.VolumeSize) -} - -// RestoreSnapshot restores a volume (in place) from a snapshot. -func (d *NASStorageDriver) RestoreSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "RestoreSnapshot", - "Type": "NASStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> RestoreSnapshot") - defer log.WithFields(fields).Debug("<<<< RestoreSnapshot") - } - - return RestoreSnapshot(snapConfig, &d.Config, d.API) -} - -// DeleteSnapshot creates a snapshot of a volume. 
-func (d *NASStorageDriver) DeleteSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "DeleteSnapshot", - "Type": "NASStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> DeleteSnapshot") - defer log.WithFields(fields).Debug("<<<< DeleteSnapshot") - } - - return DeleteSnapshot(snapConfig, &d.Config, d.API) -} - -// Test for the existence of a volume -func (d *NASStorageDriver) Get(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Get", "Type": "NASStorageDriver"} - log.WithFields(fields).Debug(">>>> Get") - defer log.WithFields(fields).Debug("<<<< Get") - } - - return GetVolume(name, d.API, &d.Config) -} - -// Retrieve storage backend capabilities -func (d *NASStorageDriver) GetStorageBackendSpecs(backend *storage.Backend) error { - if d.Config.BackendName == "" { - // Use the old naming scheme if no name is specified - backend.Name = "ontapnas_" + d.Config.DataLIF - } else { - backend.Name = d.Config.BackendName - } - poolAttrs := d.getStoragePoolAttributes() - return getStorageBackendSpecsCommon(d, backend, poolAttrs) -} - -func (d *NASStorageDriver) getStoragePoolAttributes() map[string]sa.Offer { - - return map[string]sa.Offer{ - sa.BackendType: sa.NewStringOffer(d.Name()), - sa.Snapshots: sa.NewBoolOffer(true), - sa.Clones: sa.NewBoolOffer(true), - sa.Encryption: sa.NewBoolOffer(true), - sa.ProvisioningType: sa.NewStringOffer("thick", "thin"), - } -} - -func (d *NASStorageDriver) GetVolumeOpts( - volConfig *storage.VolumeConfig, - pool *storage.Pool, - requests map[string]sa.Request, -) (map[string]string, error) { - return getVolumeOptsCommon(volConfig, pool, requests), nil -} - -func (d *NASStorageDriver) GetInternalVolumeName(name string) string { - return getInternalVolumeNameCommon(d.Config.CommonStorageDriverConfig, name) -} - -func (d *NASStorageDriver) CreatePrepare(volConfig *storage.VolumeConfig) error { - return createPrepareCommon(d, volConfig) -} - -func (d *NASStorageDriver) CreateFollowup(volConfig *storage.VolumeConfig) error { - - volConfig.AccessInfo.NfsServerIP = d.Config.DataLIF - volConfig.AccessInfo.MountOptions = strings.TrimPrefix(d.Config.NfsMountOptions, "-o ") - volConfig.FileSystem = "" - - // Set correct junction path - flexvol, err := d.API.VolumeGet(volConfig.InternalName) - if err != nil { - return err - } else if flexvol == nil { - return fmt.Errorf("volume %s not found", volConfig.InternalName) - } - - if flexvol.VolumeIdAttributesPtr == nil { - return fmt.Errorf("error reading volume id attributes for volume %s", volConfig.InternalName) - } - if flexvol.VolumeIdAttributesPtr.JunctionPathPtr == nil || flexvol.VolumeIdAttributesPtr.JunctionPath() == "" { - // Flexvol is not mounted, we need to mount it - volConfig.AccessInfo.NfsPath = "/" + volConfig.InternalName - mountResponse, err := d.API.VolumeMount(volConfig.InternalName, volConfig.AccessInfo.NfsPath) - if err = api.GetError(mountResponse, err); err != nil { - return fmt.Errorf("error mounting volume to junction %s; %v", volConfig.AccessInfo.NfsPath, err) - } - } else { - volConfig.AccessInfo.NfsPath = flexvol.VolumeIdAttributesPtr.JunctionPath() - } - return nil -} - -func (d *NASStorageDriver) GetProtocol() tridentconfig.Protocol { - return tridentconfig.File -} - -func (d *NASStorageDriver) StoreConfig( - b *storage.PersistentStorageBackendConfig, -) { - 
drivers.SanitizeCommonStorageDriverConfig(d.Config.CommonStorageDriverConfig) - b.OntapConfig = &d.Config -} - -func (d *NASStorageDriver) GetExternalConfig() interface{} { - return getExternalConfig(d.Config) -} - -// GetVolumeExternal queries the storage backend for all relevant info about -// a single container volume managed by this driver and returns a VolumeExternal -// representation of the volume. -func (d *NASStorageDriver) GetVolumeExternal(name string) (*storage.VolumeExternal, error) { - - volumeAttributes, err := d.API.VolumeGet(name) - if err != nil { - return nil, err - } - - return d.getVolumeExternal(volumeAttributes), nil -} - -// GetVolumeExternalWrappers queries the storage backend for all relevant info about -// container volumes managed by this driver. It then writes a VolumeExternal -// representation of each volume to the supplied channel, closing the channel -// when finished. -func (d *NASStorageDriver) GetVolumeExternalWrappers( - channel chan *storage.VolumeExternalWrapper) { - - // Let the caller know we're done by closing the channel - defer close(channel) - - // Get all volumes matching the storage prefix - volumesResponse, err := d.API.VolumeGetAll(*d.Config.StoragePrefix) - if err = api.GetError(volumesResponse, err); err != nil { - channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err} - return - } - - // Convert all volumes to VolumeExternal and write them to the channel - if volumesResponse.Result.AttributesListPtr != nil { - for _, volume := range volumesResponse.Result.AttributesListPtr.VolumeAttributesPtr { - channel <- &storage.VolumeExternalWrapper{Volume: d.getVolumeExternal(&volume), Error: nil} - } - } -} - -// getVolumeExternal is a private method that accepts info about a volume -// as returned by the storage backend and formats it as a VolumeExternal -// object. 
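// --- Illustrative aside (not part of the vendored Trident code) ---
// getVolumeExternal below recovers the caller-facing volume name by stripping
// the configured storage prefix from the ONTAP name; the forward mapping
// (getInternalVolumeNameCommon, earlier in this file set) adds the prefix and
// replaces characters ONTAP disallows. A self-contained sketch of both
// directions, assuming a simple "trident_" prefix for the example:
package main

import (
	"fmt"
	"strings"
)

const storagePrefix = "trident_" // assumed example prefix

// toInternalName mirrors the forward mapping: prefix the name and swap
// hyphens and periods for underscores, since ONTAP disallows them.
func toInternalName(name string) string {
	internal := storagePrefix + name
	internal = strings.NewReplacer("-", "_", ".", "_").Replace(internal)
	return strings.ReplaceAll(internal, "__", "_")
}

// toExternalName is the reverse used when exporting volumes: drop the prefix
// if present (the character substitutions are not reversible).
func toExternalName(internal string) string {
	return strings.TrimPrefix(internal, storagePrefix)
}

func main() {
	internal := toInternalName("pvc-20ab.1cd")
	fmt.Println(internal)                 // trident_pvc_20ab_1cd
	fmt.Println(toExternalName(internal)) // pvc_20ab_1cd
}
// --- end aside ---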
-func (d *NASStorageDriver) getVolumeExternal( - volumeAttrs *azgo.VolumeAttributesType) *storage.VolumeExternal { - - volumeExportAttrs := volumeAttrs.VolumeExportAttributesPtr - volumeIDAttrs := volumeAttrs.VolumeIdAttributesPtr - volumeSecurityAttrs := volumeAttrs.VolumeSecurityAttributesPtr - volumeSecurityUnixAttrs := volumeSecurityAttrs.VolumeSecurityUnixAttributesPtr - volumeSpaceAttrs := volumeAttrs.VolumeSpaceAttributesPtr - volumeSnapshotAttrs := volumeAttrs.VolumeSnapshotAttributesPtr - - internalName := string(volumeIDAttrs.Name()) - name := internalName - if strings.HasPrefix(internalName, *d.Config.StoragePrefix) { - name = internalName[len(*d.Config.StoragePrefix):] - } - - volumeConfig := &storage.VolumeConfig{ - Version: tridentconfig.OrchestratorAPIVersion, - Name: name, - InternalName: internalName, - Size: strconv.FormatInt(int64(volumeSpaceAttrs.Size()), 10), - Protocol: tridentconfig.File, - SnapshotPolicy: volumeSnapshotAttrs.SnapshotPolicy(), - ExportPolicy: volumeExportAttrs.Policy(), - SnapshotDir: strconv.FormatBool(volumeSnapshotAttrs.SnapdirAccessEnabled()), - UnixPermissions: volumeSecurityUnixAttrs.Permissions(), - StorageClass: "", - AccessMode: tridentconfig.ReadWriteMany, - AccessInfo: utils.VolumeAccessInfo{}, - BlockSize: "", - FileSystem: "", - } - - return &storage.VolumeExternal{ - Config: volumeConfig, - Pool: volumeIDAttrs.ContainingAggregateName(), - } -} - -// GetUpdateType returns a bitmap populated with updates to the driver -func (d *NASStorageDriver) GetUpdateType(driverOrig storage.Driver) *roaring.Bitmap { - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetUpdateType", - "Type": "NASStorageDriver", - } - log.WithFields(fields).Debug(">>>> GetUpdateType") - defer log.WithFields(fields).Debug("<<<< GetUpdateType") - } - - bitmap := roaring.New() - dOrig, ok := driverOrig.(*NASStorageDriver) - if !ok { - bitmap.Add(storage.InvalidUpdate) - return bitmap - } - - if d.Config.DataLIF != dOrig.Config.DataLIF { - bitmap.Add(storage.VolumeAccessInfoChange) - } - - if d.Config.Password != dOrig.Config.Password { - bitmap.Add(storage.PasswordChange) - } - - if d.Config.Username != dOrig.Config.Username { - bitmap.Add(storage.UsernameChange) - } - - return bitmap -} - -// Resize expands the volume size. 
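// --- Illustrative aside (not part of the vendored Trident code) ---
// Resize below validates against the current Flexvol size and then passes the
// new size to ONTAP as a base-10 string. A compact, self-contained sketch of
// that size handling, with plain integers standing in for the ZAPI size
// lookups:
package main

import (
	"fmt"
	"strconv"
)

// planResize decides what to send to the backend for a resize request.
// It returns ("", nil) when no call is needed (already at the requested size),
// an error for shrink attempts, and otherwise the size string ONTAP expects.
func planResize(currentBytes, requestedBytes uint64) (string, error) {
	switch {
	case requestedBytes == currentBytes:
		return "", nil // nothing to do
	case requestedBytes < currentBytes:
		return "", fmt.Errorf("requested size %d is less than existing volume size %d",
			requestedBytes, currentBytes)
	default:
		return strconv.FormatUint(requestedBytes, 10), nil
	}
}

func main() {
	if sizeArg, err := planResize(1<<30, 2<<30); err == nil && sizeArg != "" {
		fmt.Println("would call volume-set-size with:", sizeArg) // "2147483648"
	}
}
// --- end aside ---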
-func (d *NASStorageDriver) Resize(volConfig *storage.VolumeConfig, sizeBytes uint64) error { - name := volConfig.InternalName - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Resize", - "Type": "NASStorageDriver", - "name": name, - "sizeBytes": sizeBytes, - } - log.WithFields(fields).Debug(">>>> Resize") - defer log.WithFields(fields).Debug("<<<< Resize") - } - - flexvolSize, err := resizeValidation(name, sizeBytes, d.API.VolumeExists, d.API.VolumeSize) - if err != nil { - return err - } - - if flexvolSize == sizeBytes { - return nil - } - - if aggrLimitsErr := checkAggregateLimitsForFlexvol(name, sizeBytes, d.Config, d.GetAPI()); aggrLimitsErr != nil { - return aggrLimitsErr - } - - if _, _, checkVolumeSizeLimitsError := drivers.CheckVolumeSizeLimits(sizeBytes, d.Config.CommonStorageDriverConfig); checkVolumeSizeLimitsError != nil { - return checkVolumeSizeLimitsError - } - - response, err := d.API.VolumeSetSize(name, strconv.FormatUint(sizeBytes, 10)) - if err = api.GetError(response.Result, err); err != nil { - log.WithField("error", err).Error("Volume resize failed.") - return fmt.Errorf("volume resize failed") - } - - volConfig.Size = strconv.FormatUint(sizeBytes, 10) - return nil -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_nas_flexgroup.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_nas_flexgroup.go deleted file mode 100644 index bfa223581..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_nas_flexgroup.go +++ /dev/null @@ -1,758 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. -package ontap - -import ( - "errors" - "fmt" - "math" - "strconv" - "strings" - - "github.com/RoaringBitmap/roaring" - log "github.com/sirupsen/logrus" - - tridentconfig "github.com/netapp/trident/config" - "github.com/netapp/trident/storage" - sa "github.com/netapp/trident/storage_attribute" - drivers "github.com/netapp/trident/storage_drivers" - "github.com/netapp/trident/storage_drivers/ontap/api" - "github.com/netapp/trident/storage_drivers/ontap/api/azgo" - "github.com/netapp/trident/utils" -) - -// NASFlexGroupStorageDriver is for NFS FlexGroup storage provisioning -type NASFlexGroupStorageDriver struct { - initialized bool - Config drivers.OntapStorageDriverConfig - API *api.Client - Telemetry *Telemetry -} - -func (d *NASFlexGroupStorageDriver) GetConfig() *drivers.OntapStorageDriverConfig { - return &d.Config -} - -func (d *NASFlexGroupStorageDriver) GetAPI() *api.Client { - return d.API -} - -func (d *NASFlexGroupStorageDriver) GetTelemetry() *Telemetry { - d.Telemetry.Telemetry = tridentconfig.OrchestratorTelemetry - return d.Telemetry -} - -// Name is for returning the name of this driver -func (d *NASFlexGroupStorageDriver) Name() string { - return drivers.OntapNASFlexGroupStorageDriverName -} - -// Initialize from the provided config -func (d *NASFlexGroupStorageDriver) Initialize( - context tridentconfig.DriverContext, configJSON string, commonConfig *drivers.CommonStorageDriverConfig, -) error { - - if commonConfig.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Initialize", "Type": "NASFlexGroupStorageDriver"} - log.WithFields(fields).Debug(">>>> Initialize") - defer log.WithFields(fields).Debug("<<<< Initialize") - } - - // Parse the config - config, err := InitializeOntapConfig(context, configJSON, commonConfig) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - - d.API, err = 
InitializeOntapDriver(config) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - - err = d.validate() - if err != nil { - return fmt.Errorf("error validating %s driver: %v", d.Name(), err) - } - - // Set up the autosupport heartbeat - d.Telemetry = NewOntapTelemetry(d) - d.Telemetry.Start() - - d.initialized = true - return nil -} - -func (d *NASFlexGroupStorageDriver) Initialized() bool { - return d.initialized -} - -func (d *NASFlexGroupStorageDriver) Terminate() { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Terminate", "Type": "NASFlexGroupStorageDriver"} - log.WithFields(fields).Debug(">>>> Terminate") - defer log.WithFields(fields).Debug("<<<< Terminate") - } - if d.Telemetry != nil { - d.Telemetry.Stop() - } - d.initialized = false -} - -// Validate the driver configuration and execution environment -func (d *NASFlexGroupStorageDriver) validate() error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "validate", "Type": "NASFlexGroupStorageDriver"} - log.WithFields(fields).Debug(">>>> validate") - defer log.WithFields(fields).Debug("<<<< validate") - } - - if !d.API.SupportsFeature(api.NetAppFlexGroups) { - return fmt.Errorf("ONTAP version does not support FlexGroups") - } - - err := ValidateNASDriver(d.API, &d.Config) - if err != nil { - return fmt.Errorf("driver validation failed: %v", err) - } - - return nil -} - -// Create a volume with the specified options -func (d *NASFlexGroupStorageDriver) Create( - volConfig *storage.VolumeConfig, storagePool *storage.Pool, volAttributes map[string]sa.Request, -) error { - - name := volConfig.InternalName - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Create", - "Type": "NASFlexGroupStorageDriver", - "name": name, - "attrs": volAttributes, - } - log.WithFields(fields).Debug(">>>> Create") - defer log.WithFields(fields).Debug("<<<< Create") - } - - // If the volume already exists, bail out - volExists, err := d.API.FlexGroupExists(name) - if err != nil { - return fmt.Errorf("error checking for existing FlexGroup: %v", err) - } - if volExists { - return drivers.NewVolumeExistsError(name) - } - - // Determine volume size in bytes - requestedSize, err := utils.ConvertSizeToBytes(volConfig.Size) - if err != nil { - return fmt.Errorf("could not convert volume size %s: %v", volConfig.Size, err) - } - sizeBytes, err := strconv.ParseUint(requestedSize, 10, 64) - if err != nil { - return fmt.Errorf("%v is an invalid volume size: %v", volConfig.Size, err) - } - sizeBytes, err = GetVolumeSize(sizeBytes, d.Config) - if err != nil { - return err - } - if sizeBytes > math.MaxInt64 { - return errors.New("invalid size requested") - } - size := int(sizeBytes) - - // Get the aggregates assigned to the SVM. There must be at least one! 
- vserverAggrs, err := d.API.VserverGetAggregateNames() - if err != nil { - return err - } - - if len(vserverAggrs) == 0 { - err = fmt.Errorf("no assigned aggregates found") - return err - } - - vserverAggrNames := make([]azgo.AggrNameType, 0) - for _, aggrName := range vserverAggrs { - vserverAggrNames = append(vserverAggrNames, azgo.AggrNameType(aggrName)) - } - - log.WithFields(log.Fields{ - "aggregates": vserverAggrs, - }).Debug("Read aggregates assigned to SVM.") - - // Get options - opts, err := d.GetVolumeOpts(volConfig, storagePool, volAttributes) - if err != nil { - return err - } - - // get options with default fallback values - // see also: ontap_common.go#PopulateConfigurationDefaults - spaceReserve := utils.GetV(opts, "spaceReserve", d.Config.SpaceReserve) - snapshotPolicy := utils.GetV(opts, "snapshotPolicy", d.Config.SnapshotPolicy) - snapshotReserve := utils.GetV(opts, "snapshotReserve", d.Config.SnapshotReserve) - unixPermissions := utils.GetV(opts, "unixPermissions", d.Config.UnixPermissions) - snapshotDir := utils.GetV(opts, "snapshotDir", d.Config.SnapshotDir) - exportPolicy := utils.GetV(opts, "exportPolicy", d.Config.ExportPolicy) - securityStyle := utils.GetV(opts, "securityStyle", d.Config.SecurityStyle) - encryption := utils.GetV(opts, "encryption", d.Config.Encryption) - - // limits checks are not currently applicable to the Flexgroups driver, ommited here on purpose - - enableSnapshotDir, err := strconv.ParseBool(snapshotDir) - if err != nil { - return fmt.Errorf("invalid boolean value for snapshotDir: %v", err) - } - - enableEncryption, err := strconv.ParseBool(encryption) - if err != nil { - return fmt.Errorf("invalid boolean value for encryption: %v", err) - } - - snapshotReserveInt, err := GetSnapshotReserve(snapshotPolicy, snapshotReserve) - if err != nil { - return fmt.Errorf("invalid value for snapshotReserve: %v", err) - } - - log.WithFields(log.Fields{ - "name": name, - "size": size, - "spaceReserve": spaceReserve, - "snapshotPolicy": snapshotPolicy, - "snapshotReserve": snapshotReserveInt, - "unixPermissions": unixPermissions, - "snapshotDir": enableSnapshotDir, - "exportPolicy": exportPolicy, - "aggregates": vserverAggrNames, - "securityStyle": securityStyle, - "encryption": enableEncryption, - }).Debug("Creating FlexGroup.") - - // Create the FlexGroup - _, err = d.API.FlexGroupCreate( - name, size, vserverAggrNames, spaceReserve, snapshotPolicy, unixPermissions, - exportPolicy, securityStyle, enableEncryption, snapshotReserveInt) - - if err != nil { - return fmt.Errorf("error creating FlexGroup %v: %v", name, err) - } - - // Disable '.snapshot' to allow official mysql container's chmod-in-init to work - if !enableSnapshotDir { - _, err := d.API.FlexGroupVolumeDisableSnapshotDirectoryAccess(name) - if err != nil { - return fmt.Errorf("error disabling snapshot directory access: %v", err) - } - } - - // Mount the volume at the specified junction - mountResponse, err := d.API.VolumeMount(name, "/"+name) - if err = api.GetError(mountResponse, err); err != nil { - return fmt.Errorf("error mounting volume to junction: %v", err) - } - - return nil -} - -// CreateClone creates a volume clone -func (d *NASFlexGroupStorageDriver) CreateClone(volConfig *storage.VolumeConfig) error { - return errors.New("clones are not supported for FlexGroups") -} - -// Import brings an existing volume under trident's control -func (d *NASFlexGroupStorageDriver) Import(volConfig *storage.VolumeConfig, originalName string) error { - - if d.Config.DebugTraceFlags["method"] { - 
fields := log.Fields{ - "Method": "Import", - "Type": "NASFlexGroupStorageDriver", - "originalName": originalName, - "notManaged": volConfig.ImportNotManaged, - } - log.WithFields(fields).Debug(">>>> Import") - defer log.WithFields(fields).Debug("<<<< Import") - } - - // Ensure the volume exists - flexgroup, err := d.API.FlexGroupGet(originalName) - if err != nil { - return err - } else if flexgroup == nil { - return fmt.Errorf("could not import volume %s, volume not found", originalName) - } - - // Validate the volume is what it should be - if flexgroup.VolumeIdAttributesPtr != nil { - volumeIdAttrs := flexgroup.VolumeIdAttributes() - if volumeIdAttrs.TypePtr != nil && volumeIdAttrs.Type() != "rw" { - log.WithField("originalName", originalName).Error("Could not import volume, type is not rw.") - return fmt.Errorf("could not import volume %s, type is %s, not rw", originalName, volumeIdAttrs.Type()) - } - } - - // Get the volume size - if flexgroup.VolumeSpaceAttributesPtr == nil || flexgroup.VolumeSpaceAttributesPtr.SizePtr == nil { - log.WithField("originalName", originalName).Errorf("Could not import volume, size not available") - return fmt.Errorf("could not import volume %s, size not available", originalName) - } - volConfig.Size = strconv.FormatInt(int64(flexgroup.VolumeSpaceAttributesPtr.Size()), 10) - - // We cannot rename flexgroups, so internal name should match the imported originalName - volConfig.InternalName = originalName - - // Make sure we're not importing a volume without a junction path when not managed - if volConfig.ImportNotManaged { - if flexgroup.VolumeIdAttributesPtr == nil { - return fmt.Errorf("unable to read volume id attributes of volume %s", originalName) - } else if flexgroup.VolumeIdAttributesPtr.JunctionPathPtr == nil || flexgroup.VolumeIdAttributesPtr. - JunctionPath() == "" { - return fmt.Errorf("junction path is not set for volume %s", originalName) - } - } - - return nil -} - -// Rename changes the name of a volume -func (d *NASFlexGroupStorageDriver) Rename(name string, newName string) error { - // Flexgroups cannot be renamed - return nil -} - -// Destroy the volume -func (d *NASFlexGroupStorageDriver) Destroy(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Destroy", - "Type": "NASFlexGroupStorageDriver", - "name": name, - } - log.WithFields(fields).Debug(">>>> Destroy") - defer log.WithFields(fields).Debug("<<<< Destroy") - } - - // Needed once FlexGroups support clones - // TODO: If this is the parent of one or more clones, those clones have to split from this - // volume before it can be deleted, which means separate copies of those volumes. - // If there are a lot of clones on this volume, that could seriously balloon the amount of - // utilized space. Is that what we want? Or should we just deny the delete, and force the - // user to keep the volume around until all of the clones are gone? If we do that, need a - // way to list the clones. Maybe volume inspect. - - _, err := d.API.FlexGroupDestroy(name, true) - if err != nil { - return fmt.Errorf("error destroying FlexGroup %v: %v", name, err) - } - - return nil -} - -// Publish the volume to the host specified in publishInfo. This method may or may not be running on the host -// where the volume will be mounted, so it should limit itself to updating access rules, initiator groups, etc. -// that require some host identity (but not locality) as well as storage controller API access. 
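// --- Illustrative aside (not part of the vendored Trident code) ---
// Publish below only fills in the NFS attach details; it does not touch the
// host. A minimal sketch of that contract, using a local struct that mirrors
// the fields the driver sets on utils.VolumePublishInfo (the struct here is
// hypothetical, defined only for this example):
package main

import "fmt"

type nfsPublishInfo struct {
	NfsServerIP    string
	NfsPath        string
	FilesystemType string
	MountOptions   string
}

// publishNFS derives the attach information from the backend config and the
// volume name, the same kind of mapping Publish performs.
func publishNFS(dataLIF, mountOptions, volumeName string) nfsPublishInfo {
	return nfsPublishInfo{
		NfsServerIP:    dataLIF,
		NfsPath:        fmt.Sprintf("/%s", volumeName),
		FilesystemType: "nfs",
		MountOptions:   mountOptions,
	}
}

func main() {
	info := publishNFS("10.0.0.10", "nfsvers=4.1", "trident_pvc_1234")
	fmt.Printf("mount -o %s -t nfs %s:%s /mnt/vol\n",
		info.MountOptions, info.NfsServerIP, info.NfsPath)
}
// --- end aside ---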
-func (d *NASFlexGroupStorageDriver) Publish(name string, publishInfo *utils.VolumePublishInfo) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Publish", - "Type": "NASFlexGroupStorageDriver", - "name": name, - } - log.WithFields(fields).Debug(">>>> Publish") - defer log.WithFields(fields).Debug("<<<< Publish") - } - - // Add fields needed by Attach - publishInfo.NfsPath = fmt.Sprintf("/%s", name) - publishInfo.NfsServerIP = d.Config.DataLIF - publishInfo.FilesystemType = "nfs" - publishInfo.MountOptions = d.Config.NfsMountOptions - - return nil -} - -// GetSnapshot gets a snapshot. To distinguish between an API error reading the snapshot -// and a non-existent snapshot, this method may return (nil, nil). -func (d *NASFlexGroupStorageDriver) GetSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshot", - "Type": "NASFlexGroupStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> GetSnapshot") - defer log.WithFields(fields).Debug("<<<< GetSnapshot") - } - - //return GetSnapshot(snapConfig, &d.Config, d.API, d.API.FlexGroupSize) - return nil, drivers.NewSnapshotsNotSupportedError(d.Name()) -} - -// Return the list of snapshots associated with the specified volume -func (d *NASFlexGroupStorageDriver) GetSnapshots(volConfig *storage.VolumeConfig) ([]*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshots", - "Type": "NASFlexGroupStorageDriver", - "volumeName": volConfig.InternalName, - } - log.WithFields(fields).Debug(">>>> GetSnapshots") - defer log.WithFields(fields).Debug("<<<< GetSnapshots") - } - - //return GetSnapshots(volConfig, &d.Config, d.API, d.API.FlexGroupSize) - return make([]*storage.Snapshot, 0), nil -} - -// CreateSnapshot creates a snapshot for the given volume -func (d *NASFlexGroupStorageDriver) CreateSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateSnapshot", - "Type": "NASFlexGroupStorageDriver", - "snapshotName": internalSnapName, - "sourceVolume": internalVolName, - } - log.WithFields(fields).Debug(">>>> CreateSnapshot") - defer log.WithFields(fields).Debug("<<<< CreateSnapshot") - } - - //return CreateSnapshot(snapConfig, &d.Config, d.API, d.API.FlexGroupSize) - return nil, drivers.NewSnapshotsNotSupportedError(d.Name()) -} - -// RestoreSnapshot restores a volume (in place) from a snapshot. -func (d *NASFlexGroupStorageDriver) RestoreSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "RestoreSnapshot", - "Type": "NASFlexGroupStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> RestoreSnapshot") - defer log.WithFields(fields).Debug("<<<< RestoreSnapshot") - } - - //return RestoreSnapshot(snapConfig, &d.Config, d.API) - return drivers.NewSnapshotsNotSupportedError(d.Name()) -} - -// DeleteSnapshot creates a snapshot of a volume. 
-func (d *NASFlexGroupStorageDriver) DeleteSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "DeleteSnapshot", - "Type": "NASFlexGroupStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> DeleteSnapshot") - defer log.WithFields(fields).Debug("<<<< DeleteSnapshot") - } - - //return DeleteSnapshot(snapConfig, &d.Config, d.API) - return drivers.NewSnapshotsNotSupportedError(d.Name()) -} - -// Tests the existence of a FlexGroup. Returns nil if the FlexGroup -// exists and an error otherwise. -func (d *NASFlexGroupStorageDriver) Get(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Get", "Type": "NASFlexGroupStorageDriver"} - log.WithFields(fields).Debug(">>>> Get") - defer log.WithFields(fields).Debug("<<<< Get") - } - - volExists, err := d.API.FlexGroupExists(name) - if err != nil { - return fmt.Errorf("error checking for existing volume: %v", err) - } - if !volExists { - log.WithField("FlexGroup", name).Debug("FlexGroup not found.") - return fmt.Errorf("volume %s does not exist", name) - } - - return nil -} - -// Retrieve storage backend capabilities -func (d *NASFlexGroupStorageDriver) GetStorageBackendSpecs(backend *storage.Backend) error { - if d.Config.BackendName == "" { - // Use the old naming scheme if no name is specified - backend.Name = "ontapnasfg_" + d.Config.DataLIF - } else { - backend.Name = d.Config.BackendName - } - poolAttrs := d.getStoragePoolAttributes() - return d.getStorageBackendSpecs(backend, poolAttrs) -} - -// getStorageBackendSpecsCommon discovers the aggregates assigned to the configured SVM. The aggregates assigned to -// a SVM represent a single StoragePool for a FlexGroup. The default attributes for a FlexGroup are assigned to the pool. -func (d *NASFlexGroupStorageDriver) getStorageBackendSpecs( - backend *storage.Backend, poolAttributes map[string]sa.Offer) (err error) { - - config := d.GetConfig() - vserverAggrs, err := d.vserverAggregates(config.SVM) - if err != nil { - return err - } - - log.WithFields(log.Fields{ - "svm": config.SVM, - "aggregates": vserverAggrs, - }).Debug("Read aggregates assigned to SVM.") - - // For a FlexGroup all aggregates that belong to the SVM represent the storage pool. - pool := storage.NewStoragePool(backend, config.SVM) - for attrName, offer := range poolAttributes { - pool.Attributes[attrName] = offer - } - backend.AddStoragePool(pool) - - return -} - -func (d *NASFlexGroupStorageDriver) vserverAggregates(svmName string) ([]string, error) { - var err error - // Get the aggregates assigned to the SVM. There must be at least one! 
- vserverAggrs, err := d.API.VserverGetAggregateNames() - if err != nil { - return nil, err - } - if len(vserverAggrs) == 0 { - err = fmt.Errorf("SVM %s has no assigned aggregates", svmName) - return nil, err - } - - return vserverAggrs, nil -} - -func (d *NASFlexGroupStorageDriver) getStoragePoolAttributes() map[string]sa.Offer { - - return map[string]sa.Offer{ - sa.BackendType: sa.NewStringOffer(d.Name()), - sa.Snapshots: sa.NewBoolOffer(true), - sa.Encryption: sa.NewBoolOffer(true), - sa.Clones: sa.NewBoolOffer(false), - sa.ProvisioningType: sa.NewStringOffer("thick", "thin"), - } -} - -func (d *NASFlexGroupStorageDriver) GetVolumeOpts( - volConfig *storage.VolumeConfig, - pool *storage.Pool, - requests map[string]sa.Request, -) (map[string]string, error) { - return getVolumeOptsCommon(volConfig, pool, requests), nil -} - -func (d *NASFlexGroupStorageDriver) GetInternalVolumeName(name string) string { - return getInternalVolumeNameCommon(d.Config.CommonStorageDriverConfig, name) -} - -func (d *NASFlexGroupStorageDriver) CreatePrepare(volConfig *storage.VolumeConfig) error { - return createPrepareCommon(d, volConfig) -} - -func (d *NASFlexGroupStorageDriver) CreateFollowup(volConfig *storage.VolumeConfig) error { - - volConfig.AccessInfo.NfsServerIP = d.Config.DataLIF - volConfig.AccessInfo.MountOptions = strings.TrimPrefix(d.Config.NfsMountOptions, "-o ") - volConfig.FileSystem = "" - - // Set correct junction path - flexgroup, err := d.API.FlexGroupGet(volConfig.InternalName) - if err != nil { - return err - } else if flexgroup == nil { - return fmt.Errorf("could not create volume %s, volume not found", volConfig.InternalName) - } - - if flexgroup.VolumeIdAttributesPtr == nil { - return errors.New("error reading volume id attributes") - } - if flexgroup.VolumeIdAttributesPtr.JunctionPathPtr == nil || flexgroup.VolumeIdAttributesPtr.JunctionPath() == "" { - // Flexgroup is not mounted, we need to mount it - volConfig.AccessInfo.NfsPath = "/" + volConfig.InternalName - mountResponse, err := d.API.VolumeMount(volConfig.InternalName, volConfig.AccessInfo.NfsPath) - if err = api.GetError(mountResponse, err); err != nil { - return fmt.Errorf("error mounting volume to junction %s; %v", volConfig.AccessInfo.NfsPath, err) - } - } else { - volConfig.AccessInfo.NfsPath = flexgroup.VolumeIdAttributesPtr.JunctionPath() - } - - return nil -} - -func (d *NASFlexGroupStorageDriver) GetProtocol() tridentconfig.Protocol { - return tridentconfig.File -} - -func (d *NASFlexGroupStorageDriver) StoreConfig( - b *storage.PersistentStorageBackendConfig, -) { - drivers.SanitizeCommonStorageDriverConfig(d.Config.CommonStorageDriverConfig) - b.OntapConfig = &d.Config -} - -func (d *NASFlexGroupStorageDriver) GetExternalConfig() interface{} { - return getExternalConfig(d.Config) -} - -// GetVolumeExternal queries the storage backend for all relevant info about -// a single container volume managed by this driver and returns a VolumeExternal -// representation of the volume. -func (d *NASFlexGroupStorageDriver) GetVolumeExternal(name string) (*storage.VolumeExternal, error) { - - volumeAttributes, err := d.API.FlexGroupGet(name) - if err != nil { - return nil, err - } - - return d.getVolumeExternal(volumeAttributes), nil -} - -// GetVolumeExternalWrappers queries the storage backend for all relevant info about -// container volumes managed by this driver. It then writes a VolumeExternal -// representation of each volume to the supplied channel, closing the channel -// when finished. 
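// --- Illustrative aside (not part of the vendored Trident code) ---
// GetVolumeExternalWrappers below follows the usual Go producer pattern: the
// driver writes each result (or a single error) to the supplied channel and
// closes it when done, so the caller can simply range over the channel. A
// self-contained sketch with a simplified wrapper type:
package main

import "fmt"

// wrapper stands in for storage.VolumeExternalWrapper: either a volume name
// or an error.
type wrapper struct {
	Volume string
	Err    error
}

// listVolumes is the producer: it closes the channel when finished, which is
// what lets the consumer's range loop terminate.
func listVolumes(names []string, ch chan<- wrapper) {
	defer close(ch)
	for _, n := range names {
		ch <- wrapper{Volume: n}
	}
}

func main() {
	ch := make(chan wrapper)
	go listVolumes([]string{"trident_vol1", "trident_vol2"}, ch)
	for w := range ch { // exits once the producer closes ch
		if w.Err != nil {
			fmt.Println("error:", w.Err)
			continue
		}
		fmt.Println("volume:", w.Volume)
	}
}
// --- end aside ---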
-func (d *NASFlexGroupStorageDriver) GetVolumeExternalWrappers( - channel chan *storage.VolumeExternalWrapper) { - - // Let the caller know we're done by closing the channel - defer close(channel) - - // Get all volumes matching the storage prefix - volumesResponse, err := d.API.FlexGroupGetAll(*d.Config.StoragePrefix) - if err = api.GetError(volumesResponse, err); err != nil { - channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err} - return - } - - // Convert all volumes to VolumeExternal and write them to the channel - if volumesResponse.Result.AttributesListPtr != nil { - for _, volume := range volumesResponse.Result.AttributesListPtr.VolumeAttributesPtr { - channel <- &storage.VolumeExternalWrapper{Volume: d.getVolumeExternal(&volume), Error: nil} - } - } -} - -// getVolumeExternal is a private method that accepts info about a volume -// as returned by the storage backend and formats it as a VolumeExternal -// object. -func (d *NASFlexGroupStorageDriver) getVolumeExternal( - volumeAttrs *azgo.VolumeAttributesType) *storage.VolumeExternal { - - volumeExportAttrs := volumeAttrs.VolumeExportAttributesPtr - volumeIDAttrs := volumeAttrs.VolumeIdAttributesPtr - volumeSecurityAttrs := volumeAttrs.VolumeSecurityAttributesPtr - volumeSecurityUnixAttrs := volumeSecurityAttrs.VolumeSecurityUnixAttributesPtr - volumeSpaceAttrs := volumeAttrs.VolumeSpaceAttributesPtr - volumeSnapshotAttrs := volumeAttrs.VolumeSnapshotAttributesPtr - - internalName := string(volumeIDAttrs.Name()) - name := internalName - if strings.HasPrefix(internalName, *d.Config.StoragePrefix) { - name = internalName[len(*d.Config.StoragePrefix):] - } - - volumeConfig := &storage.VolumeConfig{ - Version: tridentconfig.OrchestratorAPIVersion, - Name: name, - InternalName: internalName, - Size: strconv.FormatInt(int64(volumeSpaceAttrs.Size()), 10), - Protocol: tridentconfig.File, - SnapshotPolicy: volumeSnapshotAttrs.SnapshotPolicy(), - ExportPolicy: volumeExportAttrs.Policy(), - SnapshotDir: strconv.FormatBool(volumeSnapshotAttrs.SnapdirAccessEnabled()), - UnixPermissions: volumeSecurityUnixAttrs.Permissions(), - StorageClass: "", - AccessMode: tridentconfig.ReadWriteMany, - AccessInfo: utils.VolumeAccessInfo{}, - BlockSize: "", - FileSystem: "", - } - - return &storage.VolumeExternal{ - Config: volumeConfig, - Pool: volumeIDAttrs.OwningVserverName(), - } -} - -// GetUpdateType returns a bitmap populated with updates to the driver -func (d *NASFlexGroupStorageDriver) GetUpdateType(driverOrig storage.Driver) *roaring.Bitmap { - bitmap := roaring.New() - dOrig, ok := driverOrig.(*NASFlexGroupStorageDriver) - if !ok { - bitmap.Add(storage.InvalidUpdate) - return bitmap - } - - if d.Config.DataLIF != dOrig.Config.DataLIF { - bitmap.Add(storage.VolumeAccessInfoChange) - } - - if d.Config.Password != dOrig.Config.Password { - bitmap.Add(storage.PasswordChange) - } - - if d.Config.Username != dOrig.Config.Username { - bitmap.Add(storage.UsernameChange) - } - - return bitmap -} - -// Resize expands the FlexGroup size. 
-func (d *NASFlexGroupStorageDriver) Resize(volConfig *storage.VolumeConfig, sizeBytes uint64) error { - - name := volConfig.InternalName - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Resize", - "Type": "NASFlexGroupStorageDriver", - "name": name, - "sizeBytes": sizeBytes, - } - log.WithFields(fields).Debug(">>>> Resize") - defer log.WithFields(fields).Debug("<<<< Resize") - } - - flexvolSize, err := resizeValidation(name, sizeBytes, d.API.FlexGroupExists, d.API.FlexGroupSize) - if err != nil { - return err - } - - if flexvolSize == sizeBytes { - return nil - } - - _, err = d.API.FlexGroupSetSize(name, strconv.FormatUint(sizeBytes, 10)) - if err != nil { - log.WithField("error", err).Error("FlexGroup resize failed.") - return fmt.Errorf("flexgroup resize failed") - } - - volConfig.Size = strconv.FormatUint(sizeBytes, 10) - return nil -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_nas_qtree.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_nas_qtree.go deleted file mode 100644 index 5ae0fff30..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_nas_qtree.go +++ /dev/null @@ -1,1647 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package ontap - -import ( - "errors" - "fmt" - "math/rand" - "strconv" - "strings" - "sync" - "time" - - "github.com/RoaringBitmap/roaring" - "github.com/google/uuid" - log "github.com/sirupsen/logrus" - - tridentconfig "github.com/netapp/trident/config" - "github.com/netapp/trident/storage" - sa "github.com/netapp/trident/storage_attribute" - drivers "github.com/netapp/trident/storage_drivers" - "github.com/netapp/trident/storage_drivers/ontap/api" - "github.com/netapp/trident/storage_drivers/ontap/api/azgo" - "github.com/netapp/trident/utils" -) - -const ( - deletedQtreeNamePrefix = "deleted_" - maxQtreeNameLength = 64 - maxQtreesPerFlexvol = 200 - defaultPruneFlexvolsPeriodSecs = uint64(600) // default to 10 minutes - defaultResizeQuotasPeriodSecs = uint64(60) // default to 1 minute - defaultEmptyFlexvolDeferredDeletePeriodSecs = uint64(28800) // default to 8 hours - pruneTask = "prune" - resizeTask = "resize" -) - -// NASQtreeStorageDriver is for NFS storage provisioning of qtrees -type NASQtreeStorageDriver struct { - initialized bool - Config drivers.OntapStorageDriverConfig - API *api.Client - Telemetry *Telemetry - quotaResizeMap map[string]bool - flexvolNamePrefix string - flexvolExportPolicy string - housekeepingTasks map[string]*HousekeepingTask - housekeepingWaitGroup *sync.WaitGroup - sharedLockID string - emptyFlexvolMap map[string]time.Time - emptyFlexvolDeferredDeletePeriod time.Duration -} - -func (d *NASQtreeStorageDriver) GetConfig() *drivers.OntapStorageDriverConfig { - return &d.Config -} - -func (d *NASQtreeStorageDriver) GetAPI() *api.Client { - return d.API -} - -func (d *NASQtreeStorageDriver) GetTelemetry() *Telemetry { - d.Telemetry.Telemetry = tridentconfig.OrchestratorTelemetry - return d.Telemetry -} - -// Name is for returning the name of this driver -func (d *NASQtreeStorageDriver) Name() string { - return drivers.OntapNASQtreeStorageDriverName -} - -func (d *NASQtreeStorageDriver) FlexvolNamePrefix() string { - return d.flexvolNamePrefix -} - -// Initialize from the provided config -func (d *NASQtreeStorageDriver) Initialize( - context tridentconfig.DriverContext, configJSON string, commonConfig *drivers.CommonStorageDriverConfig, -) error { - - if commonConfig.DebugTraceFlags["method"] { - fields := 
log.Fields{"Method": "Initialize", "Type": "NASQtreeStorageDriver"} - log.WithFields(fields).Debug(">>>> Initialize") - defer log.WithFields(fields).Debug("<<<< Initialize") - } - - // Parse the config - config, err := InitializeOntapConfig(context, configJSON, commonConfig) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - - d.API, err = InitializeOntapDriver(config) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - - // Remap context for artifact naming so the names remain stable over time - var artifactPrefix string - switch context { - case tridentconfig.ContextDocker: - artifactPrefix = artifactPrefixDocker - case tridentconfig.ContextKubernetes, tridentconfig.ContextCSI: - artifactPrefix = artifactPrefixKubernetes - default: - return fmt.Errorf("unknown driver context: %s", context) - } - - // Set up internal driver state - d.quotaResizeMap = make(map[string]bool) - d.flexvolNamePrefix = fmt.Sprintf("%s_qtree_pool_%s_", artifactPrefix, *d.Config.StoragePrefix) - d.flexvolNamePrefix = strings.Replace(d.flexvolNamePrefix, "__", "_", -1) - d.flexvolExportPolicy = fmt.Sprintf("%s_qtree_pool_export_policy", artifactPrefix) - d.sharedLockID = d.API.SVMUUID + "-" + *d.Config.StoragePrefix - d.emptyFlexvolMap = make(map[string]time.Time) - - log.WithFields(log.Fields{ - "FlexvolNamePrefix": d.flexvolNamePrefix, - "FlexvolExportPolicy": d.flexvolExportPolicy, - "SharedLockID": d.sharedLockID, - }).Debugf("Qtree driver settings.") - - err = d.validate() - if err != nil { - return fmt.Errorf("error validating %s driver: %v", d.Name(), err) - } - - // Ensure all quotas are in force after a driver restart - d.queueAllFlexvolsForQuotaResize() - - // Start periodic housekeeping tasks like cleaning up unused Flexvols - d.housekeepingWaitGroup = &sync.WaitGroup{} - d.housekeepingTasks = make(map[string]*HousekeepingTask, 2) - //pruneTasks := []func(){d.pruneUnusedFlexvols, d.reapDeletedQtrees} - //d.housekeepingTasks[pruneTask] = NewPruneTask(d, pruneTasks) - resizeTasks := []func(){d.resizeQuotas} - d.housekeepingTasks[resizeTask] = NewResizeTask(d, resizeTasks) - for _, task := range d.housekeepingTasks { - task.Start() - } - - // Set up the autosupport heartbeat - d.Telemetry = NewOntapTelemetry(d) - d.Telemetry.Start() - - d.initialized = true - return nil -} - -func (d *NASQtreeStorageDriver) Initialized() bool { - return d.initialized -} - -func (d *NASQtreeStorageDriver) Terminate() { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Terminate", "Type": "NASQtreeStorageDriver"} - log.WithFields(fields).Debug(">>>> Terminate") - defer log.WithFields(fields).Debug("<<<< Terminate") - } - - if d.housekeepingWaitGroup != nil { - for _, task := range d.housekeepingTasks { - task.Stop() - } - } - - if d.Telemetry != nil { - d.Telemetry.Stop() - } - - if d.housekeepingWaitGroup != nil { - log.Debug("Waiting for housekeeping tasks to exit.") - d.housekeepingWaitGroup.Wait() - } - - d.initialized = false -} - -// Validate the driver configuration and execution environment -func (d *NASQtreeStorageDriver) validate() error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "validate", "Type": "NASQtreeStorageDriver"} - log.WithFields(fields).Debug(">>>> validate") - defer log.WithFields(fields).Debug("<<<< validate") - } - - err := ValidateNASDriver(d.API, &d.Config) - if err != nil { - return fmt.Errorf("driver 
validation failed: %v", err) - } - - // Make sure we have an export policy for all the Flexvols we create - err = d.ensureDefaultExportPolicy() - if err != nil { - return fmt.Errorf("error configuring export policy: %v", err) - } - - return nil -} - -// Create a qtree-backed volume with the specified options -func (d *NASQtreeStorageDriver) Create( - volConfig *storage.VolumeConfig, storagePool *storage.Pool, volAttributes map[string]sa.Request, -) error { - - name := volConfig.InternalName - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Create", - "Type": "NASQtreeStorageDriver", - "name": name, - "attrs": volAttributes, - } - log.WithFields(fields).Debug(">>>> Create") - defer log.WithFields(fields).Debug("<<<< Create") - } - - // Ensure any Flexvol we create won't be pruned before we place a qtree on it - utils.Lock("create", d.sharedLockID) - defer utils.Unlock("create", d.sharedLockID) - - // Generic user-facing message - createError := errors.New("volume creation failed") - - // Ensure volume doesn't already exist - exists, existsInFlexvol, err := d.API.QtreeExists(name, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing volume: %v.", err) - return createError - } - if exists { - log.WithFields(log.Fields{"qtree": name, "flexvol": existsInFlexvol}).Debug("Qtree already exists.") - return drivers.NewVolumeExistsError(name) - } - - // Determine volume size in bytes - requestedSize, err := utils.ConvertSizeToBytes(volConfig.Size) - if err != nil { - return fmt.Errorf("could not convert volume size %s: %v", volConfig.Size, err) - } - sizeBytes, err := strconv.ParseUint(requestedSize, 10, 64) - if err != nil { - return fmt.Errorf("%v is an invalid volume size: %v", volConfig.Size, err) - } - sizeBytes, err = GetVolumeSize(sizeBytes, d.Config) - if err != nil { - return err - } - - // Ensure qtree name isn't too long - if len(name) > maxQtreeNameLength { - return fmt.Errorf("volume %s name exceeds the limit of %d characters", name, maxQtreeNameLength) - } - - // Get options - opts, err := d.GetVolumeOpts(volConfig, storagePool, volAttributes) - if err != nil { - return err - } - - // Get Flexvol options with default fallback values - // see also: ontap_common.go#PopulateConfigurationDefaults - aggregate := utils.GetV(opts, "aggregate", d.Config.Aggregate) - spaceReserve := utils.GetV(opts, "spaceReserve", d.Config.SpaceReserve) - snapshotPolicy := utils.GetV(opts, "snapshotPolicy", d.Config.SnapshotPolicy) - snapshotDir := utils.GetV(opts, "snapshotDir", d.Config.SnapshotDir) - encryption := utils.GetV(opts, "encryption", d.Config.Encryption) - - if aggrLimitsErr := checkAggregateLimits(aggregate, spaceReserve, sizeBytes, d.Config, d.GetAPI()); aggrLimitsErr != nil { - return aggrLimitsErr - } - - enableSnapshotDir, err := strconv.ParseBool(snapshotDir) - if err != nil { - return fmt.Errorf("invalid boolean value for snapshotDir: %v", err) - } - - enableEncryption, err := strconv.ParseBool(encryption) - if err != nil { - return fmt.Errorf("invalid boolean value for encryption: %v", err) - } - - // Make sure we have a Flexvol for the new qtree - flexvol, err := d.ensureFlexvolForQtree( - aggregate, spaceReserve, snapshotPolicy, enableSnapshotDir, enableEncryption, sizeBytes, opts, d.Config) - if err != nil { - log.Errorf("Flexvol location/creation failed. 
%v", err) - return createError - } - - // Grow or shrink the Flexvol as needed - err = d.resizeFlexvol(flexvol, sizeBytes) - if err != nil { - return createError - } - - // Get qtree options with default fallback values - unixPermissions := utils.GetV(opts, "unixPermissions", d.Config.UnixPermissions) - exportPolicy := utils.GetV(opts, "exportPolicy", d.Config.ExportPolicy) - securityStyle := utils.GetV(opts, "securityStyle", d.Config.SecurityStyle) - - // Create the qtree - qtreeResponse, err := d.API.QtreeCreate(name, flexvol, unixPermissions, exportPolicy, securityStyle) - if err = api.GetError(qtreeResponse, err); err != nil { - log.Errorf("Qtree creation failed. %v", err) - return createError - } - - // Add the quota - err = d.setQuotaForQtree(name, flexvol, sizeBytes) - if err != nil { - log.Errorf("Qtree quota definition failed. %v", err) - return createError - } - - return nil -} - -// Create a volume clone -func (d *NASQtreeStorageDriver) CreateClone(volConfig *storage.VolumeConfig) error { - - name := volConfig.InternalName - source := volConfig.CloneSourceVolumeInternal - snapshot := volConfig.CloneSourceSnapshot - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateClone", - "Type": "NASQtreeStorageDriver", - "name": name, - "source": source, - "snapshot": snapshot, - } - log.WithFields(fields).Debug(">>>> CreateClone") - defer log.WithFields(fields).Debug("<<<< CreateClone") - } - - return fmt.Errorf("cloning is not supported by backend type %s", d.Name()) -} - -func (d *NASQtreeStorageDriver) Import(volConfig *storage.VolumeConfig, originalName string) error { - return errors.New("import is not implemented") -} - -func (d *NASQtreeStorageDriver) Rename(name string, newName string) error { - return errors.New("rename is not implemented") -} - -// Destroy the volume -func (d *NASQtreeStorageDriver) Destroy(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Destroy", - "Type": "NASQtreeStorageDriver", - "name": name, - } - log.WithFields(fields).Debug(">>>> Destroy") - defer log.WithFields(fields).Debug("<<<< Destroy") - } - - // Ensure the deleted qtree reaping job doesn't interfere with this workflow - utils.Lock("destroy", d.sharedLockID) - defer utils.Unlock("destroy", d.sharedLockID) - - // Generic user-facing message - deleteError := errors.New("volume deletion failed") - - exists, flexvol, err := d.API.QtreeExists(name, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing qtree. %v", err) - return deleteError - } - if !exists { - log.WithField("qtree", name).Warn("Qtree not found.") - return nil - } - - // Rename qtree so it doesn't show up in lists while ONTAP is deleting it in the background. - // Ensure the deleted name doesn't exceed the qtree name length limit of 64 characters. - path := fmt.Sprintf("/vol/%s/%s", flexvol, name) - deletedName := deletedQtreeNamePrefix + name + "_" + utils.RandomString(5) - if len(deletedName) > maxQtreeNameLength { - trimLength := len(deletedQtreeNamePrefix) + 10 - deletedName = deletedQtreeNamePrefix + name[trimLength:] + "_" + utils.RandomString(5) - } - deletedPath := fmt.Sprintf("/vol/%s/%s", flexvol, deletedName) - - renameResponse, err := d.API.QtreeRename(path, deletedPath) - if err = api.GetError(renameResponse, err); err != nil { - log.Errorf("Qtree rename failed. %v", err) - return deleteError - } - - // Destroy the qtree in the background. If this fails, try to restore the original qtree name. 
- destroyResponse, err := d.API.QtreeDestroyAsync(deletedPath, true) - if err = api.GetError(destroyResponse, err); err != nil { - log.Errorf("Qtree async delete failed. %v", err) - defer d.API.QtreeRename(deletedPath, path) - return deleteError - } - - return nil -} - -// Publish the volume to the host specified in publishInfo. This method may or may not be running on the host -// where the volume will be mounted, so it should limit itself to updating access rules, initiator groups, etc. -// that require some host identity (but not locality) as well as storage controller API access. -func (d *NASQtreeStorageDriver) Publish(name string, publishInfo *utils.VolumePublishInfo) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Publish", - "Type": "NASQtreeStorageDriver", - "name": name, - } - log.WithFields(fields).Debug(">>>> Publish") - defer log.WithFields(fields).Debug("<<<< Publish") - } - - // Check if qtree exists, and find its Flexvol so we can build the export location - exists, flexvol, err := d.API.QtreeExists(name, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing qtree. %v", err) - return errors.New("volume mount failed") - } - if !exists { - log.WithField("qtree", name).Debug("Qtree not found.") - return fmt.Errorf("volume %s not found", name) - } - - // Add fields needed by Attach - publishInfo.NfsPath = fmt.Sprintf("/%s/%s", flexvol, name) - publishInfo.NfsServerIP = d.Config.DataLIF - publishInfo.FilesystemType = "nfs" - publishInfo.MountOptions = d.Config.NfsMountOptions - - return nil -} - -// GetSnapshot returns a snapshot of a volume, or an error if it does not exist. -func (d *NASQtreeStorageDriver) GetSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshot", - "Type": "NASQtreeStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> GetSnapshot") - defer log.WithFields(fields).Debug("<<<< GetSnapshot") - } - - return nil, drivers.NewSnapshotsNotSupportedError(d.Name()) -} - -// Return the list of snapshots associated with the specified volume -func (d *NASQtreeStorageDriver) GetSnapshots(volConfig *storage.VolumeConfig) ([]*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshots", - "Type": "NASQtreeStorageDriver", - "volumeName": volConfig.InternalName, - } - log.WithFields(fields).Debug(">>>> GetSnapshots") - defer log.WithFields(fields).Debug("<<<< GetSnapshots") - } - - // Qtrees can't have snapshots, so return an empty list - return []*storage.Snapshot{}, nil -} - -// CreateSnapshot creates a snapshot for the given volume -func (d *NASQtreeStorageDriver) CreateSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateSnapshot", - "Type": "NASQtreeStorageDriver", - "snapshotName": snapConfig.InternalName, - "sourceVolume": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> CreateSnapshot") - defer log.WithFields(fields).Debug("<<<< CreateSnapshot") - } - - return nil, drivers.NewSnapshotsNotSupportedError(d.Name()) -} - -// RestoreSnapshot restores a volume (in place) from a snapshot. 
-func (d *NASQtreeStorageDriver) RestoreSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "RestoreSnapshot", - "Type": "NASQtreeStorageDriver", - "snapshotName": snapConfig.InternalName, - "sourceVolume": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> RestoreSnapshot") - defer log.WithFields(fields).Debug("<<<< RestoreSnapshot") - } - - return drivers.NewSnapshotsNotSupportedError(d.Name()) -} - -// DeleteSnapshot creates a snapshot of a volume. -func (d *NASQtreeStorageDriver) DeleteSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "DeleteSnapshot", - "Type": "NASQtreeStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> DeleteSnapshot") - defer log.WithFields(fields).Debug("<<<< DeleteSnapshot") - } - - return drivers.NewSnapshotsNotSupportedError(d.Name()) -} - -// Test for the existence of a volume -func (d *NASQtreeStorageDriver) Get(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Get", "Type": "NASQtreeStorageDriver"} - log.WithFields(fields).Debug(">>>> Get") - defer log.WithFields(fields).Debug("<<<< Get") - } - - // Generic user-facing message - getError := fmt.Errorf("volume %s not found", name) - - exists, flexvol, err := d.API.QtreeExists(name, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing qtree. %v", err) - return getError - } - if !exists { - log.WithField("qtree", name).Debug("Qtree not found.") - return getError - } - - log.WithFields(log.Fields{"qtree": name, "flexvol": flexvol}).Debug("Qtree found.") - - return nil -} - -// ensureFlexvolForQtree accepts a set of Flexvol characteristics and either finds one to contain a new -// qtree or it creates a new Flexvol with the needed attributes. -func (d *NASQtreeStorageDriver) ensureFlexvolForQtree( - aggregate, spaceReserve, snapshotPolicy string, enableSnapshotDir bool, enableEncryption bool, - sizeBytes uint64, opts map[string]string, config drivers.OntapStorageDriverConfig, -) (string, error) { - - shouldLimitVolumeSize, flexvolQuotaSizeLimit, checkVolumeSizeLimitsError := drivers.CheckVolumeSizeLimits( - sizeBytes, config.CommonStorageDriverConfig) - if checkVolumeSizeLimitsError != nil { - return "", checkVolumeSizeLimitsError - } - - // Check if a suitable Flexvol already exists - flexvol, err := d.getFlexvolForQtree(aggregate, spaceReserve, snapshotPolicy, enableSnapshotDir, - enableEncryption, sizeBytes, shouldLimitVolumeSize, flexvolQuotaSizeLimit) - if err != nil { - return "", fmt.Errorf("error finding Flexvol for qtree: %v", err) - } - - // Found one! - if flexvol != "" { - return flexvol, nil - } - - // Nothing found, so create a suitable Flexvol - flexvol, err = d.createFlexvolForQtree( - aggregate, spaceReserve, snapshotPolicy, enableSnapshotDir, enableEncryption) - if err != nil { - return "", fmt.Errorf("error creating Flexvol for qtree: %v", err) - } - - return flexvol, nil -} - -// createFlexvolForQtree creates a new Flexvol matching the specified attributes for -// the purpose of containing qtrees supplied as container volumes by this driver. -// Once this method returns, the Flexvol exists, is mounted, and has a default tree -// quota. 
-func (d *NASQtreeStorageDriver) createFlexvolForQtree( - aggregate, spaceReserve, snapshotPolicy string, enableSnapshotDir bool, enableEncryption bool, -) (string, error) { - - flexvol := d.FlexvolNamePrefix() + utils.RandomString(10) - size := "1g" - unixPermissions := "0711" - exportPolicy := d.flexvolExportPolicy - securityStyle := "unix" - - snapshotReserveInt, err := GetSnapshotReserve(snapshotPolicy, d.Config.SnapshotReserve) - if err != nil { - return "", fmt.Errorf("invalid value for snapshotReserve: %v", err) - } - - log.WithFields(log.Fields{ - "name": flexvol, - "aggregate": aggregate, - "size": size, - "spaceReserve": spaceReserve, - "snapshotPolicy": snapshotPolicy, - "snapshotReserve": snapshotReserveInt, - "unixPermissions": unixPermissions, - "snapshotDir": enableSnapshotDir, - "exportPolicy": exportPolicy, - "securityStyle": securityStyle, - "encryption": enableEncryption, - }).Debug("Creating Flexvol for qtrees.") - - // Create the Flexvol - createResponse, err := d.API.VolumeCreate( - flexvol, aggregate, size, spaceReserve, snapshotPolicy, unixPermissions, - exportPolicy, securityStyle, enableEncryption, snapshotReserveInt) - if err = api.GetError(createResponse, err); err != nil { - return "", fmt.Errorf("error creating Flexvol: %v", err) - } - - // Disable '.snapshot' as needed - if !enableSnapshotDir { - snapDirResponse, err := d.API.VolumeDisableSnapshotDirectoryAccess(flexvol) - if err = api.GetError(snapDirResponse, err); err != nil { - defer d.API.VolumeDestroy(flexvol, true) - return "", fmt.Errorf("error disabling snapshot directory access: %v", err) - } - } - - // Mount the volume at the specified junction - mountResponse, err := d.API.VolumeMount(flexvol, "/"+flexvol) - if err = api.GetError(mountResponse, err); err != nil { - defer d.API.VolumeDestroy(flexvol, true) - return "", fmt.Errorf("error mounting Flexvol: %v", err) - } - - // Create the default quota rule so we can use quota-resize for new qtrees - err = d.addDefaultQuotaForFlexvol(flexvol) - if err != nil { - defer d.API.VolumeDestroy(flexvol, true) - return "", fmt.Errorf("error adding default quota to Flexvol: %v", err) - } - - return flexvol, nil -} - -// getFlexvolForQtree returns a Flexvol (from the set of existing Flexvols) that -// matches the specified Flexvol attributes and does not already contain more -// than the maximum configured number of qtrees. No matching Flexvols is not -// considered an error. If more than one matching Flexvol is found, one of those -// is returned at random. 
-func (d *NASQtreeStorageDriver) getFlexvolForQtree( - aggregate, spaceReserve, snapshotPolicy string, enableSnapshotDir bool, enableEncryption bool, - sizeBytes uint64, shouldLimitFlexvolQuotaSize bool, flexvolQuotaSizeLimit uint64, -) (string, error) { - - // Get all volumes matching the specified attributes - volListResponse, err := d.API.VolumeListByAttrs( - d.FlexvolNamePrefix(), aggregate, spaceReserve, snapshotPolicy, enableSnapshotDir, enableEncryption) - - if err = api.GetError(volListResponse, err); err != nil { - return "", fmt.Errorf("error enumerating Flexvols: %v", err) - } - - // Weed out the Flexvols: - // 1) already having too many qtrees - // 2) exceeding size limits - var volumes []string - if volListResponse.Result.AttributesListPtr != nil { - for _, volAttrs := range volListResponse.Result.AttributesListPtr.VolumeAttributesPtr { - volIDAttrs := volAttrs.VolumeIdAttributes() - volName := string(volIDAttrs.Name()) - - // skip flexvols over the size limit - if shouldLimitFlexvolQuotaSize { - sizeWithRequest, err := d.getOptimalSizeForFlexvol(volName, sizeBytes) - if err != nil { - log.Errorf("Error checking size for existing qtree. %v %v", volName, err) - continue - } - if sizeWithRequest > flexvolQuotaSizeLimit { - log.Debugf("Flexvol quota size for %v is over the limit of %v", volName, flexvolQuotaSizeLimit) - continue - } - } - - count, err := d.API.QtreeCount(volName) - if err != nil { - return "", fmt.Errorf("error enumerating qtrees: %v", err) - } - - if count < maxQtreesPerFlexvol { - volumes = append(volumes, volName) - } - } - } - - // Pick a Flexvol. If there are multiple matches, pick one at random. - switch len(volumes) { - case 0: - return "", nil - case 1: - return volumes[0], nil - default: - return volumes[rand.Intn(len(volumes))], nil - } -} - -// getOptimalSizeForFlexvol sums up all the disk limit quota rules on a Flexvol and adds the size of -// the new qtree being added as well as the current Flexvol snapshot reserve. This value may be used -// to grow (or shrink) the Flexvol as new qtrees are being added. -func (d *NASQtreeStorageDriver) getOptimalSizeForFlexvol( - flexvol string, newQtreeSizeBytes uint64, -) (uint64, error) { - - // Get more info about the Flexvol - volAttrs, err := d.API.VolumeGet(flexvol) - if err != nil { - return 0, err - } - volSpaceAttrs := volAttrs.VolumeSpaceAttributes() - snapReserveDivisor := 1.0 - (float64(volSpaceAttrs.PercentageSnapshotReserve()) / 100.0) - - totalDiskLimitBytes, err := d.getTotalHardDiskLimitQuota(flexvol) - if err != nil { - return 0, err - } - - usableSpaceBytes := float64(newQtreeSizeBytes + totalDiskLimitBytes) - flexvolSizeBytes := uint64(usableSpaceBytes / snapReserveDivisor) - - log.WithFields(log.Fields{ - "flexvol": flexvol, - "snapReserveDivisor": snapReserveDivisor, - "totalDiskLimitBytes": totalDiskLimitBytes, - "newQtreeSizeBytes": newQtreeSizeBytes, - "flexvolSizeBytes": flexvolSizeBytes, - }).Debug("Calculated optimal size for Flexvol with new qtree.") - - return flexvolSizeBytes, nil -} - -// addDefaultQuotaForFlexvol adds a default quota rule to a Flexvol so that quotas for -// new qtrees may be added on demand with simple quota resize instead of a heavyweight -// quota reinitialization. 
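For reference, the sizing rule implemented by getOptimalSizeForFlexvol above reduces to a small piece of arithmetic: the new qtree size plus the sum of the existing hard disk-limit quotas, scaled up so the Flexvol's snapshot reserve still fits. A minimal, self-contained sketch of that calculation (the function and names below are illustrative, not part of the Trident code being removed):

package main

import "fmt"

// optimalFlexvolSizeBytes returns the Flexvol size needed so that, after the
// snapshot reserve percentage is carved out, the remaining space still covers
// every existing tree quota plus the qtree being added.
func optimalFlexvolSizeBytes(newQtreeBytes, totalDiskLimitBytes uint64, snapshotReservePct int) uint64 {
    usable := float64(newQtreeBytes + totalDiskLimitBytes)
    divisor := 1.0 - float64(snapshotReservePct)/100.0
    return uint64(usable / divisor)
}

func main() {
    // 10 GiB new qtree, 50 GiB of existing quotas, 5% snapshot reserve.
    fmt.Println(optimalFlexvolSizeBytes(10<<30, 50<<30, 5))
}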
-func (d *NASQtreeStorageDriver) addDefaultQuotaForFlexvol(flexvol string) error { - - response, err := d.API.QuotaSetEntry("", flexvol, "", "tree", "-") - if err = api.GetError(response, err); err != nil { - return fmt.Errorf("error adding default quota: %v", err) - } - - if err := d.disableQuotas(flexvol, true); err != nil { - log.Warningf("Could not disable quotas after adding a default quota: %v", err) - } - - if err := d.enableQuotas(flexvol, true); err != nil { - log.Warningf("Could not enable quotas after adding a default quota: %v", err) - } - - return nil -} - -// setQuotaForQtree adds a tree quota to a Flexvol/qtree with a hard disk size limit if it doesn't exist. -// If the quota already exists the hard disk size limit is updated. -func (d *NASQtreeStorageDriver) setQuotaForQtree(qtree, flexvol string, sizeBytes uint64) error { - - target := fmt.Sprintf("/vol/%s/%s", flexvol, qtree) - sizeKB := strconv.FormatUint(sizeBytes/1024, 10) - - response, err := d.API.QuotaSetEntry("", flexvol, target, "tree", sizeKB) - if err = api.GetError(response, err); err != nil { - return fmt.Errorf("error adding qtree quota: %v", err) - } - - // Mark this Flexvol as needing a quota resize - d.quotaResizeMap[flexvol] = true - - return nil -} - -// getQuotaDiskLimitSize returns the disk limit size for the specified quota. -func (d *NASQtreeStorageDriver) getQuotaDiskLimitSize(name string, flexvol string) (uint64, error) { - quotaTarget := fmt.Sprintf("/vol/%s/%s", flexvol, name) - quota, err := d.API.QuotaGetEntry(quotaTarget) - if err != nil { - return 0, err - } - - quotaSize := uint64(convertDiskLimitToBytes(quota.DiskLimit())) - if quotaSize == 0 { - return 0, fmt.Errorf("unable to determine quota size") - } - return quotaSize, nil -} - -// enableQuotas disables quotas on a Flexvol, optionally waiting for the operation to finish. -func (d *NASQtreeStorageDriver) disableQuotas(flexvol string, wait bool) error { - - status, err := d.getQuotaStatus(flexvol) - if err != nil { - return fmt.Errorf("error disabling quotas: %v", err) - } - if status == "corrupt" { - return fmt.Errorf("error disabling quotas: quotas are corrupt on Flexvol %s", flexvol) - } - - if status != "off" { - offResponse, err := d.API.QuotaOff(flexvol) - if err = api.GetError(offResponse, err); err != nil { - return fmt.Errorf("error disabling quotas: %v", err) - } - } - - if wait { - for status != "off" { - time.Sleep(1 * time.Second) - - status, err = d.getQuotaStatus(flexvol) - if err != nil { - return fmt.Errorf("error disabling quotas: %v", err) - } - if status == "corrupt" { - return fmt.Errorf("error disabling quotas: quotas are corrupt on flexvol %s", flexvol) - } - } - } - - return nil -} - -// enableQuotas enables quotas on a Flexvol, optionally waiting for the operation to finish. 
-func (d *NASQtreeStorageDriver) enableQuotas(flexvol string, wait bool) error { - - status, err := d.getQuotaStatus(flexvol) - if err != nil { - return fmt.Errorf("error enabling quotas: %v", err) - } - if status == "corrupt" { - return fmt.Errorf("error enabling quotas: quotas are corrupt on flexvol %s", flexvol) - } - - if status == "off" { - onResponse, err := d.API.QuotaOn(flexvol) - if err = api.GetError(onResponse, err); err != nil { - return fmt.Errorf("error enabling quotas: %v", err) - } - } - - if wait { - for status != "on" { - time.Sleep(1 * time.Second) - - status, err = d.getQuotaStatus(flexvol) - if err != nil { - return fmt.Errorf("error enabling quotas: %v", err) - } - if status == "corrupt" { - return fmt.Errorf("error enabling quotas: quotas are corrupt on flexvol %s", flexvol) - } - } - } - - return nil -} - -// queueAllFlexvolsForQuotaResize flags every Flexvol managed by this driver as -// needing a quota resize. This is called once on driver startup to handle the -// case where the driver was shut down with pending quota resize operations. -func (d *NASQtreeStorageDriver) queueAllFlexvolsForQuotaResize() { - - // Get list of Flexvols managed by this driver - volumeListResponse, err := d.API.VolumeList(d.FlexvolNamePrefix()) - if err = api.GetError(volumeListResponse, err); err != nil { - log.Errorf("Error listing Flexvols: %v", err) - } - - if volumeListResponse.Result.AttributesListPtr != nil { - for _, volAttrs := range volumeListResponse.Result.AttributesListPtr.VolumeAttributesPtr { - volIDAttrs := volAttrs.VolumeIdAttributes() - flexvol := string(volIDAttrs.Name()) - d.quotaResizeMap[flexvol] = true - } - } -} - -// resizeQuotas may be called by a background task, or by a method that changed -// the qtree population on a Flexvol. Flexvols needing an update must be flagged -// in quotaResizeMap. Any failures that occur are simply logged, and the resize -// operation will be attempted each time this method is called until it succeeds. 
-func (d *NASQtreeStorageDriver) resizeQuotas() { - - // Ensure we don't forget any Flexvol that is involved in a qtree provisioning workflow - utils.Lock("resize", d.sharedLockID) - defer utils.Unlock("resize", d.sharedLockID) - - log.Debug("Housekeeping, resizing quotas.") - - for flexvol, resize := range d.quotaResizeMap { - - if resize { - resizeResponse, err := d.API.QuotaResize(flexvol) - if err != nil { - log.WithFields(log.Fields{"flexvol": flexvol, "error": err}).Debug("Error resizing quotas.") - continue - } - if zerr := api.NewZapiError(resizeResponse); !zerr.IsPassed() { - - if zerr.Code() == azgo.EVOLUMEDOESNOTEXIST { - // Volume gone, so no need to try again - log.WithField("flexvol", flexvol).Debug("Volume does not exist.") - delete(d.quotaResizeMap, flexvol) - } else { - log.WithFields(log.Fields{"flexvol": flexvol, "error": zerr}).Debug("Error resizing quotas.") - } - - continue - } - - log.WithField("flexvol", flexvol).Debug("Started quota resize.") - - // Resize start succeeded, so no need to try again - delete(d.quotaResizeMap, flexvol) - } - } -} - -// getQuotaStatus returns the status of the quotas on a Flexvol -func (d *NASQtreeStorageDriver) getQuotaStatus(flexvol string) (string, error) { - - statusResponse, err := d.API.QuotaStatus(flexvol) - if err = api.GetError(statusResponse, err); err != nil { - return "", fmt.Errorf("error getting quota status for Flexvol %s: %v", flexvol, err) - } - - return statusResponse.Result.Status(), nil - -} - -// getTotalHardDiskLimitQuota returns the sum of all disk limit quota rules on a Flexvol -func (d *NASQtreeStorageDriver) getTotalHardDiskLimitQuota(flexvol string) (uint64, error) { - - listResponse, err := d.API.QuotaEntryList(flexvol) - if err != nil { - return 0, err - } - - var totalDiskLimitKB uint64 - - if listResponse.Result.AttributesListPtr != nil { - for _, rule := range listResponse.Result.AttributesListPtr.QuotaEntryPtr { - diskLimitKB, err := strconv.ParseUint(rule.DiskLimit(), 10, 64) - if err != nil { - continue - } - totalDiskLimitKB += diskLimitKB - } - } - - return totalDiskLimitKB * 1024, nil -} - -// pruneUnusedFlexvols is called periodically by a background task. Any Flexvols -// that are managed by this driver (discovered by virtue of having a well-known -// hardcoded prefix on their names) that have no qtrees are deleted. 
-func (d *NASQtreeStorageDriver) pruneUnusedFlexvols() { - - // Ensure we don't prune any Flexvol that is involved in a qtree provisioning workflow - utils.Lock("prune", d.sharedLockID) - defer utils.Unlock("prune", d.sharedLockID) - - log.Debug("Housekeeping, checking for managed Flexvols with no qtrees.") - - // Get list of Flexvols managed by this driver - volumeListResponse, err := d.API.VolumeList(d.FlexvolNamePrefix()) - if err = api.GetError(volumeListResponse, err); err != nil { - log.WithField("error", err).Error("Could not list Flexvols.") - return - } - - var flexvols []string - if volumeListResponse.Result.AttributesListPtr != nil { - for _, volAttrs := range volumeListResponse.Result.AttributesListPtr.VolumeAttributesPtr { - volIDAttrs := volAttrs.VolumeIdAttributes() - volName := string(volIDAttrs.Name()) - flexvols = append(flexvols, volName) - } - } - - // Update map of empty Flexvols - for _, flexvol := range flexvols { - - qtreeCount, err := d.API.QtreeCount(flexvol) - if err != nil { - // Couldn't count qtrees, so remove Flexvol from deletion map as a precaution - log.WithFields(log.Fields{"flexvol": flexvol, "error": err}).Warning("Could not count qtrees in Flexvol.") - delete(d.emptyFlexvolMap, flexvol) - } else if qtreeCount == 0 { - // No qtrees exist, so add Flexvol to map if it isn't there already - if _, ok := d.emptyFlexvolMap[flexvol]; !ok { - log.WithField("flexvol", flexvol).Debug("Flexvol has no qtrees, saving to delete deferral map.") - d.emptyFlexvolMap[flexvol] = time.Now() - } else { - log.WithField("flexvol", flexvol).Debug("Flexvol has no qtrees, already in delete deferral map.") - } - } else { - // Qtrees exist, so ensure Flexvol isn't in deletion map - log.WithFields(log.Fields{"flexvol": flexvol, "qtrees": qtreeCount}).Debug("Flexvol has qtrees.") - delete(d.emptyFlexvolMap, flexvol) - } - } - - // Destroy any Flexvol if it is devoid of qtrees and has remained empty for the configured time to live - for flexvol, initialEmptyTime := range d.emptyFlexvolMap { - - // If Flexvol is no longer known to the driver, remove from map and move on - if !utils.StringInSlice(flexvol, flexvols) { - log.WithField("flexvol", flexvol).Debug("Flexvol no longer extant, removing from delete deferral map.") - delete(d.emptyFlexvolMap, flexvol) - continue - } - - now := time.Now() - expirationTime := initialEmptyTime.Add(d.emptyFlexvolDeferredDeletePeriod) - if expirationTime.Before(now) { - log.WithField("flexvol", flexvol).Debug("Deleting managed Flexvol with no qtrees.") - volDestroyResponse, err := d.API.VolumeDestroy(flexvol, true) - if err = api.GetError(volDestroyResponse, err); err != nil { - log.WithFields(log.Fields{"flexvol": flexvol, "error": err}).Error("Could not delete Flexvol.") - } else { - delete(d.emptyFlexvolMap, flexvol) - } - } else { - log.WithFields(log.Fields{ - "flexvol": flexvol, - "timeToExpiration": expirationTime.Sub(now), - }).Debug("Flexvol with no qtrees not past expiration time.") - } - } -} - -// reapDeletedQtrees is called periodically by a background task. Any qtrees -// that have been deleted (discovered by virtue of having a well-known hardcoded -// prefix on their names) are destroyed. This is only needed for the exceptional case -// in which a qtree was renamed (prior to being destroyed) but the subsequent -// destroy call failed or was never made due to a process interruption. 
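The deferral logic in pruneUnusedFlexvols above follows a common pattern: remember when a Flexvol was first seen with no qtrees, and only destroy it once it has stayed empty past a grace period. A stripped-down sketch of that pattern under hypothetical names (this is not the Trident code itself):

package main

import (
    "fmt"
    "time"
)

// pruneEmpty destroys entries that have stayed empty longer than ttl and
// returns the names that were pruned. firstEmpty maps a volume name to the
// time it was first observed empty; the caller removes entries that gain
// qtrees again, and failed destroys stay in the map for the next pass.
func pruneEmpty(firstEmpty map[string]time.Time, ttl time.Duration, destroy func(string) error) []string {
    var pruned []string
    now := time.Now()
    for name, since := range firstEmpty {
        if now.Sub(since) < ttl {
            continue // still inside the grace period
        }
        if err := destroy(name); err != nil {
            continue // retried on the next housekeeping run
        }
        delete(firstEmpty, name)
        pruned = append(pruned, name)
    }
    return pruned
}

func main() {
    seen := map[string]time.Time{"trident_qtree_pool_old": time.Now().Add(-9 * time.Hour)}
    fmt.Println(pruneEmpty(seen, 8*time.Hour, func(string) error { return nil }))
}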
-func (d *NASQtreeStorageDriver) reapDeletedQtrees() { - - // Ensure we don't reap any qtree that is involved in a qtree delete workflow - utils.Lock("reap", d.sharedLockID) - defer utils.Unlock("reap", d.sharedLockID) - - log.Debug("Housekeeping, checking for deleted qtrees.") - - // Get all deleted qtrees in all Flexvols managed by this driver - prefix := deletedQtreeNamePrefix + *d.Config.StoragePrefix - listResponse, err := d.API.QtreeList(prefix, d.FlexvolNamePrefix()) - if err = api.GetError(listResponse, err); err != nil { - log.Errorf("Error listing deleted qtrees. %v", err) - return - } - - if listResponse.Result.AttributesListPtr != nil { - for _, qtree := range listResponse.Result.AttributesListPtr.QtreeInfoPtr { - qtreePath := fmt.Sprintf("/vol/%s/%s", qtree.Volume(), qtree.Qtree()) - log.WithField("qtree", qtreePath).Debug("Housekeeping, reaping deleted qtree.") - d.API.QtreeDestroyAsync(qtreePath, true) - } - } -} - -// ensureDefaultExportPolicy checks for an export policy with a well-known name that will be suitable -// for setting on a Flexvol and will enable access to all qtrees therein. If the policy exists, the -// method assumes it created the policy itself and that all is good. If the policy does not exist, -// it is created and populated with a rule that allows access to NFS qtrees. This method should be -// called once during driver initialization. -func (d *NASQtreeStorageDriver) ensureDefaultExportPolicy() error { - - policyResponse, err := d.API.ExportPolicyCreate(d.flexvolExportPolicy) - if err != nil { - return fmt.Errorf("error creating export policy %s: %v", d.flexvolExportPolicy, err) - } - if zerr := api.NewZapiError(policyResponse); !zerr.IsPassed() { - if zerr.Code() == azgo.EDUPLICATEENTRY { - log.WithField("exportPolicy", d.flexvolExportPolicy).Debug("Export policy already exists.") - } else { - return fmt.Errorf("error creating export policy %s: %v", d.flexvolExportPolicy, zerr) - } - } - - return d.ensureDefaultExportPolicyRule() -} - -// ensureDefaultExportPolicyRule guarantees that the export policy used on Flexvols managed by this -// driver has at least one rule, which is necessary (but not always sufficient) to enable qtrees -// to be mounted by clients. 
-func (d *NASQtreeStorageDriver) ensureDefaultExportPolicyRule() error { - - ruleListResponse, err := d.API.ExportRuleGetIterRequest(d.flexvolExportPolicy) - if err = api.GetError(ruleListResponse, err); err != nil { - return fmt.Errorf("error listing export policy rules: %v", err) - } - - if ruleListResponse.Result.NumRecords() == 0 { - - // No rules, so create one - ruleResponse, err := d.API.ExportRuleCreate( - d.flexvolExportPolicy, "0.0.0.0/0", - []string{"nfs"}, []string{"any"}, []string{"any"}, []string{"any"}) - if err = api.GetError(ruleResponse, err); err != nil { - return fmt.Errorf("error creating export rule: %v", err) - } - } else { - log.WithField("exportPolicy", d.flexvolExportPolicy).Debug("Export policy has at least one rule.") - } - - return nil -} - -// Retrieve storage backend capabilities -func (d *NASQtreeStorageDriver) GetStorageBackendSpecs(backend *storage.Backend) error { - if d.Config.BackendName == "" { - // Use the old naming scheme if no name is specified - backend.Name = "ontapnaseco_" + d.Config.DataLIF - } else { - backend.Name = d.Config.BackendName - } - poolAttrs := d.getStoragePoolAttributes() - return getStorageBackendSpecsCommon(d, backend, poolAttrs) -} - -func (d *NASQtreeStorageDriver) getStoragePoolAttributes() map[string]sa.Offer { - - return map[string]sa.Offer{ - sa.BackendType: sa.NewStringOffer(d.Name()), - sa.Snapshots: sa.NewBoolOffer(false), - sa.Clones: sa.NewBoolOffer(false), - sa.Encryption: sa.NewBoolOffer(true), - sa.ProvisioningType: sa.NewStringOffer("thick", "thin"), - } -} - -func (d *NASQtreeStorageDriver) GetVolumeOpts( - volConfig *storage.VolumeConfig, - pool *storage.Pool, - requests map[string]sa.Request, -) (map[string]string, error) { - return getVolumeOptsCommon(volConfig, pool, requests), nil -} - -func (d *NASQtreeStorageDriver) GetInternalVolumeName(name string) string { - - if tridentconfig.UsingPassthroughStore { - // With a passthrough store, the name mapping must remain reversible - return *d.Config.StoragePrefix + name - } else { - // With an external store, any transformation of the name is fine - internal := drivers.GetCommonInternalVolumeName(d.Config.CommonStorageDriverConfig, name) - internal = strings.Replace(internal, "-", "_", -1) // ONTAP disallows hyphens - internal = strings.Replace(internal, ".", "_", -1) // ONTAP disallows periods - internal = strings.Replace(internal, "__", "_", -1) // Remove any double underscores - - if len(internal) > 64 { - // ONTAP imposes a 64-character limit on qtree names. We are unlikely to exceed - // that with CSI unless the storage prefix is really long, but non-CSI can hit the - // limit more easily. If the computed name is over the limit, the safest approach is - // simply to generate a new name. 
- internal = fmt.Sprintf("%s_%s", - strings.Replace(drivers.GetDefaultStoragePrefix(d.Config.DriverContext), "_", "", -1), - strings.Replace(uuid.New().String(), "-", "", -1)) - - log.WithFields(log.Fields{ - "Name": name, - "InternalName": internal, - }).Debug("Created UUID-based name for ontap-nas-economy volume.") - } - - return internal - } -} - -func (d *NASQtreeStorageDriver) CreatePrepare(volConfig *storage.VolumeConfig) error { - return createPrepareCommon(d, volConfig) -} - -func (d *NASQtreeStorageDriver) CreateFollowup(volConfig *storage.VolumeConfig) error { - - // Determine which Flexvol contains the qtree - exists, flexvol, err := d.API.QtreeExists(volConfig.InternalName, d.FlexvolNamePrefix()) - if err != nil { - return fmt.Errorf("could not determine if qtree %s exists: %v", volConfig.InternalName, err) - } - if !exists { - return fmt.Errorf("could not find qtree %s", volConfig.InternalName) - } - - // Set export path info on the volume config - volConfig.AccessInfo.NfsServerIP = d.Config.DataLIF - volConfig.AccessInfo.NfsPath = fmt.Sprintf("/%s/%s", flexvol, volConfig.InternalName) - volConfig.AccessInfo.MountOptions = strings.TrimPrefix(d.Config.NfsMountOptions, "-o ") - - return nil -} - -func (d *NASQtreeStorageDriver) GetProtocol() tridentconfig.Protocol { - return tridentconfig.File -} - -func (d *NASQtreeStorageDriver) StoreConfig(b *storage.PersistentStorageBackendConfig) { - drivers.SanitizeCommonStorageDriverConfig(d.Config.CommonStorageDriverConfig) - b.OntapConfig = &d.Config -} - -func (d *NASQtreeStorageDriver) GetExternalConfig() interface{} { - return getExternalConfig(d.Config) -} - -// GetVolumeExternal queries the storage backend for all relevant info about -// a single container volume managed by this driver and returns a VolumeExternal -// representation of the volume. -func (d *NASQtreeStorageDriver) GetVolumeExternal(name string) (*storage.VolumeExternal, error) { - - qtree, err := d.API.QtreeGet(name, d.FlexvolNamePrefix()) - if err != nil { - return nil, err - } - - volume, err := d.API.VolumeGet(qtree.Volume()) - if err != nil { - return nil, err - } - - quotaTarget := fmt.Sprintf("/vol/%s/%s", qtree.Volume(), qtree.Qtree()) - quota, err := d.API.QuotaGetEntry(quotaTarget) - if err != nil { - return nil, err - } - - return d.getVolumeExternal(qtree, volume, quota), nil -} - -// GetVolumeExternalWrappers queries the storage backend for all relevant info about -// container volumes managed by this driver. It then writes a VolumeExternal -// representation of each volume to the supplied channel, closing the channel -// when finished. 
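The name handling in GetInternalVolumeName above comes down to three rules: strip characters ONTAP rejects (hyphens and periods), collapse double underscores, and fall back to a generated name when the 64-character qtree limit would be exceeded. A rough, self-contained sketch of those rules (the fallback here uses a random hex string rather than the driver's prefix-plus-UUID scheme):

package main

import (
    "crypto/rand"
    "encoding/hex"
    "fmt"
    "strings"
)

const maxQtreeName = 64 // ONTAP's limit on qtree name length

// internalQtreeName normalizes a volume name into something ONTAP will accept
// as a qtree name, generating a replacement when the result would be too long.
func internalQtreeName(prefix, name string) string {
    internal := prefix + name
    internal = strings.ReplaceAll(internal, "-", "_")
    internal = strings.ReplaceAll(internal, ".", "_")
    internal = strings.ReplaceAll(internal, "__", "_")
    if len(internal) > maxQtreeName {
        b := make([]byte, 16)
        if _, err := rand.Read(b); err != nil {
            return internal[:maxQtreeName] // extremely unlikely; truncate as a last resort
        }
        internal = prefix + hex.EncodeToString(b)
    }
    return internal
}

func main() {
    fmt.Println(internalQtreeName("trident_", "pvc-1234.volume-a"))
}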
-func (d *NASQtreeStorageDriver) GetVolumeExternalWrappers( - channel chan *storage.VolumeExternalWrapper) { - - // Let the caller know we're done by closing the channel - defer close(channel) - - // Get all volumes matching the storage prefix - volumesResponse, err := d.API.VolumeGetAll(d.FlexvolNamePrefix()) - if err = api.GetError(volumesResponse, err); err != nil { - channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err} - return - } - - // Bail out early if there aren't any Flexvols - if volumesResponse.Result.AttributesListPtr == nil { - return - } - if len(volumesResponse.Result.AttributesListPtr.VolumeAttributesPtr) == 0 { - return - } - - // Get all qtrees in all Flexvols matching the storage prefix - qtreesResponse, err := d.API.QtreeGetAll(d.FlexvolNamePrefix()) - if err = api.GetError(qtreesResponse, err); err != nil { - channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err} - return - } - - // Bail out early if there aren't any qtrees - if qtreesResponse.Result.AttributesListPtr == nil { - return - } - if len(qtreesResponse.Result.AttributesListPtr.QtreeInfoPtr) == 0 { - return - } - - // Get all quotas in all Flexvols matching the storage prefix - quotasResponse, err := d.API.QuotaEntryList(d.FlexvolNamePrefix() + "*") - if err = api.GetError(quotasResponse, err); err != nil { - channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err} - return - } - - // Make a map of volumes for faster correlation with qtrees - volumeMap := make(map[string]azgo.VolumeAttributesType) - if volumesResponse.Result.AttributesListPtr != nil { - for _, volumeAttrs := range volumesResponse.Result.AttributesListPtr.VolumeAttributesPtr { - internalName := string(volumeAttrs.VolumeIdAttributesPtr.Name()) - volumeMap[internalName] = volumeAttrs - } - } - - // Make a map of quotas for faster correlation with qtrees - quotaMap := make(map[string]azgo.QuotaEntryType) - if quotasResponse.Result.AttributesListPtr != nil { - for _, quotaAttrs := range quotasResponse.Result.AttributesListPtr.QuotaEntryPtr { - quotaMap[quotaAttrs.QuotaTarget()] = quotaAttrs - } - } - - // Convert all qtrees to VolumeExternal and write them to the channel - if qtreesResponse.Result.AttributesListPtr != nil { - for _, qtree := range qtreesResponse.Result.AttributesListPtr.QtreeInfoPtr { - - // Ignore Flexvol-level qtrees - if qtree.Qtree() == "" { - continue - } - - // Don't include deleted qtrees - if strings.HasPrefix(qtree.Qtree(), deletedQtreeNamePrefix) { - continue - } - - volume, ok := volumeMap[qtree.Volume()] - if !ok { - log.WithField("qtree", qtree.Qtree()).Warning("Flexvol not found for qtree.") - continue - } - - quotaTarget := fmt.Sprintf("/vol/%s/%s", qtree.Volume(), qtree.Qtree()) - quota, ok := quotaMap[quotaTarget] - if !ok { - log.WithField("qtree", qtree.Qtree()).Warning("Quota rule not found for qtree.") - continue - } - - channel <- &storage.VolumeExternalWrapper{Volume: d.getVolumeExternal(&qtree, &volume, &quota), Error: nil} - } - } -} - -// getVolumeExternal is a private method that accepts info about a volume -// as returned by the storage backend and formats it as a VolumeExternal -// object. 
-func (d *NASQtreeStorageDriver) getVolumeExternal( - qtreeAttrs *azgo.QtreeInfoType, volumeAttrs *azgo.VolumeAttributesType, - quotaAttrs *azgo.QuotaEntryType) *storage.VolumeExternal { - - volumeIDAttrs := volumeAttrs.VolumeIdAttributesPtr - volumeSnapshotAttrs := volumeAttrs.VolumeSnapshotAttributesPtr - - internalName := qtreeAttrs.Qtree() - name := internalName - if strings.HasPrefix(internalName, *d.Config.StoragePrefix) { - name = internalName[len(*d.Config.StoragePrefix):] - } - - size := convertDiskLimitToBytes(quotaAttrs.DiskLimit()) - - volumeConfig := &storage.VolumeConfig{ - Version: tridentconfig.OrchestratorAPIVersion, - Name: name, - InternalName: internalName, - Size: strconv.FormatInt(size, 10), - Protocol: tridentconfig.File, - SnapshotPolicy: volumeSnapshotAttrs.SnapshotPolicy(), - ExportPolicy: qtreeAttrs.ExportPolicy(), - SnapshotDir: strconv.FormatBool(volumeSnapshotAttrs.SnapdirAccessEnabled()), - UnixPermissions: qtreeAttrs.Mode(), - StorageClass: "", - AccessMode: tridentconfig.ReadWriteMany, - AccessInfo: utils.VolumeAccessInfo{}, - BlockSize: "", - FileSystem: "", - } - - return &storage.VolumeExternal{ - Config: volumeConfig, - Pool: volumeIDAttrs.ContainingAggregateName(), - } -} - -func convertDiskLimitToBytes(diskLimit string) int64 { - size, err := strconv.ParseInt(diskLimit, 10, 64) - if err != nil { - size = 0 - } else { - size *= 1024 // convert KB to bytes - } - return size -} - -// GetUpdateType returns a bitmap populated with updates to the driver -func (d *NASQtreeStorageDriver) GetUpdateType(driverOrig storage.Driver) *roaring.Bitmap { - bitmap := roaring.New() - dOrig, ok := driverOrig.(*NASQtreeStorageDriver) - if !ok { - bitmap.Add(storage.InvalidUpdate) - return bitmap - } - - if d.Config.DataLIF != dOrig.Config.DataLIF { - bitmap.Add(storage.VolumeAccessInfoChange) - } - - if d.Config.Password != dOrig.Config.Password { - bitmap.Add(storage.PasswordChange) - } - - if d.Config.Username != dOrig.Config.Username { - bitmap.Add(storage.UsernameChange) - } - - return bitmap -} - -type HousekeepingTask struct { - Name string - Ticker *time.Ticker - InitialDelay time.Duration - Done chan struct{} - Tasks []func() - Driver *NASQtreeStorageDriver - stopped bool -} - -func (t *HousekeepingTask) Start() { - go func() { - t.Driver.housekeepingWaitGroup.Add(1) - defer t.Driver.housekeepingWaitGroup.Done() - time.Sleep(t.InitialDelay) - t.run(time.Now()) - for { - select { - case tick := <-t.Ticker.C: - t.run(tick) - case <-t.Done: - log.WithFields(log.Fields{ - "driver": t.Driver.Name(), - "task": t.Name, - }).Debugf("Shut down housekeeping tasks for the driver.") - return - } - } - }() -} - -func (t *HousekeepingTask) Stop() { - if !t.stopped { - if t.Ticker != nil { - t.Ticker.Stop() - } - close(t.Done) - t.stopped = true - // Run the housekeeping tasks one last time - for _, task := range t.Tasks { - task() - } - } -} - -func (t *HousekeepingTask) run(tick time.Time) { - for i, task := range t.Tasks { - log.WithFields(log.Fields{ - "tick": tick, - "driver": t.Driver.Name(), - "task": t.Name, - }).Debugf("Performing housekeeping task %d.", i) - task() - } -} - -func NewPruneTask(d *NASQtreeStorageDriver, tasks []func()) *HousekeepingTask { - - // Read background task timings from config file, use defaults if missing or invalid - pruneFlexvolsPeriodSecs := defaultPruneFlexvolsPeriodSecs - if d.Config.QtreePruneFlexvolsPeriod != "" { - i, err := strconv.ParseUint(d.Config.QtreePruneFlexvolsPeriod, 10, 64) - if err != nil { - log.WithField("interval", 
d.Config.QtreePruneFlexvolsPeriod).Warnf( - "Invalid Flexvol pruning interval. %v", err) - } else { - pruneFlexvolsPeriodSecs = i - } - } - emptyFlexvolDeferredDeletePeriodSecs := defaultEmptyFlexvolDeferredDeletePeriodSecs - if d.Config.EmptyFlexvolDeferredDeletePeriod != "" { - i, err := strconv.ParseUint(d.Config.EmptyFlexvolDeferredDeletePeriod, 10, 64) - if err != nil { - log.WithField("interval", d.Config.EmptyFlexvolDeferredDeletePeriod).Warnf( - "Invalid Flexvol deferred delete period. %v", err) - } else { - emptyFlexvolDeferredDeletePeriodSecs = i - } - } - d.emptyFlexvolDeferredDeletePeriod = time.Duration(emptyFlexvolDeferredDeletePeriodSecs) * time.Second - log.WithFields(log.Fields{ - "IntervalSeconds": pruneFlexvolsPeriodSecs, - "EmptyFlexvolTTL": emptyFlexvolDeferredDeletePeriodSecs, - }).Debug("Configured Flexvol pruning period.") - - task := &HousekeepingTask{ - Name: pruneTask, - Ticker: time.NewTicker(time.Duration(pruneFlexvolsPeriodSecs) * time.Second), - InitialDelay: HousekeepingStartupDelaySecs * time.Second, - Done: make(chan struct{}), - Tasks: tasks, - Driver: d, - } - - return task -} - -func NewResizeTask(d *NASQtreeStorageDriver, tasks []func()) *HousekeepingTask { - // Read background task timings from config file, use defaults if missing or invalid - resizeQuotasPeriodSecs := defaultResizeQuotasPeriodSecs - if d.Config.QtreeQuotaResizePeriod != "" { - i, err := strconv.ParseUint(d.Config.QtreeQuotaResizePeriod, 10, 64) - if err != nil { - log.WithField("interval", d.Config.QtreeQuotaResizePeriod).Warnf( - "Invalid quota resize interval. %v", err) - } else { - resizeQuotasPeriodSecs = i - } - } - log.WithFields(log.Fields{ - "IntervalSeconds": resizeQuotasPeriodSecs, - }).Debug("Configured quota resize period.") - - task := &HousekeepingTask{ - Name: resizeTask, - Ticker: time.NewTicker(time.Duration(resizeQuotasPeriodSecs) * time.Second), - InitialDelay: HousekeepingStartupDelaySecs * time.Second, - Done: make(chan struct{}), - Tasks: tasks, - Driver: d, - } - - return task -} - -// Resize expands the Flexvol containing the Qtree and updates the Qtree quota. -func (d *NASQtreeStorageDriver) Resize(volConfig *storage.VolumeConfig, sizeBytes uint64) error { - - name := volConfig.InternalName - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Resize", - "Type": "NASQtreeStorageDriver", - "name": name, - "sizeBytes": sizeBytes, - } - log.WithFields(fields).Debug(">>>> Resize") - defer log.WithFields(fields).Debug("<<<< Resize") - } - - // Ensure any Flexvol won't be pruned before resize is completed. 
- utils.Lock("resize", d.sharedLockID) - defer utils.Unlock("resize", d.sharedLockID) - - // Generic user-facing message - resizeError := errors.New("storage driver failed to resize the volume") - - // Check that volume exists - exists, flexvol, err := d.API.QtreeExists(name, d.FlexvolNamePrefix()) - if err != nil { - log.WithField("error", err).Error("Error checking for existing volume.") - return resizeError - } - if !exists { - log.WithFields(log.Fields{"qtree": name, "flexvol": flexvol}).Debug("Qtree does not exist.") - return fmt.Errorf("volume %s does not exist", name) - } - - // Calculate the delta size needed to resize the Qtree quota - quotaSize, err := d.getQuotaDiskLimitSize(name, flexvol) - if err != nil { - log.WithField("error", err).Error("Failed to determine quota size.") - return resizeError - } - - if sizeBytes == quotaSize { - log.Infof("Requested size and existing volume size are the same for volume %s.", name) - return nil - } - - if sizeBytes < quotaSize { - return fmt.Errorf("requested size %d is less than existing volume size %d", sizeBytes, quotaSize) - } - deltaQuotaSize := sizeBytes - quotaSize - - if aggrLimitsErr := checkAggregateLimitsForFlexvol(flexvol, sizeBytes, d.Config, d.GetAPI()); aggrLimitsErr != nil { - return aggrLimitsErr - } - - if _, _, checkVolumeSizeLimitsError := drivers.CheckVolumeSizeLimits(sizeBytes, d.Config.CommonStorageDriverConfig); checkVolumeSizeLimitsError != nil { - return checkVolumeSizeLimitsError - } - - err = d.resizeFlexvol(flexvol, deltaQuotaSize) - if err != nil { - log.WithField("error", err).Error("Failed to resize flexvol.") - return resizeError - } - - // Update the quota - err = d.setQuotaForQtree(name, flexvol, sizeBytes) - if err != nil { - log.WithField("error", err).Error("Qtree quota update failed.") - return resizeError - } - - volConfig.Size = strconv.FormatUint(sizeBytes, 10) - return nil -} - -// resizeFlexvol grows or shrinks the Flexvol to an optimal size if possible. Otherwise -// the Flexvol is expanded by the value of sizeBytes -func (d *NASQtreeStorageDriver) resizeFlexvol(flexvol string, sizeBytes uint64) error { - flexvolSizeBytes, err := d.getOptimalSizeForFlexvol(flexvol, sizeBytes) - if err != nil { - log.Warnf("Could not calculate optimal Flexvol size. %v", err) - // Lacking the optimal size, just grow the Flexvol to contain the new qtree - size := strconv.FormatUint(sizeBytes, 10) - resizeResponse, err := d.API.VolumeSetSize(flexvol, "+"+size) - if err = api.GetError(resizeResponse, err); err != nil { - return fmt.Errorf("flexvol resize failed: %v", err) - } - } else { - // Got optimal size, so just set the Flexvol to that value - flexvolSizeStr := strconv.FormatUint(flexvolSizeBytes, 10) - resizeResponse, err := d.API.VolumeSetSize(flexvol, flexvolSizeStr) - if err = api.GetError(resizeResponse, err); err != nil { - return fmt.Errorf("flexvol resize failed: %v", err) - } - } - return nil -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_san.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_san.go deleted file mode 100644 index 11346d469..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_san.go +++ /dev/null @@ -1,851 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. 
- -package ontap - -import ( - "errors" - "fmt" - "strconv" - "strings" - - "github.com/RoaringBitmap/roaring" - log "github.com/sirupsen/logrus" - - tridentconfig "github.com/netapp/trident/config" - "github.com/netapp/trident/storage" - sa "github.com/netapp/trident/storage_attribute" - drivers "github.com/netapp/trident/storage_drivers" - "github.com/netapp/trident/storage_drivers/ontap/api" - "github.com/netapp/trident/storage_drivers/ontap/api/azgo" - "github.com/netapp/trident/utils" -) - -func lunPath(name string) string { - return fmt.Sprintf("/vol/%v/lun0", name) -} - -// SANStorageDriver is for iSCSI storage provisioning -type SANStorageDriver struct { - initialized bool - Config drivers.OntapStorageDriverConfig - ips []string - API *api.Client - Telemetry *Telemetry -} - -func (d *SANStorageDriver) GetConfig() *drivers.OntapStorageDriverConfig { - return &d.Config -} - -func (d *SANStorageDriver) GetAPI() *api.Client { - return d.API -} - -func (d *SANStorageDriver) GetTelemetry() *Telemetry { - d.Telemetry.Telemetry = tridentconfig.OrchestratorTelemetry - return d.Telemetry -} - -// Name is for returning the name of this driver -func (d SANStorageDriver) Name() string { - return drivers.OntapSANStorageDriverName -} - -// Initialize from the provided config -func (d *SANStorageDriver) Initialize( - context tridentconfig.DriverContext, configJSON string, commonConfig *drivers.CommonStorageDriverConfig, -) error { - - if commonConfig.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Initialize", "Type": "SANStorageDriver"} - log.WithFields(fields).Debug(">>>> Initialize") - defer log.WithFields(fields).Debug("<<<< Initialize") - } - - // Parse the config - config, err := InitializeOntapConfig(context, configJSON, commonConfig) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - - d.API, err = InitializeOntapDriver(config) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - - d.ips, err = d.API.NetInterfaceGetDataLIFs("iscsi") - if err != nil { - return err - } - - if len(d.ips) == 0 { - return fmt.Errorf("no iSCSI data LIFs found on SVM %s", config.SVM) - } else { - log.WithField("dataLIFs", d.ips).Debug("Found iSCSI LIFs.") - } - - err = InitializeSANDriver(context, d.API, &d.Config, d.validate) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - - // Set up the autosupport heartbeat - d.Telemetry = NewOntapTelemetry(d) - d.Telemetry.Start() - - d.initialized = true - return nil -} - -func (d *SANStorageDriver) Initialized() bool { - return d.initialized -} - -func (d *SANStorageDriver) Terminate() { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Terminate", "Type": "SANStorageDriver"} - log.WithFields(fields).Debug(">>>> Terminate") - defer log.WithFields(fields).Debug("<<<< Terminate") - } - if d.Telemetry != nil { - d.Telemetry.Stop() - } - d.initialized = false -} - -// Validate the driver configuration and execution environment -func (d *SANStorageDriver) validate() error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "validate", "Type": "SANStorageDriver"} - log.WithFields(fields).Debug(">>>> validate") - defer log.WithFields(fields).Debug("<<<< validate") - } - - if err := ValidateSANDriver(d.API, &d.Config, d.ips); err != nil { - return fmt.Errorf("driver validation failed: %v", err) - } - - return nil -} - -// Create a volume+LUN 
with the specified options -func (d *SANStorageDriver) Create( - volConfig *storage.VolumeConfig, storagePool *storage.Pool, volAttributes map[string]sa.Request, -) error { - - name := volConfig.InternalName - - var fstype string - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Create", - "Type": "SANStorageDriver", - "name": name, - "attrs": volAttributes, - } - log.WithFields(fields).Debug(">>>> Create") - defer log.WithFields(fields).Debug("<<<< Create") - } - - // If the volume already exists, bail out - volExists, err := d.API.VolumeExists(name) - if err != nil { - return fmt.Errorf("error checking for existing volume: %v", err) - } - if volExists { - return drivers.NewVolumeExistsError(name) - } - - // Determine volume size in bytes - requestedSize, err := utils.ConvertSizeToBytes(volConfig.Size) - if err != nil { - return fmt.Errorf("could not convert volume size %s: %v", volConfig.Size, err) - } - sizeBytes, err := strconv.ParseUint(requestedSize, 10, 64) - if err != nil { - return fmt.Errorf("%v is an invalid volume size: %v", volConfig.Size, err) - } - sizeBytes, err = GetVolumeSize(sizeBytes, d.Config) - if err != nil { - return err - } - - // Get options - opts, err := d.GetVolumeOpts(volConfig, storagePool, volAttributes) - if err != nil { - return err - } - - // Get options with default fallback values - // see also: ontap_common.go#PopulateConfigurationDefaults - size := strconv.FormatUint(sizeBytes, 10) - spaceAllocation, _ := strconv.ParseBool(utils.GetV(opts, "spaceAllocation", d.Config.SpaceAllocation)) - spaceReserve := utils.GetV(opts, "spaceReserve", d.Config.SpaceReserve) - snapshotPolicy := utils.GetV(opts, "snapshotPolicy", d.Config.SnapshotPolicy) - snapshotReserve := utils.GetV(opts, "snapshotReserve", d.Config.SnapshotReserve) - unixPermissions := utils.GetV(opts, "unixPermissions", d.Config.UnixPermissions) - snapshotDir := "false" - exportPolicy := utils.GetV(opts, "exportPolicy", d.Config.ExportPolicy) - aggregate := utils.GetV(opts, "aggregate", d.Config.Aggregate) - securityStyle := utils.GetV(opts, "securityStyle", d.Config.SecurityStyle) - encryption := utils.GetV(opts, "encryption", d.Config.Encryption) - - if aggrLimitsErr := checkAggregateLimits(aggregate, spaceReserve, sizeBytes, d.Config, d.GetAPI()); aggrLimitsErr != nil { - return aggrLimitsErr - } - - if _, _, checkVolumeSizeLimitsError := drivers.CheckVolumeSizeLimits(sizeBytes, d.Config.CommonStorageDriverConfig); checkVolumeSizeLimitsError != nil { - return checkVolumeSizeLimitsError - } - - enableEncryption, err := strconv.ParseBool(encryption) - if err != nil { - return fmt.Errorf("invalid boolean value for encryption: %v", err) - } - - snapshotReserveInt, err := GetSnapshotReserve(snapshotPolicy, snapshotReserve) - if err != nil { - return fmt.Errorf("invalid value for snapshotReserve: %v", err) - } - - fstype, err = drivers.CheckSupportedFilesystem(utils.GetV(opts, "fstype|fileSystemType", d.Config.FileSystemType), name) - if err != nil { - return err - } - - log.WithFields(log.Fields{ - "name": name, - "size": size, - "spaceAllocation": spaceAllocation, - "spaceReserve": spaceReserve, - "snapshotPolicy": snapshotPolicy, - "snapshotReserve": snapshotReserveInt, - "unixPermissions": unixPermissions, - "snapshotDir": snapshotDir, - "exportPolicy": exportPolicy, - "aggregate": aggregate, - "securityStyle": securityStyle, - "encryption": enableEncryption, - }).Debug("Creating Flexvol.") - - // Create the volume - volCreateResponse, err := d.API.VolumeCreate( - 
name, aggregate, size, spaceReserve, snapshotPolicy, unixPermissions, - exportPolicy, securityStyle, enableEncryption, snapshotReserveInt) - - if err = api.GetError(volCreateResponse, err); err != nil { - if zerr, ok := err.(api.ZapiError); ok { - // Handle case where the Create is passed to every Docker Swarm node - if zerr.Code() == azgo.EAPIERROR && strings.HasSuffix(strings.TrimSpace(zerr.Reason()), "Job exists") { - log.WithField("volume", name).Warn("Volume create job already exists, " + - "skipping volume create on this node.") - return nil - } - } - return fmt.Errorf("error creating volume: %v", err) - } - - lunPath := lunPath(name) - osType := "linux" - - // Create the LUN - lunCreateResponse, err := d.API.LunCreate(lunPath, int(sizeBytes), osType, false, spaceAllocation) - if err = api.GetError(lunCreateResponse, err); err != nil { - return fmt.Errorf("error creating LUN: %v", err) - } - - // Save the fstype in a LUN attribute so we know what to do in Attach - attrResponse, err := d.API.LunSetAttribute(lunPath, LUNAttributeFSType, fstype) - if err = api.GetError(attrResponse, err); err != nil { - defer d.API.LunDestroy(lunPath) - return fmt.Errorf("error saving file system type for LUN: %v", err) - } - // Save the context - attrResponse, err = d.API.LunSetAttribute(lunPath, "context", string(d.Config.DriverContext)) - if err = api.GetError(attrResponse, err); err != nil { - log.WithField("name", name).Warning("Failed to save the driver context attribute for new volume.") - } - - return nil -} - -// Create a volume clone -func (d *SANStorageDriver) CreateClone(volConfig *storage.VolumeConfig) error { - - name := volConfig.InternalName - source := volConfig.CloneSourceVolumeInternal - snapshot := volConfig.CloneSourceSnapshot - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateClone", - "Type": "SANStorageDriver", - "name": name, - "source": source, - "snapshot": snapshot, - } - log.WithFields(fields).Debug(">>>> CreateClone") - defer log.WithFields(fields).Debug("<<<< CreateClone") - } - - opts, err := d.GetVolumeOpts(volConfig, nil, make(map[string]sa.Request)) - if err != nil { - return err - } - - split, err := strconv.ParseBool(utils.GetV(opts, "splitOnClone", d.Config.SplitOnClone)) - if err != nil { - return fmt.Errorf("invalid boolean value for splitOnClone: %v", err) - } - - log.WithField("splitOnClone", split).Debug("Creating volume clone.") - return CreateOntapClone(name, source, snapshot, split, &d.Config, d.API) -} - -func (d *SANStorageDriver) Import(volConfig *storage.VolumeConfig, originalName string) error { - return errors.New("import is not implemented") -} - -func (d *SANStorageDriver) Rename(name string, newName string) error { - return errors.New("rename is not implemented") -} - -// Destroy the requested (volume,lun) storage tuple -func (d *SANStorageDriver) Destroy(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Destroy", - "Type": "SANStorageDriver", - "name": name, - } - log.WithFields(fields).Debug(">>>> Destroy") - defer log.WithFields(fields).Debug("<<<< Destroy") - } - - var ( - err error - iSCSINodeName string - lunID int - ) - - // Validate Flexvol exists before trying to destroy - volExists, err := d.API.VolumeExists(name) - if err != nil { - return fmt.Errorf("error checking for existing volume: %v", err) - } - if !volExists { - log.WithField("volume", name).Debug("Volume already deleted, skipping destroy.") - return nil - } - - if d.Config.DriverContext == 
tridentconfig.ContextDocker { - - // Get target info - iSCSINodeName, _, err = GetISCSITargetInfo(d.API, &d.Config) - if err != nil { - log.WithField("error", err).Error("Could not get target info.") - return err - } - - // Get the LUN ID - lunPath := fmt.Sprintf("/vol/%s/lun0", name) - lunMapResponse, err := d.API.LunMapListInfo(lunPath) - if err != nil { - return fmt.Errorf("error reading LUN maps for volume %s: %v", name, err) - } - lunID = -1 - if lunMapResponse.Result.InitiatorGroupsPtr != nil { - for _, lunMapResponse := range lunMapResponse.Result.InitiatorGroupsPtr.InitiatorGroupInfoPtr { - if lunMapResponse.InitiatorGroupName() == d.Config.IgroupName { - lunID = lunMapResponse.LunId() - } - } - } - if lunID >= 0 { - // Inform the host about the device removal - utils.PrepareDeviceForRemoval(lunID, iSCSINodeName) - } - } - - // Delete the Flexvol & LUN - volDestroyResponse, err := d.API.VolumeDestroy(name, true) - if err != nil { - return fmt.Errorf("error destroying volume %v: %v", name, err) - } - if zerr := api.NewZapiError(volDestroyResponse); !zerr.IsPassed() { - // Handle case where the Destroy is passed to every Docker Swarm node - if zerr.Code() == azgo.EVOLUMEDOESNOTEXIST { - log.WithField("volume", name).Warn("Volume already deleted.") - } else { - return fmt.Errorf("error destroying volume %v: %v", name, zerr) - } - } - - return nil -} - -// Publish the volume to the host specified in publishInfo. This method may or may not be running on the host -// where the volume will be mounted, so it should limit itself to updating access rules, initiator groups, etc. -// that require some host identity (but not locality) as well as storage controller API access. -func (d *SANStorageDriver) Publish(name string, publishInfo *utils.VolumePublishInfo) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Publish", - "Type": "SANStorageDriver", - "name": name, - } - log.WithFields(fields).Debug(">>>> Publish") - defer log.WithFields(fields).Debug("<<<< Publish") - } - - lunPath := lunPath(name) - igroupName := d.Config.IgroupName - - // Get target info - iSCSINodeName, _, err := GetISCSITargetInfo(d.API, &d.Config) - if err != nil { - return err - } - - err = PublishLUN(d.API, &d.Config, d.ips, publishInfo, lunPath, igroupName, iSCSINodeName) - if err != nil { - return fmt.Errorf("error publishing %s driver: %v", d.Name(), err) - } - - return nil -} - -// GetSnapshot gets a snapshot. To distinguish between an API error reading the snapshot -// and a non-existent snapshot, this method may return (nil, nil). 
-func (d *SANStorageDriver) GetSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshot", - "Type": "SANStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> GetSnapshot") - defer log.WithFields(fields).Debug("<<<< GetSnapshot") - } - - return GetSnapshot(snapConfig, &d.Config, d.API, d.API.VolumeSize) -} - -// Return the list of snapshots associated with the specified volume -func (d *SANStorageDriver) GetSnapshots(volConfig *storage.VolumeConfig) ([]*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshots", - "Type": "SANStorageDriver", - "volumeName": volConfig.InternalName, - } - log.WithFields(fields).Debug(">>>> GetSnapshots") - defer log.WithFields(fields).Debug("<<<< GetSnapshots") - } - - return GetSnapshots(volConfig, &d.Config, d.API, d.API.VolumeSize) -} - -// CreateSnapshot creates a snapshot for the given volume -func (d *SANStorageDriver) CreateSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateSnapshot", - "Type": "SANStorageDriver", - "snapshotName": internalSnapName, - "sourceVolume": internalVolName, - } - log.WithFields(fields).Debug(">>>> CreateSnapshot") - defer log.WithFields(fields).Debug("<<<< CreateSnapshot") - } - - return CreateSnapshot(snapConfig, &d.Config, d.API, d.API.VolumeSize) -} - -// RestoreSnapshot restores a volume (in place) from a snapshot. -func (d *SANStorageDriver) RestoreSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "RestoreSnapshot", - "Type": "SANStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> RestoreSnapshot") - defer log.WithFields(fields).Debug("<<<< RestoreSnapshot") - } - - return RestoreSnapshot(snapConfig, &d.Config, d.API) -} - -// DeleteSnapshot creates a snapshot of a volume. 
-func (d *SANStorageDriver) DeleteSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "DeleteSnapshot", - "Type": "SANStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> DeleteSnapshot") - defer log.WithFields(fields).Debug("<<<< DeleteSnapshot") - } - - return DeleteSnapshot(snapConfig, &d.Config, d.API) -} - -// Test for the existence of a volume -func (d *SANStorageDriver) Get(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Get", "Type": "SANStorageDriver"} - log.WithFields(fields).Debug(">>>> Get") - defer log.WithFields(fields).Debug("<<<< Get") - } - - return GetVolume(name, d.API, &d.Config) -} - -// Retrieve storage backend capabilities -func (d *SANStorageDriver) GetStorageBackendSpecs(backend *storage.Backend) error { - if d.Config.BackendName == "" { - // Use the old naming scheme if no name is specified - backend.Name = "ontapsan_" + d.ips[0] - } else { - backend.Name = d.Config.BackendName - } - poolAttrs := d.getStoragePoolAttributes() - return getStorageBackendSpecsCommon(d, backend, poolAttrs) -} - -func (d *SANStorageDriver) getStoragePoolAttributes() map[string]sa.Offer { - - return map[string]sa.Offer{ - sa.BackendType: sa.NewStringOffer(d.Name()), - sa.Snapshots: sa.NewBoolOffer(true), - sa.Clones: sa.NewBoolOffer(true), - sa.Encryption: sa.NewBoolOffer(true), - sa.ProvisioningType: sa.NewStringOffer("thick", "thin"), - } -} - -func (d *SANStorageDriver) GetVolumeOpts( - volConfig *storage.VolumeConfig, - pool *storage.Pool, - requests map[string]sa.Request, -) (map[string]string, error) { - return getVolumeOptsCommon(volConfig, pool, requests), nil -} - -func (d *SANStorageDriver) GetInternalVolumeName(name string) string { - return getInternalVolumeNameCommon(d.Config.CommonStorageDriverConfig, name) -} - -func (d *SANStorageDriver) CreatePrepare(volConfig *storage.VolumeConfig) error { - return createPrepareCommon(d, volConfig) -} - -func (d *SANStorageDriver) CreateFollowup(volConfig *storage.VolumeConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateFollowup", - "Type": "SANStorageDriver", - "name": volConfig.Name, - "internalName": volConfig.InternalName, - } - log.WithFields(fields).Debug(">>>> CreateFollowup") - defer log.WithFields(fields).Debug("<<<< CreateFollowup") - } - - if d.Config.DriverContext == tridentconfig.ContextDocker { - log.Debug("No follow-up create actions for Docker.") - return nil - } - - return d.mapOntapSANLun(volConfig) -} - -func (d *SANStorageDriver) mapOntapSANLun(volConfig *storage.VolumeConfig) error { - - // get the lunPath and lunID - lunPath := fmt.Sprintf("/vol/%v/lun0", volConfig.InternalName) - lunID, err := d.API.LunMapIfNotMapped(d.Config.IgroupName, lunPath) - if err != nil { - return err - } - - err = PopulateOntapLunMapping(d.API, &d.Config, d.ips, volConfig, lunID) - if err != nil { - return fmt.Errorf("error mapping LUN for %s driver: %v", d.Name(), err) - } - - return nil -} - -func (d *SANStorageDriver) GetProtocol() tridentconfig.Protocol { - return tridentconfig.Block -} - -func (d *SANStorageDriver) StoreConfig( - b *storage.PersistentStorageBackendConfig, -) { - drivers.SanitizeCommonStorageDriverConfig(d.Config.CommonStorageDriverConfig) - b.OntapConfig = &d.Config -} - -func (d *SANStorageDriver) GetExternalConfig() interface{} { - return 
getExternalConfig(d.Config) -} - -// GetVolumeExternal queries the storage backend for all relevant info about -// a single container volume managed by this driver and returns a VolumeExternal -// representation of the volume. -func (d *SANStorageDriver) GetVolumeExternal(name string) (*storage.VolumeExternal, error) { - - volumeAttrs, err := d.API.VolumeGet(name) - if err != nil { - return nil, err - } - - lunPath := fmt.Sprintf("/vol/%v/lun0", name) - lunAttrs, err := d.API.LunGet(lunPath) - if err != nil { - return nil, err - } - - return d.getVolumeExternal(lunAttrs, volumeAttrs), nil -} - -// GetVolumeExternalWrappers queries the storage backend for all relevant info about -// container volumes managed by this driver. It then writes a VolumeExternal -// representation of each volume to the supplied channel, closing the channel -// when finished. -func (d *SANStorageDriver) GetVolumeExternalWrappers( - channel chan *storage.VolumeExternalWrapper) { - - // Let the caller know we're done by closing the channel - defer close(channel) - - // Get all volumes matching the storage prefix - volumesResponse, err := d.API.VolumeGetAll(*d.Config.StoragePrefix) - if err = api.GetError(volumesResponse, err); err != nil { - channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err} - return - } - - // Get all LUNs named 'lun0' in volumes matching the storage prefix - lunPathPattern := fmt.Sprintf("/vol/%v/lun0", *d.Config.StoragePrefix+"*") - lunsResponse, err := d.API.LunGetAll(lunPathPattern) - if err = api.GetError(lunsResponse, err); err != nil { - channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err} - return - } - - // Make a map of volumes for faster correlation with LUNs - volumeMap := make(map[string]azgo.VolumeAttributesType) - if volumesResponse.Result.AttributesListPtr != nil { - for _, volumeAttrs := range volumesResponse.Result.AttributesListPtr.VolumeAttributesPtr { - internalName := volumeAttrs.VolumeIdAttributesPtr.Name() - volumeMap[internalName] = volumeAttrs - } - } - - // Convert all LUNs to VolumeExternal and write them to the channel - if lunsResponse.Result.AttributesListPtr != nil { - for _, lun := range lunsResponse.Result.AttributesListPtr.LunInfoPtr { - - volume, ok := volumeMap[lun.Volume()] - if !ok { - log.WithField("path", lun.Path()).Warning("Flexvol not found for LUN.") - continue - } - - channel <- &storage.VolumeExternalWrapper{Volume: d.getVolumeExternal(&lun, &volume), Error: nil} - } - } -} - -// getVolumeExternal is a private method that accepts info about a volume -// as returned by the storage backend and formats it as a VolumeExternal -// object. 
-func (d *SANStorageDriver) getVolumeExternal( - lunAttrs *azgo.LunInfoType, volumeAttrs *azgo.VolumeAttributesType, -) *storage.VolumeExternal { - - volumeIDAttrs := volumeAttrs.VolumeIdAttributesPtr - volumeSnapshotAttrs := volumeAttrs.VolumeSnapshotAttributesPtr - - internalName := volumeIDAttrs.Name() - name := internalName - if strings.HasPrefix(internalName, *d.Config.StoragePrefix) { - name = internalName[len(*d.Config.StoragePrefix):] - } - - volumeConfig := &storage.VolumeConfig{ - Version: tridentconfig.OrchestratorAPIVersion, - Name: name, - InternalName: internalName, - Size: strconv.FormatInt(int64(lunAttrs.Size()), 10), - Protocol: tridentconfig.Block, - SnapshotPolicy: volumeSnapshotAttrs.SnapshotPolicy(), - ExportPolicy: "", - SnapshotDir: "false", - UnixPermissions: "", - StorageClass: "", - AccessMode: tridentconfig.ReadWriteOnce, - AccessInfo: utils.VolumeAccessInfo{}, - BlockSize: "", - FileSystem: "", - } - - return &storage.VolumeExternal{ - Config: volumeConfig, - Pool: volumeIDAttrs.ContainingAggregateName(), - } -} - -// GetUpdateType returns a bitmap populated with updates to the driver -func (d *SANStorageDriver) GetUpdateType(driverOrig storage.Driver) *roaring.Bitmap { - bitmap := roaring.New() - dOrig, ok := driverOrig.(*SANStorageDriver) - if !ok { - bitmap.Add(storage.InvalidUpdate) - return bitmap - } - - if d.Config.DataLIF != dOrig.Config.DataLIF { - bitmap.Add(storage.VolumeAccessInfoChange) - } - - if d.Config.Password != dOrig.Config.Password { - bitmap.Add(storage.PasswordChange) - } - - if d.Config.Username != dOrig.Config.Username { - bitmap.Add(storage.UsernameChange) - } - - return bitmap -} - -// Resize expands the volume size. -func (d *SANStorageDriver) Resize(volConfig *storage.VolumeConfig, sizeBytes uint64) error { - - name := volConfig.InternalName - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Resize", - "Type": "SANStorageDriver", - "name": name, - "sizeBytes": sizeBytes, - } - log.WithFields(fields).Debug(">>>> Resize") - defer log.WithFields(fields).Debug("<<<< Resize") - } - - // Validation checks - volExists, err := d.API.VolumeExists(name) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "name": name, - }).Error("Error checking for existing volume.") - return fmt.Errorf("error occurred checking for existing volume") - } - if !volExists { - return fmt.Errorf("volume %s does not exist", name) - } - - volSize, err := d.API.VolumeSize(name) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "name": name, - }).Error("Error checking volume size.") - return fmt.Errorf("error occurred when checking volume size") - } - - sameSize, err := utils.VolumeSizeWithinTolerance(int64(sizeBytes), int64(volSize), tridentconfig.SANResizeDelta) - if err != nil { - return err - } - - if sameSize { - log.WithFields(log.Fields{ - "requestedSize": sizeBytes, - "currentVolumeSize": volSize, - "name": name, - "delta": tridentconfig.SANResizeDelta, - }).Info("Requested size and current volume size are within the delta and therefore considered the same size for SAN resize operations.") - return nil - } - - volSizeBytes := uint64(volSize) - if sizeBytes < volSizeBytes { - return fmt.Errorf("requested size %d is less than existing volume size %d", sizeBytes, volSizeBytes) - } - - if aggrLimitsErr := checkAggregateLimitsForFlexvol(name, sizeBytes, d.Config, d.GetAPI()); aggrLimitsErr != nil { - return aggrLimitsErr - } - - if _, _, checkVolumeSizeLimitsError := drivers.CheckVolumeSizeLimits(sizeBytes, 
d.Config.CommonStorageDriverConfig); checkVolumeSizeLimitsError != nil { - return checkVolumeSizeLimitsError - } - - // Resize operations - lunPath := fmt.Sprintf("/vol/%v/lun0", name) - if !d.API.SupportsFeature(api.LunGeometrySkip) { - // Check LUN geometry and verify LUN max size. - lunGeometry, err := d.API.LunGetGeometry(lunPath) - if err != nil { - log.WithField("error", err).Error("LUN resize failed.") - return fmt.Errorf("volume resize failed") - } - - lunMaxSize := lunGeometry.Result.MaxResizeSize() - if lunMaxSize < int(sizeBytes) { - log.WithFields(log.Fields{ - "error": err, - "sizeBytes": sizeBytes, - "lunMaxSize": lunMaxSize, - "lunPath": lunPath, - }).Error("Requested size is larger than LUN's maximum capacity.") - return fmt.Errorf("volume resize failed as requested size is larger than LUN's maximum capacity") - } - } - - // Resize FlexVol - response, err := d.API.VolumeSetSize(name, strconv.FormatUint(sizeBytes, 10)) - if err = api.GetError(response.Result, err); err != nil { - log.WithField("error", err).Error("Volume resize failed.") - return fmt.Errorf("volume resize failed") - } - - // Resize LUN0 - returnSize, err := d.API.LunResize(lunPath, int(sizeBytes)) - if err != nil { - log.WithField("error", err).Error("LUN resize failed.") - return fmt.Errorf("volume resize failed") - } - - volConfig.Size = strconv.FormatUint(returnSize, 10) - return nil -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_san_economy.go b/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_san_economy.go deleted file mode 100644 index ede3b7b1e..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/ontap/ontap_san_economy.go +++ /dev/null @@ -1,1644 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. 
- -package ontap - -import ( - "errors" - "fmt" - "math/rand" - "regexp" - "strconv" - "strings" - "time" - - "github.com/RoaringBitmap/roaring" - log "github.com/sirupsen/logrus" - - tridentconfig "github.com/netapp/trident/config" - "github.com/netapp/trident/storage" - sa "github.com/netapp/trident/storage_attribute" - drivers "github.com/netapp/trident/storage_drivers" - "github.com/netapp/trident/storage_drivers/ontap/api" - "github.com/netapp/trident/storage_drivers/ontap/api/azgo" - "github.com/netapp/trident/utils" -) - -const ( - maxLunNameLength = 254 - maxLunsPerFlexvol = 100 - snapshotNameSeparator = "_snapshot_" -) - -func GetLUNPathEconomy(bucketName string, volNameInternal string) string { - return fmt.Sprintf("/vol/%s/%s", bucketName, volNameInternal) -} - -type LUNHelper struct { - Config drivers.OntapStorageDriverConfig - Context tridentconfig.DriverContext - SnapshotRegexp *regexp.Regexp -} - -func NewLUNHelper(config drivers.OntapStorageDriverConfig, context tridentconfig.DriverContext) *LUNHelper { - - helper := LUNHelper{} - helper.Config = config - helper.Context = context - regexString := fmt.Sprintf("(?m)/vol/(.+)/%v(.+?)($|_snapshot_(.+))", *helper.Config.StoragePrefix) - helper.SnapshotRegexp = regexp.MustCompile(regexString) - - return &helper -} - -// volName is expected not to have the storage prefix included -// parameters: volName=my-Lun snapName=my-Snapshot -// output: storagePrefix_my_Lun_snapshot_my_Snapshot -// parameters: volName=my-Lun snapName=snapshot-123 -// output: storagePrefix_my_Lun_snapshot_snapshot_123 -func (o *LUNHelper) GetSnapshotName(volName, snapName string) string { - volName = strings.ReplaceAll(volName, "-", "_") - snapName = o.getInternalSnapshotName(snapName) - name := fmt.Sprintf("%v%v%v", *o.Config.StoragePrefix, volName, snapName) - return name -} - -// internalVolName is expected to have the storage prefix included -// parameters: internalVolName=storagePrefix_my-Lun snapName=my-Snapshot -// output: storagePrefix_my_Lun_snapshot_my_Snapshot -func (o *LUNHelper) GetInternalSnapshotName(internalVolName, snapName string) string { - internalVolName = strings.ReplaceAll(internalVolName, "-", "_") - snapName = o.getInternalSnapshotName(snapName) - name := fmt.Sprintf("%v%v", internalVolName, snapName) - return name -} - -// parameters: bucketName=my-Bucket internalVolName=storagePrefix_my-Lun snapName=snap-1 -// output: /vol/my_Bucket/storagePrefix_my_Lun_snapshot_snap_1 -func (o *LUNHelper) GetSnapPath(bucketName, internalVolName, snapName string) string { - bucketName = strings.ReplaceAll(bucketName, "-", "_") - internalVolName = strings.ReplaceAll(internalVolName, "-", "_") - snapName = o.getInternalSnapshotName(snapName) - snapPath := fmt.Sprintf("/vol/%v/%v%v", bucketName, internalVolName, snapName) - return snapPath -} - -// parameter: bucketName=my-Bucket -// output: /vol/my_Bucket/storagePrefix_*_snapshot_* -func (o *LUNHelper) GetSnapPathPattern(bucketName string) string { - bucketName = strings.ReplaceAll(bucketName, "-", "_") - snapPattern := fmt.Sprintf("/vol/%v/%v*"+snapshotNameSeparator+"*", bucketName, *o.Config.StoragePrefix) - return snapPattern -} - -// parameter: volName=my-Vol -// output: /vol/*/storagePrefix_my_Vol_snapshot_* -func (o *LUNHelper) GetSnapPathPatternForVolume(volName string) string { - volName = strings.ReplaceAll(volName, "-", "_") - snapPattern := fmt.Sprintf("/vol/*/%v%v"+snapshotNameSeparator+"*", *o.Config.StoragePrefix, volName) - return snapPattern -} - -// parameter: volName=my-Lun -// 
output: storagePrefix_my_Lun -// parameter: volName=storagePrefix_my-Lun -// output: storagePrefix_my_Lun -func (o *LUNHelper) GetInternalVolumeName(volName string) string { - volName = strings.ReplaceAll(volName, "-", "_") - if !strings.HasPrefix(volName, *o.Config.StoragePrefix) { - name := fmt.Sprintf("%v%v", *o.Config.StoragePrefix, volName) - return name - } - return volName -} - -// parameter: snapName=snapshot-123 -// output: _snapshot_snapshot_123 -// parameter: snapName=snapshot -// output: _snapshot_snapshot -// parameter: snapName=_____snapshot -// output: _snapshot______snapshot -func (o *LUNHelper) getInternalSnapshotName(snapName string) string { - snapName = strings.ReplaceAll(snapName, "-", "_") - name := fmt.Sprintf("%v%v", snapshotNameSeparator, snapName) - return name -} - -// parameters: bucketName=my-Bucket volName=my-Lun -// output: /vol/my_Bucket/storagePrefix_my_Lun -// parameters: bucketName=my-Bucket volName=storagePrefix_my-Lun -// output: /vol/my_Bucket/storagePrefix_my_Lun -func (o *LUNHelper) GetLUNPath(bucketName, volName string) string { - bucketName = strings.ReplaceAll(bucketName, "-", "_") - volName = o.GetInternalVolumeName(volName) - snapPath := fmt.Sprintf("/vol/%v/%v", bucketName, volName) - return snapPath -} - -// parameter: volName=my-Lun -// output: /vol/*/storagePrefix_my_Vol -func (o *LUNHelper) GetLUNPathPattern(volName string) string { - volName = strings.ReplaceAll(volName, "-", "_") - snapPattern := fmt.Sprintf("/vol/*/%v%v", *o.Config.StoragePrefix, volName) - return snapPattern -} - -// identifies if the given snapLunPath has a valid snapshot name -func (o *LUNHelper) IsValidSnapLUNPath(snapLunPath string) bool { - snapLunPath = strings.ReplaceAll(snapLunPath, "-", "_") - snapshotName := o.GetSnapshotNameFromSnapLUNPath(snapLunPath) - if snapshotName == "" { - return false - } - return true -} - -func (o *LUNHelper) getLunPathComponents(snapLunPath string) []string { - result := o.SnapshotRegexp.FindStringSubmatch(snapLunPath) - // result [0] is the full string: /vol/myBucket/storagePrefix_myLun_snapshot_mySnap - // result [1] is the bucket name: myBucket - // result [2] is the volume name: myLun - // result [3] is _snapshot_mySnap (unused) - // result [4] is the snapshot name: mySnap - return result -} - -// parameter: snapLunPath=/vol/myBucket/storagePrefix_myLun_snapshot_mySnap -// result [4] is the snapshot name: mySnap -func (o *LUNHelper) GetSnapshotNameFromSnapLUNPath(snapLunPath string) string { - result := o.getLunPathComponents(snapLunPath) - if len(result) > 4 { - return result[4] - } - return "" -} - -// parameter: snapLunPath=/vol/myBucket/storagePrefix_myLun_snapshot_mySnap -// result [2] is the volume name: myLun -func (o *LUNHelper) GetVolumeName(lunPath string) string { - result := o.getLunPathComponents(lunPath) - if len(result) > 2 { - return result[2] - } - return "" -} - -// parameter: snapLunPath=/vol/myBucket/storagePrefix_myLun_snapshot_mySnap -// result [1] is the bucket name: myBucket -func (o *LUNHelper) GetBucketName(lunPath string) string { - result := o.getLunPathComponents(lunPath) - if len(result) > 1 { - return result[1] - } - return "" -} - -// SANEconomyStorageDriver is for iSCSI storage provisioning of LUNs -type SANEconomyStorageDriver struct { - initialized bool - Config drivers.OntapStorageDriverConfig - ips []string - API *api.Client - Telemetry *Telemetry - flexvolNamePrefix string - helper *LUNHelper -} - -func (d *SANEconomyStorageDriver) GetConfig() *drivers.OntapStorageDriverConfig { - return 
&d.Config -} - -func (d *SANEconomyStorageDriver) GetAPI() *api.Client { - return d.API -} - -func (d *SANEconomyStorageDriver) GetTelemetry() *Telemetry { - d.Telemetry.Telemetry = tridentconfig.OrchestratorTelemetry - return d.Telemetry -} - -// Name is for returning the name of this driver -func (d *SANEconomyStorageDriver) Name() string { - return drivers.OntapSANEconomyStorageDriverName -} - -func (d *SANEconomyStorageDriver) FlexvolNamePrefix() string { - return d.flexvolNamePrefix -} - -// Initialize from the provided config -func (d *SANEconomyStorageDriver) Initialize( - context tridentconfig.DriverContext, configJSON string, commonConfig *drivers.CommonStorageDriverConfig, -) error { - - if commonConfig.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Initialize", "Type": "SANEconomyStorageDriver"} - log.WithFields(fields).Debug(">>>> Initialize") - defer log.WithFields(fields).Debug("<<<< Initialize") - } - - // Parse the config - config, err := InitializeOntapConfig(context, configJSON, commonConfig) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - - d.API, err = InitializeOntapDriver(config) - if err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - d.Config = *config - d.helper = NewLUNHelper(d.Config, context) - - d.ips, err = d.API.NetInterfaceGetDataLIFs("iscsi") - if err != nil { - return err - } - - if len(d.ips) == 0 { - return fmt.Errorf("no iSCSI data LIFs found on SVM %s", config.SVM) - } else { - log.WithField("dataLIFs", d.ips).Debug("Found iSCSI LIFs.") - } - - // Remap context for artifact naming so the names remain stable over time - var artifactPrefix string - switch context { - case tridentconfig.ContextDocker: - artifactPrefix = artifactPrefixDocker - case tridentconfig.ContextKubernetes, tridentconfig.ContextCSI: - artifactPrefix = artifactPrefixKubernetes - default: - return fmt.Errorf("unknown driver context: %s", context) - } - - // Set up internal driver state - d.flexvolNamePrefix = fmt.Sprintf("%s_lun_pool_%s_", artifactPrefix, *d.Config.StoragePrefix) - d.flexvolNamePrefix = strings.Replace(d.flexvolNamePrefix, "__", "_", -1) - - log.WithFields(log.Fields{ - "FlexvolNamePrefix": d.flexvolNamePrefix, - }).Debugf("SAN Economy driver settings.") - - if err = InitializeSANDriver(context, d.API, &d.Config, d.validate); err != nil { - return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) - } - - // Set up the autosupport heartbeat - d.Telemetry = NewOntapTelemetry(d) - d.Telemetry.Start() - - d.initialized = true - return nil -} - -func (d *SANEconomyStorageDriver) Initialized() bool { - return d.initialized -} - -func (d *SANEconomyStorageDriver) Terminate() { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Terminate", "Type": "SANEconomyStorageDriver"} - log.WithFields(fields).Debug(">>>> Terminate") - defer log.WithFields(fields).Debug("<<<< Terminate") - } - - if d.Telemetry != nil { - d.Telemetry.Stop() - } - - d.initialized = false -} - -// Validate the driver configuration and execution environment -func (d *SANEconomyStorageDriver) validate() error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "validate", "Type": "SANEconomyStorageDriver"} - log.WithFields(fields).Debug(">>>> validate") - defer log.WithFields(fields).Debug("<<<< validate") - } - - if err := ValidateSANDriver(d.API, &d.Config, d.ips); err != nil { - return fmt.Errorf("error driver validation 
failed: %v", err) - } - - return nil -} - -// Create a volume+LUN with the specified options -func (d *SANEconomyStorageDriver) Create( - volConfig *storage.VolumeConfig, storagePool *storage.Pool, volAttributes map[string]sa.Request, -) error { - - name := volConfig.InternalName - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Create", - "Type": "SANEconomyStorageDriver", - "name": name, - "attrs": volAttributes, - } - log.WithFields(fields).Debug(">>>> Create") - defer log.WithFields(fields).Debug("<<<< Create") - } - - // Generic user-facing message - createError := errors.New("error volume creation failed") - - // Determine a way to see if the volume already exists - exists, existsInFlexvol, err := d.LUNExists(name, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing volume: %v", err) - return createError - } - if exists { - log.WithFields(log.Fields{"LUN": name, "bucketVol": existsInFlexvol}).Debug("LUN already exists.") - return drivers.NewVolumeExistsError(name) - } - - // Determine volume size in bytes - requestedSize, err := utils.ConvertSizeToBytes(volConfig.Size) - if err != nil { - return fmt.Errorf("error could not convert volume size %s: %v", volConfig.Size, err) - } - sizeBytes, err := strconv.ParseUint(requestedSize, 10, 64) - if err != nil { - return fmt.Errorf("error %v is an invalid volume size: %v", volConfig.Size, err) - } - sizeBytes, err = GetVolumeSize(sizeBytes, d.Config) - if err != nil { - return err - } - - // Ensure LUN name isn't too long - if len(name) > maxLunNameLength { - return fmt.Errorf("volume %s name exceeds the limit of %d characters", name, maxLunNameLength) - } - - // Get options - opts, err := d.GetVolumeOpts(volConfig, storagePool, volAttributes) - if err != nil { - return err - } - - // Get Flexvol options with default fallback values - // see also: ontap_common.go#PopulateConfigurationDefaults - spaceAllocation, _ := strconv.ParseBool(utils.GetV(opts, "spaceAllocation", d.Config.SpaceAllocation)) - spaceReserve := utils.GetV(opts, "spaceReserve", d.Config.SpaceReserve) - snapshotPolicy := utils.GetV(opts, "snapshotPolicy", d.Config.SnapshotPolicy) - aggregate := utils.GetV(opts, "aggregate", d.Config.Aggregate) - encryption := utils.GetV(opts, "encryption", d.Config.Encryption) - - if aggrLimitsErr := checkAggregateLimits(aggregate, spaceReserve, sizeBytes, d.Config, d.GetAPI()); aggrLimitsErr != nil { - return aggrLimitsErr - } - - enableSnapshotDir := false - - enableEncryption, err := strconv.ParseBool(encryption) - if err != nil { - return fmt.Errorf("invalid boolean value for encryption: %v", err) - } - - // Check for a supported file system type - fstype, err := drivers.CheckSupportedFilesystem(utils.GetV(opts, "fstype|fileSystemType", d.Config.FileSystemType), name) - if err != nil { - return err - } - - // Make sure we have a Flexvol for the new LUN - bucketVol, err := d.ensureFlexvolForLUN(aggregate, spaceReserve, snapshotPolicy, enableSnapshotDir, enableEncryption, - sizeBytes, opts, d.Config) - if err != nil { - log.Errorf("BucketVol location/creation failed: %v", err) - return createError - } - - // Grow or shrink the Flexvol as needed - err = d.resizeFlexvol(bucketVol, sizeBytes) - if err != nil { - return createError - } - - lunPath := GetLUNPathEconomy(bucketVol, name) - osType := "linux" - - // Create the LUN - lunCreateResponse, err := d.API.LunCreate(lunPath, int(sizeBytes), osType, false, spaceAllocation) - if err = api.GetError(lunCreateResponse, err); err != nil { 
- return fmt.Errorf("error creating LUN: %v", err) - } - - // Save the fstype in a LUN attribute so we know what to do in Attach - attrResponse, err := d.API.LunSetAttribute(lunPath, LUNAttributeFSType, fstype) - if err = api.GetError(attrResponse, err); err != nil { - defer d.API.LunDestroy(lunPath) - return fmt.Errorf("error saving file system type for LUN: %v", err) - } - // Save the context - attrResponse, err = d.API.LunSetAttribute(lunPath, "context", string(d.Config.DriverContext)) - if err = api.GetError(attrResponse, err); err != nil { - log.WithField("name", name).Warning("Failed to save the driver context attribute for new volume.") - } - - return nil -} - -// Create a volume clone -func (d *SANEconomyStorageDriver) CreateClone(volConfig *storage.VolumeConfig) error { - - source := volConfig.CloneSourceVolumeInternal - name := volConfig.InternalName - snapshot := volConfig.CloneSourceSnapshot - isFromSnapshot := snapshot != "" - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateClone", - "Type": "SANEconomyStorageDriver", - "name": name, - "source": source, - "snapshot": snapshot, - } - log.WithFields(fields).Debug(">>>> CreateClone") - defer log.WithFields(fields).Debug("<<<< CreateClone") - } - - return d.createLUNClone(name, source, snapshot, &d.Config, d.API, d.FlexvolNamePrefix(), isFromSnapshot) -} - -// Create a volume clone -func (d *SANEconomyStorageDriver) createLUNClone( - lunName, source, snapshot string, config *drivers.OntapStorageDriverConfig, client *api.Client, prefix string, isLunCreateFromSnapshot bool, -) error { - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "createLUNClone", - "Type": "ontap_san_economy", - "lunName": lunName, - "source": source, - "snapshot": snapshot, - "prefix": prefix, - } - log.WithFields(fields).Debug(">>>> createLUNClone") - defer log.WithFields(fields).Debug("<<<< createLUNClone") - } - - // If the specified LUN copy already exists, return an error - destinationLunExists, _, err := d.LUNExists(lunName, prefix) - if err != nil { - return fmt.Errorf("error checking for existing LUN: %v", err) - } - if destinationLunExists { - return fmt.Errorf("error LUN %s already exists", lunName) - } - - // Check if called from CreateClone and is from a snapshot - if isLunCreateFromSnapshot { - source = d.helper.GetInternalSnapshotName(source, snapshot) - } - - // If the source doesn't exist, return an error - sourceLunExists, flexvol, err := d.LUNExists(source, prefix) - if err != nil { - return fmt.Errorf("error checking for existing LUN: %v", err) - } - if !sourceLunExists { - return fmt.Errorf("error LUN %s does not exist", source) - } - - // Create the clone based on given LUN - cloneResponse, err := client.LunCloneCreate(flexvol, source, lunName) - if err != nil { - return fmt.Errorf("error creating clone: %v", err) - } - if zerr := api.NewZapiError(cloneResponse); !zerr.IsPassed() { - if zerr.Code() == azgo.EOBJECTNOTFOUND { - return fmt.Errorf("snapshot %s does not exist in volume %s", snapshot, source) - } else if zerr.IsFailedToLoadJobError() { - fields := log.Fields{ - "zerr": zerr, - } - log.WithFields(fields).Warn("Problem encountered during the clone create operation," + - " attempting to verify the clone was actually created") - if volumeLookupError := probeForVolume(lunName, client); volumeLookupError != nil { - return volumeLookupError - } - } else { - return fmt.Errorf("error creating clone: %v", zerr) - } - } - - // Grow or shrink the Flexvol as needed - return 
d.resizeFlexvol(flexvol, 0) -} - -func (d *SANEconomyStorageDriver) Import(volConfig *storage.VolumeConfig, originalName string) error { - return errors.New("import is not implemented") -} - -func (d *SANEconomyStorageDriver) Rename(name string, newName string) error { - return errors.New("rename is not implemented") -} - -// Destroy the LUN -func (d *SANEconomyStorageDriver) Destroy(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Destroy", - "Type": "SANEconomyStorageDriver", - "Name": name, - } - log.WithFields(fields).Debug(">>>> Destroy") - defer log.WithFields(fields).Debug("<<<< Destroy") - } - - var ( - err error - iSCSINodeName string - lunID int - ) - - // Generic user-facing message - deleteError := errors.New("volume deletion failed") - - exists, bucketVol, err := d.LUNExists(name, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing LUN: %v", err) - return err - } - if !exists { - log.Warnf("LUN %v does not exist", name) - return nil - } - - lunPath := GetLUNPathEconomy(bucketVol, d.helper.GetInternalVolumeName(name)) - if d.Config.DriverContext == tridentconfig.ContextDocker { - - // Get target info - iSCSINodeName, _, err = GetISCSITargetInfo(d.API, &d.Config) - if err != nil { - log.WithField("error", err).Error("Could not get target info") - return err - } - - // Get the LUN ID - lunMapResponse, err := d.API.LunMapListInfo(lunPath) - if err != nil { - return fmt.Errorf("error reading LUN maps for volume %s, path %s: %v", name, lunPath, err) - } - lunID = -1 - if lunMapResponse.Result.InitiatorGroupsPtr != nil { - for _, lunMapResponse := range lunMapResponse.Result.InitiatorGroupsPtr.InitiatorGroupInfoPtr { - if lunMapResponse.InitiatorGroupName() == d.Config.IgroupName { - lunID = lunMapResponse.LunId() - } - } - } - if lunID >= 0 { - // Inform the host about the device removal - utils.PrepareDeviceForRemoval(lunID, iSCSINodeName) - } - } - - // Before deleting the LUN, check if a LUN has associated snapshots. If so, delete all associated snapshots - internalName := d.helper.GetVolumeName(lunPath) - snapList, err := d.getSnapshotsEconomy(internalName, name) - if err != nil { - log.Errorf("Error enumerating snapshots: %v", err) - return deleteError - } - for _, snap := range snapList { - err = d.DeleteSnapshot(snap.Config) - if err != nil { - log.Errorf("Error snap-LUN delete failed: %v", err) - return err - } - } - - offlineResponse, err := d.API.LunOffline(lunPath) - if err != nil { - fields := log.Fields{ - "Method": "Destroy", - "Type": "SANEconomyStorageDriver", - "LUN": lunPath, - "Response": offlineResponse, - "Error": err, - } - log.WithFields(fields) - } - - destroyResponse, err := d.API.LunDestroy(lunPath) - if err = api.GetError(destroyResponse, err); err != nil { - log.Errorf("Error LUN delete failed: %v", err) - return deleteError - } - // Check if a bucket volume has no more LUNs. If none left, delete the bucketVol. Else, call for resize - return d.DeleteBucketIfEmpty(bucketVol) -} - -// DeleteBucketIfEmpty will check if the given bucket volume is empty, if the bucket is empty it will be deleted. -// Otherwise, it will be resized. 
-func (d *SANEconomyStorageDriver) DeleteBucketIfEmpty(bucketVol string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Destroy", - "Type": "SANEconomyStorageDriver", - "bucketVol": bucketVol, - } - log.WithFields(fields).Debug(">>>> DeleteBucketIfEmpty") - defer log.WithFields(fields).Debug("<<<< DeleteBucketIfEmpty") - } - - count, err := d.API.LunCount(bucketVol) - if err != nil { - return fmt.Errorf("error enumerating LUNs for volume %s: %v", bucketVol, err) - } - if count == 0 { - // Delete the bucketVol - volDestroyResponse, err := d.API.VolumeDestroy(bucketVol, true) - if err != nil { - return fmt.Errorf("error destroying volume %v: %v", bucketVol, err) - } - if zerr := api.NewZapiError(volDestroyResponse); !zerr.IsPassed() { - if zerr.Code() == azgo.EVOLUMEDOESNOTEXIST { - log.WithField("volume", bucketVol).Warn("Volume already deleted.") - } else { - return fmt.Errorf("error destroying volume %v: %v", bucketVol, zerr) - } - } - } else { - // Grow or shrink the Flexvol as needed - err = d.resizeFlexvol(bucketVol, 0) - if err != nil { - return err - } - } - return nil -} - -// Publish the volume to the host specified in publishInfo. This method may or may not be running on the host -// where the volume will be mounted, so it should limit itself to updating access rules, initiator groups, etc. -// that require some host identity (but not locality) as well as storage controller API access. -func (d *SANEconomyStorageDriver) Publish(name string, publishInfo *utils.VolumePublishInfo) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Publish", - "Type": "SANEconomyStorageDriver", - "name": name, - "publishInfo": publishInfo, - } - log.WithFields(fields).Debug(">>>> Publish") - defer log.WithFields(fields).Debug("<<<< Publish") - } - - exists, bucketVol, err := d.LUNExists(name, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing LUN: %v", err) - return err - } - if !exists { - return fmt.Errorf("error LUN %v does not exist", name) - } - - lunPath := d.helper.GetLUNPath(bucketVol, name) - igroupName := d.Config.IgroupName - - // Get target info - iSCSINodeName, _, err := GetISCSITargetInfo(d.API, &d.Config) - if err != nil { - return err - } - - err = PublishLUN(d.API, &d.Config, d.ips, publishInfo, lunPath, igroupName, iSCSINodeName) - if err != nil { - return fmt.Errorf("error publishing %s driver: %v", d.Name(), err) - } - - return nil -} - -// GetSnapshot gets a snapshot. To distinguish between an API error reading the snapshot -// and a non-existent snapshot, this method may return (nil, nil). 
-func (d *SANEconomyStorageDriver) GetSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshot", - "Type": "SANEconomyStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> GetSnapshot") - defer log.WithFields(fields).Debug("<<<< GetSnapshot") - } - - return d.getSnapshotEconomy(snapConfig, &d.Config, d.API) -} - -func (d *SANEconomyStorageDriver) getSnapshotEconomy( - snapConfig *storage.SnapshotConfig, config *drivers.OntapStorageDriverConfig, client *api.Client, -) (*storage.Snapshot, error) { - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - if config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "getSnapshotEconomy", - "Type": "SANEconomyStorageDriver", - "snapshotName": internalSnapName, - "volumeName": internalVolName, - } - log.WithFields(fields).Debug(">>>> getSnapshotEconomy") - defer log.WithFields(fields).Debug("<<<< getSnapshotEconomy") - } - - fullSnapshotName := d.helper.GetInternalSnapshotName(internalVolName, internalSnapName) - exists, bucketVol, err := d.LUNExists(fullSnapshotName, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing LUN: %v", err) - return nil, err - } - if !exists { - return nil, nil - } - - snapPath := d.helper.GetSnapPath(bucketVol, internalVolName, internalSnapName) - lunInfo, err := d.API.LunGet(snapPath) - if err != nil { - return nil, fmt.Errorf("error reading volume: %v", err) - } - - return &storage.Snapshot{ - Config: snapConfig, - Created: time.Unix(int64(lunInfo.CreationTimestamp()), 0).UTC().Format(storage.SnapshotTimestampFormat), - SizeBytes: int64(lunInfo.Size()), - }, nil - -} - -func (d *SANEconomyStorageDriver) GetSnapshots(volConfig *storage.VolumeConfig) ([]*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "GetSnapshots", - "Type": "SANEconomyStorageDriver", - "internalVolumeName": volConfig.InternalName, - "volumeName": volConfig.Name, - } - log.WithFields(fields).Debug(">>>> GetSnapshots") - defer log.WithFields(fields).Debug("<<<< GetSnapshots") - } - - internalVolName := d.helper.GetInternalVolumeName(volConfig.Name) - exists, _, err := d.LUNExists(internalVolName, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing LUN: %v", err) - return nil, err - } - if !exists { - return nil, fmt.Errorf("error LUN %v does not exist", volConfig.Name) - } - - return d.getSnapshotsEconomy(volConfig.InternalName, volConfig.Name) -} - -func (d *SANEconomyStorageDriver) getSnapshotsEconomy( - internalVolName string, volumeName string, -) ([]*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "getSnapshotsEconomy", - "Type": "SANEconomyStorageDriver", - "volumeName": internalVolName, - "name": volumeName, - } - log.WithFields(fields).Debug(">>>> getSnapshotsEconomy") - defer log.WithFields(fields).Debug("<<<< getSnapshotsEconomy") - } - - snapPathPattern := d.helper.GetSnapPathPatternForVolume(volumeName) - - snapListResponse, err := d.API.LunGetAll(snapPathPattern) - if err = api.GetError(snapListResponse, err); err != nil { - return nil, fmt.Errorf("error enumerating snapshots: %v", err) - } - - log.Debugf("Found %v snapshots.", snapListResponse.Result.NumRecords()) - snapshots := make([]*storage.Snapshot, 
0) - - if snapListResponse.Result.AttributesListPtr == nil { - return nil, fmt.Errorf("error snapshot attribute pointer nil") - } - - for _, snapLunInfo := range snapListResponse.Result.AttributesListPtr.LunInfoPtr { - snapLunPath := string(snapLunInfo.Path()) - // Check to see if it has the following string pattern. If so, add to snapshot List. Else, skip. - if d.helper.IsValidSnapLUNPath(snapLunPath) { - snapLunName := d.helper.GetSnapshotNameFromSnapLUNPath(snapLunPath) - snapshot := &storage.Snapshot{ - Config: &storage.SnapshotConfig{ - Version: tridentconfig.OrchestratorAPIVersion, - Name: snapLunName, - InternalName: snapLunName, - VolumeName: volumeName, - VolumeInternalName: internalVolName, - }, - Created: time.Unix(int64(snapLunInfo.CreationTimestamp()), 0). - UTC().Format(storage.SnapshotTimestampFormat), - SizeBytes: int64(snapLunInfo.Size()), - } - snapshots = append(snapshots, snapshot) - } - } - return snapshots, nil -} - -// CreateSnapshot creates a snapshot for the given volume. -func (d *SANEconomyStorageDriver) CreateSnapshot(snapConfig *storage.SnapshotConfig) (*storage.Snapshot, error) { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateSnapshot", - "Type": "SANEconomyStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - "snapConfig": snapConfig, - } - log.WithFields(fields).Info(">>>> CreateSnapshot") - defer log.WithFields(fields).Info("<<<< CreateSnapshot") - } - - internalSnapName := snapConfig.InternalName - internalVolName := snapConfig.VolumeInternalName - - // Check to see if source LUN exists - _, bucketVol, err := d.LUNExists(snapConfig.VolumeInternalName, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing LUN: %v", err) - return nil, err - } - - internalVolName = GetLUNPathEconomy(bucketVol, snapConfig.VolumeInternalName) - - // If the specified volume doesn't exist, return error - lunInfo, err := d.API.LunGet(internalVolName) - if err != nil { - return nil, fmt.Errorf("error checking for existing volume: %v", err) - } - - if lunInfo.SizePtr == nil { - return nil, fmt.Errorf("error reading volume size: %v", err) - } - size := lunInfo.Size() - - // Create the snapshot name/string - lunName := d.helper.GetSnapshotName(snapConfig.VolumeName, internalSnapName) - - // Create the "snap-LUN" where the snapshot is a LUN clone of the source LUN - err = d.createLUNClone(lunName, snapConfig.VolumeInternalName, snapConfig.Name, &d.Config, d.API, d.FlexvolNamePrefix(), false) - if err != nil { - return nil, fmt.Errorf("could not create snapshot: %v", err) - } - - // Fetching list of snapshots to get snapshot creation time - snapListResponse, err := d.getSnapshotsEconomy(internalVolName, snapConfig.VolumeName) - if err != nil { - return nil, fmt.Errorf("error enumerating snapshots: %v", err) - } - - if snapListResponse != nil { - for _, snap := range snapListResponse { - return &storage.Snapshot{ - Config: snapConfig, - Created: snap.Created, - SizeBytes: int64(size), - }, nil - } - } - return nil, fmt.Errorf("could not find snapshot %s for souce volume %s", internalSnapName, internalVolName) -} - -// RestoreSnapshot restores a volume (in place) from a snapshot. 
-func (d *SANEconomyStorageDriver) RestoreSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "RestoreSnapshot", - "Type": "SANEconomyStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - } - log.WithFields(fields).Debug(">>>> RestoreSnapshot") - defer log.WithFields(fields).Debug("<<<< RestoreSnapshot") - } - - return drivers.NewSnapshotsNotSupportedError(d.Name()) -} - -// DeleteSnapshot deletes a LUN snapshot. -func (d *SANEconomyStorageDriver) DeleteSnapshot(snapConfig *storage.SnapshotConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "DeleteSnapshot", - "Type": "SANEconomyStorageDriver", - "snapshotName": snapConfig.InternalName, - "volumeName": snapConfig.VolumeInternalName, - "snapConfig.VolumeName": snapConfig.VolumeName, - } - log.WithFields(fields).Debug(">>>> DeleteSnapshot") - defer log.WithFields(fields).Debug("<<<< DeleteSnapshot") - } - - internalSnapName := snapConfig.InternalName - // Creating the path string pattern - snapLunName := d.helper.GetSnapshotName(snapConfig.VolumeName, internalSnapName) - - // Check to see if the source LUN exists - exists, bucketVol, err := d.LUNExists(snapLunName, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing LUN: %v", err) - return err - } - if !exists { - return fmt.Errorf("error LUN %v does not exist", snapConfig.VolumeName) - } - - snapPath := GetLUNPathEconomy(bucketVol, snapLunName) - - offlineResponse, err := d.API.LunOffline(snapPath) - if err != nil { - log.WithFields(log.Fields{ - "Method": "DeleteSnapshot", - "Type": "SANEconomyStorageDriver", - "snap-LUN": snapPath, - "Response": offlineResponse, - "Error": err, - }).Warn("Error attempting to offline snap-LUN.") - } - - destroyResponse, err := d.API.LunDestroy(snapPath) - if err = api.GetError(destroyResponse, err); err != nil { - log.Errorf("Snap-LUN delete failed: %v", err) - return fmt.Errorf("error deleting snapshot: %v", err) - } - - // Check if a bucket volume has no more LUNs. If none left, delete the bucketVol. Else, call for resize - return d.DeleteBucketIfEmpty(bucketVol) -} - -// Test for the existence of a volume -func (d *SANEconomyStorageDriver) Get(name string) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{"Method": "Get", "Type": "SANEconomyStorageDriver"} - log.WithFields(fields).Debug(">>>> Get") - defer log.WithFields(fields).Debug("<<<< Get") - } - - // Generic user-facing message - getError := fmt.Errorf("volume %s not found", name) - - internalVolName := d.helper.GetInternalVolumeName(name) - exists, bucketVol, err := d.LUNExists(internalVolName, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing LUN: %v", err) - return getError - } - if !exists { - log.WithField("LUN", name).Debug("LUN not found.") - return getError - } - - log.WithFields(log.Fields{"LUN": name, "bucketVol": bucketVol}).Debug("Volume found.") - - return nil -} - -// ensureFlexvolForLUN accepts a set of Flexvol characteristics and either finds one to contain a new -// LUN or it creates a new Flexvol with the needed attributes. 
-func (d *SANEconomyStorageDriver) ensureFlexvolForLUN( - aggregate, spaceReserve, snapshotPolicy string, enableSnapshotDir bool, encrypt bool, - sizeBytes uint64, opts map[string]string, config drivers.OntapStorageDriverConfig, -) (string, error) { - - shouldLimitVolumeSize, flexvolSizeLimit, checkVolumeSizeLimitsError := drivers.CheckVolumeSizeLimits(sizeBytes, - config.CommonStorageDriverConfig) - if checkVolumeSizeLimitsError != nil { - return "", checkVolumeSizeLimitsError - } - - // Check if a suitable Flexvol already exists - flexvol, err := d.getFlexvolForLUN(aggregate, spaceReserve, snapshotPolicy, enableSnapshotDir, encrypt, sizeBytes, - shouldLimitVolumeSize, flexvolSizeLimit) - - if err != nil { - return "", fmt.Errorf("error finding Flexvol for LUN: %v", err) - } - - // Found one - if flexvol != "" { - return flexvol, nil - } - - // Nothing found, so create a suitable Flexvol - flexvol, err = d.createFlexvolForLUN(aggregate, spaceReserve, snapshotPolicy, enableSnapshotDir, encrypt, opts) - if err != nil { - return "", fmt.Errorf("error creating Flexvol for LUN: %v", err) - } - - return flexvol, nil -} - -// createFlexvolForLUN creates a new Flexvol matching the specified attributes for -// the purpose of containing LUN supplied as container volumes by this driver. -// Once this method returns, the Flexvol exists, is mounted -func (d *SANEconomyStorageDriver) createFlexvolForLUN( - aggregate, spaceReserve, snapshotPolicy string, enableSnapshotDir bool, encrypt bool, opts map[string]string, -) (string, error) { - - flexvol := d.FlexvolNamePrefix() + utils.RandomString(10) - size := "1g" - unixPermissions := utils.GetV(opts, "unixPermissions", d.Config.UnixPermissions) - exportPolicy := utils.GetV(opts, "exportPolicy", d.Config.ExportPolicy) - securityStyle := utils.GetV(opts, "securityStyle", d.Config.SecurityStyle) - - encryption := encrypt - - snapshotReserveInt, err := GetSnapshotReserve(snapshotPolicy, d.Config.SnapshotReserve) - if err != nil { - return "", fmt.Errorf("invalid value for snapshotReserve: %v", err) - } - - log.WithFields(log.Fields{ - "name": flexvol, - "aggregate": aggregate, - "size": size, - "spaceReserve": spaceReserve, - "snapshotPolicy": snapshotPolicy, - "snapshotReserve": snapshotReserveInt, - "unixPermissions": unixPermissions, - "snapshotDir": enableSnapshotDir, - "exportPolicy": exportPolicy, - "securityStyle": securityStyle, - "encryption": encryption, - }).Debug("Creating Flexvol for LUNs.") - - // Create the flexvol - volCreateResponse, err := d.API.VolumeCreate( - flexvol, aggregate, size, spaceReserve, snapshotPolicy, - unixPermissions, exportPolicy, securityStyle, encrypt, snapshotReserveInt) - - if err = api.GetError(volCreateResponse, err); err != nil { - return "", fmt.Errorf("error creating volume: %v", err) - } - - // Disable '.snapshot' to allow official mysql container's chmod-in-init to work - if !enableSnapshotDir { - snapDirResponse, err := d.API.VolumeDisableSnapshotDirectoryAccess(flexvol) - if err = api.GetError(snapDirResponse, err); err != nil { - return "", fmt.Errorf("error disabling snapshot directory access: %v", err) - } - } - - return flexvol, nil -} - -// getFlexvolForLUN returns a Flexvol (from the set of existing Flexvols) that -// matches the specified Flexvol attributes and does not already contain more -// than the maximum configured number of LUNs. No matching Flexvols is not -// considered an error. If more than one matching Flexvol is found, one of those -// is returned at random. 
-func (d *SANEconomyStorageDriver) getFlexvolForLUN( - aggregate, spaceReserve, snapshotPolicy string, enableSnapshotDir bool, encrypt bool, - sizeBytes uint64, shouldLimitFlexvolSize bool, flexvolSizeLimit uint64, -) (string, error) { - - // Get all volumes matching the specified attributes - volListResponse, err := d.API.VolumeListByAttrs( - d.FlexvolNamePrefix(), aggregate, spaceReserve, snapshotPolicy, enableSnapshotDir, encrypt) - - if err = api.GetError(volListResponse, err); err != nil { - return "", fmt.Errorf("error enumerating Flexvols: %v", err) - } - - // Weed out the Flexvols: - // 1) already having too many LUNs - // 2) exceeding size limits - var volumes []string - if volListResponse.Result.AttributesListPtr != nil { - for _, volAttrs := range volListResponse.Result.AttributesListPtr.VolumeAttributesPtr { - volIDAttrs := volAttrs.VolumeIdAttributes() - volName := string(volIDAttrs.Name()) - // skip flexvols over the size limit - if shouldLimitFlexvolSize { - sizeWithRequest, err := d.getOptimalSizeForFlexvol(volName, sizeBytes) - if err != nil { - log.Errorf("Error checking size for existing LUN %v: %v", volName, err) - continue - } - if sizeWithRequest > flexvolSizeLimit { - log.Debugf("Flexvol size for %v is over the limit of %v", volName, flexvolSizeLimit) - continue - } - } - - count := 0 - listResponse, err := d.API.LunGetAllForVolume(volName) - if err != nil { - return "", fmt.Errorf("error enumerating LUNs: %v", err) - } - if listResponse.Result.AttributesListPtr != nil { - for _, lunInfo := range listResponse.Result.AttributesListPtr.LunInfoPtr { - lunPath := lunInfo.Path() - if !d.helper.IsValidSnapLUNPath(lunPath) { - count++ - } - } - } - - if count < maxLunsPerFlexvol { - volumes = append(volumes, volName) - } - } - } - - // Pick a Flexvol. If there are multiple matches, pick one at random. - switch len(volumes) { - case 0: - return "", nil - case 1: - return volumes[0], nil - default: - return volumes[rand.Intn(len(volumes))], nil - } -} - -// getOptimalSizeForFlexvol sums up all the LUN sizes on a Flexvol and adds the size of -// the new LUN being added as well as the current Flexvol snapshot reserve. This value may be used -// to grow the Flexvol as new LUNs are being added. 
-func (d *SANEconomyStorageDriver) getOptimalSizeForFlexvol(flexvol string, newLunSizeBytes uint64) (uint64, error) { - - // Get more info about the Flexvol - volAttrs, err := d.API.VolumeGet(flexvol) - if err != nil { - return 0, err - } - volSpaceAttrs := volAttrs.VolumeSpaceAttributes() - snapReserveDivisor := 1.0 - (float64(volSpaceAttrs.PercentageSnapshotReserve()) / 100.0) - - totalDiskLimitBytes, err := d.getTotalLUNSize(flexvol) - if err != nil { - return 0, err - } - - usableSpaceBytes := float64(newLunSizeBytes + totalDiskLimitBytes) - flexvolSizeBytes := uint64(usableSpaceBytes / snapReserveDivisor) - - log.WithFields(log.Fields{ - "flexvol": flexvol, - "snapReserveDivisor": snapReserveDivisor, - "totalDiskLimitBytes": totalDiskLimitBytes, - "newLUNSizeBytes": newLunSizeBytes, - "flexvolSizeBytes": flexvolSizeBytes, - }).Debug("Calculated optimal size for Flexvol with new LUN.") - - return flexvolSizeBytes, nil -} - -//getLUNSize returns the size of the LUN -func (d *SANEconomyStorageDriver) getLUNSize(name string, flexvol string) (uint64, error) { - - lunTarget := GetLUNPathEconomy(flexvol, name) - lun, err := d.API.LunGet(lunTarget) - if err != nil { - return 0, err - } - - lunSize := uint64(lun.Size()) - if lunSize == 0 { - return 0, fmt.Errorf("unable to determine LUN size") - } - return lunSize, nil -} - -//getTotalLUNSize returns the sum of all LUN sizes on a Flexvol -func (d *SANEconomyStorageDriver) getTotalLUNSize(flexvol string) (uint64, error) { - - listResponse, err := d.API.LunGetAllForVolume(flexvol) - if err != nil { - return 0, err - } - - var totalDiskLimit uint64 - - if listResponse.Result.AttributesListPtr != nil { - for _, lunInfo := range listResponse.Result.AttributesListPtr.LunInfoPtr { - diskLimitSize := lunInfo.Size() - totalDiskLimit += uint64(diskLimitSize) - } - } - return totalDiskLimit, nil -} - -// Retrieve storage backend capabilities -func (d *SANEconomyStorageDriver) GetStorageBackendSpecs(backend *storage.Backend) error { - if d.Config.BackendName == "" { - // Use the old naming scheme if no name is specified - backend.Name = "ontapsaneco_" + d.ips[0] - } else { - backend.Name = d.Config.BackendName - } - poolAttrs := d.getStoragePoolAttributes() - return getStorageBackendSpecsCommon(d, backend, poolAttrs) -} - -func (d *SANEconomyStorageDriver) getStoragePoolAttributes() map[string]sa.Offer { - - return map[string]sa.Offer{ - sa.BackendType: sa.NewStringOffer(d.Name()), - sa.Snapshots: sa.NewBoolOffer(true), - sa.Clones: sa.NewBoolOffer(true), - sa.Encryption: sa.NewBoolOffer(true), - sa.ProvisioningType: sa.NewStringOffer("thick", "thin"), - } -} - -func (d *SANEconomyStorageDriver) GetVolumeOpts( - volConfig *storage.VolumeConfig, pool *storage.Pool, requests map[string]sa.Request, -) (map[string]string, error) { - return getVolumeOptsCommon(volConfig, pool, requests), nil -} - -func (d *SANEconomyStorageDriver) GetInternalVolumeName(name string) string { - return getInternalVolumeNameCommon(d.Config.CommonStorageDriverConfig, name) -} - -func (d *SANEconomyStorageDriver) CreatePrepare(volConfig *storage.VolumeConfig) error { - return createPrepareCommon(d, volConfig) -} - -func (d *SANEconomyStorageDriver) CreateFollowup(volConfig *storage.VolumeConfig) error { - - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "CreateFollowup", - "Type": "SANEconomyStorageDriver", - "name": volConfig.Name, - "internalName": volConfig.InternalName, - } - log.WithFields(fields).Debug(">>>> CreateFollowup") - defer 
log.WithFields(fields).Debug("<<<< CreateFollowup") - } - - if d.Config.DriverContext == tridentconfig.ContextDocker { - log.Debug("No follow-up create actions for Docker.") - return nil - } - - return d.mapOntapSANLUN(volConfig) -} - -func (d *SANEconomyStorageDriver) mapOntapSANLUN(volConfig *storage.VolumeConfig) error { - - // Determine which flexvol contains the LUN - exists, flexvol, err := d.LUNExists(volConfig.InternalName, d.FlexvolNamePrefix()) - if err != nil { - return fmt.Errorf("could not determine if LUN %s exists: %v", volConfig.InternalName, err) - } - if !exists { - return fmt.Errorf("could not find LUN %s", volConfig.InternalName) - } - // Map LUN - lunPath := GetLUNPathEconomy(flexvol, volConfig.InternalName) - lunID, err := d.API.LunMapIfNotMapped(d.Config.IgroupName, lunPath) - if err != nil { - return err - } - - err = PopulateOntapLunMapping(d.API, &d.Config, d.ips, volConfig, lunID) - if err != nil { - return fmt.Errorf("error mapping LUN for %s driver: %v", d.Name(), err) - } - - return nil -} - -func (d *SANEconomyStorageDriver) GetProtocol() tridentconfig.Protocol { - return tridentconfig.Block -} - -func (d *SANEconomyStorageDriver) StoreConfig(b *storage.PersistentStorageBackendConfig) { - drivers.SanitizeCommonStorageDriverConfig(d.Config.CommonStorageDriverConfig) - b.OntapConfig = &d.Config -} - -func (d *SANEconomyStorageDriver) GetExternalConfig() interface{} { - return getExternalConfig(d.Config) -} - -// GetVolumeExternal queries the storage backend for all relevant info about -// a single container volume managed by this driver and returns a VolumeExternal -// representation of the volume. -func (d *SANEconomyStorageDriver) GetVolumeExternal(name string) (*storage.VolumeExternal, error) { - - _, flexvol, err := d.LUNExists(name, d.FlexvolNamePrefix()) - if err != nil { - return nil, err - } - - volumeAttrs, err := d.API.VolumeGet(flexvol) - if err != nil { - return nil, err - } - - lunPath := GetLUNPathEconomy(flexvol, name) - lunAttrs, err := d.API.LunGet(lunPath) - if err != nil { - return nil, err - } - - return d.getVolumeExternal(lunAttrs, volumeAttrs), nil -} - -// GetVolumeExternalWrappers queries the storage backend for all relevant info about -// container volumes managed by this driver. It then writes a VolumeExternal -// representation of each volume to the supplied channel, closing the channel -// when finished. 
-func (d *SANEconomyStorageDriver) GetVolumeExternalWrappers( - channel chan *storage.VolumeExternalWrapper) { - - // Let the caller know we're done by closing the channel - defer close(channel) - - // Get all volumes matching the storage prefix - volumesResponse, err := d.API.VolumeGetAll(d.FlexvolNamePrefix()) - if err = api.GetError(volumesResponse, err); err != nil { - channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err} - return - } - - // Bail out early if there aren't any Flexvols - if volumesResponse.Result.AttributesListPtr == nil { - return - } - if len(volumesResponse.Result.AttributesListPtr.VolumeAttributesPtr) == 0 { - return - } - - // Get all LUNs in volumes matching the storage prefix - lunPathPattern := fmt.Sprintf("/vol/%v/*", d.flexvolNamePrefix+"*") - lunsResponse, err := d.API.LunGetAll(lunPathPattern) - if err = api.GetError(lunsResponse, err); err != nil { - channel <- &storage.VolumeExternalWrapper{Volume: nil, Error: err} - return - } - - // Make a map of volumes for faster correlation with LUNs - volumeMap := make(map[string]azgo.VolumeAttributesType) - if volumesResponse.Result.AttributesListPtr != nil { - for _, volumeAttrs := range volumesResponse.Result.AttributesListPtr.VolumeAttributesPtr { - internalName := string(volumeAttrs.VolumeIdAttributesPtr.Name()) - volumeMap[internalName] = volumeAttrs - } - } - - // Convert all LUNs to VolumeExternal and write them to the channel - if lunsResponse.Result.AttributesListPtr != nil { - for _, lun := range lunsResponse.Result.AttributesListPtr.LunInfoPtr { - volume, ok := volumeMap[lun.Volume()] - if !ok { - log.WithField("path", lun.Path()).Warning("Flexvol not found for LUN.") - continue - } - - channel <- &storage.VolumeExternalWrapper{Volume: d.getVolumeExternal(&lun, &volume), Error: nil} - } - } -} - -// getVolumeExternal is a private method that accepts info about a volume -// as returned by the storage backend and formats it as a VolumeExternal -// object. 
-func (d *SANEconomyStorageDriver) getVolumeExternal( - lunAttrs *azgo.LunInfoType, volumeAttrs *azgo.VolumeAttributesType, -) *storage.VolumeExternal { - - volumeIDAttrs := volumeAttrs.VolumeIdAttributesPtr - volumeSnapshotAttrs := volumeAttrs.VolumeSnapshotAttributesPtr - internalName := d.helper.GetVolumeName(lunAttrs.Path()) - - volumeConfig := &storage.VolumeConfig{ - Version: tridentconfig.OrchestratorAPIVersion, - Name: internalName, - InternalName: internalName, - Size: strconv.FormatInt(int64(lunAttrs.Size()), 10), - Protocol: tridentconfig.Block, - SnapshotPolicy: volumeSnapshotAttrs.SnapshotPolicy(), - ExportPolicy: "", - SnapshotDir: "", - UnixPermissions: "", - StorageClass: "", - AccessMode: tridentconfig.ReadWriteOnce, - AccessInfo: utils.VolumeAccessInfo{}, - BlockSize: "", - FileSystem: "", - } - - return &storage.VolumeExternal{ - Config: volumeConfig, - Pool: volumeIDAttrs.ContainingAggregateName(), - } -} - -// GetUpdateType returns a bitmap populated with updates to the driver -func (d *SANEconomyStorageDriver) GetUpdateType(driverOrig storage.Driver) *roaring.Bitmap { - bitmap := roaring.New() - dOrig, ok := driverOrig.(*SANEconomyStorageDriver) - if !ok { - bitmap.Add(storage.InvalidUpdate) - return bitmap - } - - if d.Config.DataLIF != dOrig.Config.DataLIF { - bitmap.Add(storage.VolumeAccessInfoChange) - } - - if d.Config.Password != dOrig.Config.Password { - bitmap.Add(storage.PasswordChange) - } - - if d.Config.Username != dOrig.Config.Username { - bitmap.Add(storage.UsernameChange) - } - - return bitmap -} - -// LUNExists returns true if the named LUN exists across all buckets -func (d *SANEconomyStorageDriver) LUNExists(volName, bucketPrefix string) (bool, string, error) { - - volNameInternal := d.helper.GetInternalVolumeName(volName) - log.WithFields(log.Fields{ - "volNameInternal": volNameInternal, - "bucketPrefix": bucketPrefix, - }).Debug("LUNExists") - listResponse, err := d.API.LunGetAll(fmt.Sprintf("/vol/%s*/%s", bucketPrefix, volNameInternal)) - - if err != nil { - return false, "", err - } - if listResponse.Result.AttributesListPtr != nil { - for _, lunInfo := range listResponse.Result.AttributesListPtr.LunInfoPtr { - log.WithFields(log.Fields{ - "lunInfo": lunInfo, - "lunInfo.Path()": lunInfo.Path(), - }).Debug("LUNExists") - if strings.HasSuffix(lunInfo.Path(), volNameInternal) { - flexvol := listResponse.Result.AttributesListPtr.LunInfoPtr[0].Volume() - log.WithFields(log.Fields{ - "flexvol": flexvol, - }).Debug("LUNExists") - return true, flexvol, nil - } - } - } - return false, "", nil -} - -// Resize expands the Flexvol containing the LUN and updates the LUN size -func (d *SANEconomyStorageDriver) Resize(volConfig *storage.VolumeConfig, sizeBytes uint64) error { - - name := volConfig.InternalName - if d.Config.DebugTraceFlags["method"] { - fields := log.Fields{ - "Method": "Resize", - "Type": "SANEconomyStorageDriver", - "name": name, - "sizeBytes": sizeBytes, - } - log.WithFields(fields).Debug(">>>> Resize") - defer log.WithFields(fields).Debug("<<<< Resize") - } - - // Generic user-facing message - resizeError := errors.New("storage driver failed to resize the volume") - - // Validation checks - // get the volume where the lun exists - exists, bucketVol, err := d.LUNExists(name, d.FlexvolNamePrefix()) - if err != nil { - log.Errorf("Error checking for existing volume: %v", err) - return resizeError - } - if !exists { - return fmt.Errorf("error LUN %s does not exist", name) - } - - // Calculate the delta size needed to resize the bucketVol - 
totalLunSize, err := d.getTotalLUNSize(bucketVol) - if err != nil { - log.WithField("error", err).Error("Failed to determine total LUN size") - return resizeError - } - - sameSize, err := utils.VolumeSizeWithinTolerance(int64(sizeBytes), int64(totalLunSize), tridentconfig.SANResizeDelta) - if err != nil { - return err - } - - if sameSize { - log.WithFields(log.Fields{ - "requestedSize": sizeBytes, - "currentVolumeSize": totalLunSize, - "name": name, - "delta": tridentconfig.SANResizeDelta, - }).Info("Requested size and current volume size are within the delta and therefore considered the same size for SAN resize operations.") - return nil - } - - totalLunSizeBytes := uint64(totalLunSize) - if sizeBytes < totalLunSizeBytes { - return fmt.Errorf("requested size %d is less than existing volume size %d", sizeBytes, totalLunSizeBytes) - } - - if aggrLimitsErr := checkAggregateLimitsForFlexvol(bucketVol, sizeBytes, d.Config, d.GetAPI()); aggrLimitsErr != nil { - return aggrLimitsErr - } - - if _, _, checkVolumeSizeLimitsError := drivers.CheckVolumeSizeLimits(sizeBytes, d.Config.CommonStorageDriverConfig); checkVolumeSizeLimitsError != nil { - return checkVolumeSizeLimitsError - } - - // Resize operations - lunPath := d.helper.GetLUNPath(bucketVol, name) - if !d.API.SupportsFeature(api.LunGeometrySkip) { - // Check LUN geometry and verify LUN max size. - lunGeometry, err := d.API.LunGetGeometry(lunPath) - if err != nil { - log.WithField("error", err).Error("LUN resize failed.") - return fmt.Errorf("volume resize failed") - } - - lunMaxSize := lunGeometry.Result.MaxResizeSize() - if lunMaxSize < int(sizeBytes) { - log.WithFields(log.Fields{ - "error": err, - "sizeBytes": sizeBytes, - "lunMaxSize": lunMaxSize, - "lunPath": lunPath, - }).Error("Requested size is larger than LUN's maximum capacity.") - return fmt.Errorf("volume resize failed as requested size is larger than LUN's maximum capacity") - } - } - - // Resize FlexVol - response, err := d.API.VolumeSetSize(bucketVol, strconv.FormatUint(sizeBytes, 10)) - if err = api.GetError(response, err); err != nil { - log.WithField("error", err).Error("Volume resize failed.") - return fmt.Errorf("volume resize failed") - } - - // Resize LUN - returnSize, err := d.API.LunResize(lunPath, int(sizeBytes)) - if err != nil { - log.WithField("error", err).Error("LUN resize failed.") - return fmt.Errorf("volume resize failed") - } - log.WithField("size", returnSize).Debug("Returning.") - volConfig.Size = strconv.FormatUint(returnSize, 10) - - return nil -} - -// resizeFlexvol grows or shrinks the Flexvol to an optimal size if possible. Otherwise -// the Flexvol is expanded by the value of sizeBytes -func (d *SANEconomyStorageDriver) resizeFlexvol(flexvol string, sizeBytes uint64) error { - flexvolSizeBytes, err := d.getOptimalSizeForFlexvol(flexvol, sizeBytes) - if err != nil { - log.Warnf("Could not calculate optimal Flexvol size. 
%v", err) - // Lacking the optimal size, just grow the Flexvol to contain the new LUN - size := strconv.FormatUint(sizeBytes, 10) - resizeResponse, err := d.API.VolumeSetSize(flexvol, "+"+size) - if err = api.GetError(resizeResponse, err); err != nil { - return fmt.Errorf("flexvol resize failed: %v", err) - } - } else { - // Got optimal size, so just set the Flexvol to that value - flexvolSizeStr := strconv.FormatUint(flexvolSizeBytes, 10) - resizeResponse, err := d.API.VolumeSetSize(flexvol, flexvolSizeStr) - if err = api.GetError(resizeResponse, err); err != nil { - return fmt.Errorf("flexvol resize failed: %v", err) - } - } - return nil -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/account.go b/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/account.go deleted file mode 100644 index f1daacbe2..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/account.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package api - -import ( - "encoding/json" - "errors" - - log "github.com/sirupsen/logrus" -) - -// AddAccount tbd -func (c *Client) AddAccount(req *AddAccountRequest) (accountID int64, err error) { - var result AddAccountResult - response, err := c.Request("AddAccount", req, NewReqID()) - if err != nil { - log.Errorf("Error detected in AddAccount API response: %+v", err) - return 0, errors.New("device API error") - } - - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected in AddAccount API response: %+v", err) - return 0, errors.New("device API error") - } - return result.Result.AccountID, nil -} - -// GetAccountByName tbd -func (c *Client) GetAccountByName(req *GetAccountByNameRequest) (account Account, err error) { - response, err := c.Request("GetAccountByName", req, NewReqID()) - if err != nil { - return - } - - var result GetAccountResult - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling GetAccountByName API response: %+v", err) - return Account{}, errors.New("json-decode error") - } - log.Debugf("returning account: %+v", result.Result.Account) - return result.Result.Account, err -} - -// GetAccountByID tbd -func (c *Client) GetAccountByID(req *GetAccountByIDRequest) (account Account, err error) { - var result GetAccountResult - response, err := c.Request("GetAccountByID", req, NewReqID()) - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling GetAccountByID API response: %+v", err) - return account, errors.New("json-decode error") - } - return result.Result.Account, err -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/api.go b/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/api.go deleted file mode 100644 index cd85c6a87..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/api.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. 
- -package api - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "errors" - "io/ioutil" - "math/rand" - "net/http" - "strings" - "time" - - log "github.com/sirupsen/logrus" - - tridentconfig "github.com/netapp/trident/config" - "github.com/netapp/trident/utils" -) - -const httpContentType = "json-rpc" - -// Client is used to send API requests to a SolidFire system system -type Client struct { - SVIP string - Endpoint string - Config *Config - DefaultAPIPort int - VolumeTypes *[]VolType - AccessGroups []int64 - DefaultBlockSize int64 - DebugTraceFlags map[string]bool - AccountID int64 -} - -// Config holds the configuration data for the Client to communicate with a SolidFire storage system -type Config struct { - TenantName string - EndPoint string - MountPoint string - SVIP string - InitiatorIFace string //iface to use of iSCSI initiator - Types *[]VolType - LegacyNamePrefix string - AccessGroups []int64 - DefaultBlockSize int64 - DebugTraceFlags map[string]bool -} - -// VolType holds quality of service configuration data -type VolType struct { - Type string - QOS QoS -} - -// NewFromParameters is a factory method to create a new sfapi.Client object using the supplied parameters -func NewFromParameters(pendpoint string, psvip string, pcfg Config) (c *Client, err error) { - SFClient := &Client{ - Endpoint: pendpoint, - SVIP: psvip, - Config: &pcfg, - DefaultAPIPort: 443, - VolumeTypes: pcfg.Types, - DefaultBlockSize: pcfg.DefaultBlockSize, - DebugTraceFlags: pcfg.DebugTraceFlags, - } - return SFClient, nil -} - -// Request performs a json-rpc POST to the configured endpoint -func (c *Client) Request(method string, params interface{}, id int) ([]byte, error) { - - var err error - var request *http.Request - var response *http.Response - var prettyRequestBuffer bytes.Buffer - var prettyResponseBuffer bytes.Buffer - - if c.Endpoint == "" { - log.Error("endpoint is not set, unable to issue json-rpc requests") - err = errors.New("no endpoint set") - return nil, err - } - - requestBody, err := json.Marshal(map[string]interface{}{ - "method": method, - "id": id, - "params": params, - }) - - // Create the request - request, err = http.NewRequest("POST", c.Endpoint, strings.NewReader(string(requestBody))) - if err != nil { - return nil, err - } - request.Header.Set("Content-Type", httpContentType) - - // Log the request - if c.Config.DebugTraceFlags["api"] { - json.Indent(&prettyRequestBuffer, requestBody, "", " ") - utils.LogHTTPRequest(request, prettyRequestBuffer.Bytes()) - } - - // Send the request - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - httpClient := &http.Client{ - Transport: tr, - Timeout: time.Duration(tridentconfig.StorageAPITimeoutSeconds * time.Second), - } - response, err = httpClient.Do(request) - if err != nil { - log.Errorf("Error response from SolidFire API request: %v", err) - return nil, errors.New("device API error") - } - - // Handle HTTP errors such as 401 (Unauthorized) - httpError := utils.NewHTTPError(response) - if httpError != nil { - log.WithFields(log.Fields{ - "request": method, - "responseCode": response.StatusCode, - "responseStatus": response.Status, - }).Errorf("API request failed.") - return nil, *httpError - } - - defer response.Body.Close() - responseBody, err := ioutil.ReadAll(response.Body) - if err != nil { - return responseBody, err - } - - // Log the response - if c.Config.DebugTraceFlags["api"] { - if c.shouldLogResponseBody(method) { - json.Indent(&prettyResponseBuffer, responseBody, "", " ") - 
utils.LogHTTPResponse(response, prettyResponseBuffer.Bytes()) - - } else { - utils.LogHTTPResponse(response, []byte("")) - } - } - - // Look for any errors returned from the controller - apiError := Error{} - json.Unmarshal([]byte(responseBody), &apiError) - if apiError.Fields.Code != 0 { - log.WithFields(log.Fields{ - "ID": apiError.ID, - "code": apiError.Fields.Code, - "message": apiError.Fields.Message, - "name": apiError.Fields.Name, - }).Error("Error detected in API response.") - return nil, apiError - } - - return responseBody, nil -} - -// shouldLogResponseBody prevents logging the REST response body for APIs that are -// extremely lengthy for no good reason or that return sensitive data like iSCSI secrets. -func (c *Client) shouldLogResponseBody(method string) bool { - - switch method { - case "GetAccountByName", "GetAccountByID", "ListAccounts": - return c.Config.DebugTraceFlags["sensitive"] - case "GetClusterHardwareInfo": - return c.Config.DebugTraceFlags["hardwareInfo"] - default: - return true - } -} - -// NewReqID generates a random id for a request -func NewReqID() int { - return rand.Intn(1000-1) + 1 -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/capacity.go b/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/capacity.go deleted file mode 100644 index be8c34634..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/capacity.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package api - -import ( - "encoding/json" - "errors" - - log "github.com/sirupsen/logrus" -) - -// Get cluster capacity stats -func (c *Client) GetClusterCapacity() (capacity *ClusterCapacity, err error) { - var ( - clusterCapReq GetClusterCapacityRequest - clusterCapResult GetClusterCapacityResult - ) - - response, err := c.Request("GetClusterCapacity", clusterCapReq, NewReqID()) - if err != nil { - log.Errorf("Error detected in GetClusterCapacity API response: %+v", err) - return nil, errors.New("device API error") - } - if err := json.Unmarshal([]byte(response), &clusterCapResult); err != nil { - log.Errorf("Error detected unmarshalling json response: %+v", err) - return nil, errors.New("json decode error") - } - return &clusterCapResult.Result.ClusterCapacity, err -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/hardware.go b/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/hardware.go deleted file mode 100644 index ee6740a81..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/hardware.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. 
- -package api - -import ( - "encoding/json" - "errors" - - log "github.com/sirupsen/logrus" -) - -// Get cluster hardware info -func (c *Client) GetClusterHardwareInfo() (*ClusterHardwareInfo, error) { - var ( - clusterHardwareInfoReq struct{} - clusterHardwareInfoResult GetClusterHardwareInfoResult - ) - - response, err := c.Request("GetClusterHardwareInfo", clusterHardwareInfoReq, NewReqID()) - if err != nil { - log.Errorf("Error detected in GetClusterHardwareInfo API response: %+v", err) - return nil, errors.New("device API error") - } - - if err := json.Unmarshal([]byte(response), &clusterHardwareInfoResult); err != nil { - log.Errorf("Error detected unmarshalling json response: %+v", err) - return nil, errors.New("json decode error") - } - return &clusterHardwareInfoResult.Result.ClusterHardwareInfo, err -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/qos.go b/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/qos.go deleted file mode 100644 index 6ece136ca..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/qos.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package api - -import ( - "encoding/json" - "errors" - - log "github.com/sirupsen/logrus" -) - -// Get default QoS information -func (c *Client) GetDefaultQoS() (*QoS, error) { - var ( - defaultQoSReq DefaultQoSRequest - defaultQoSResult DefaultQoSResult - ) - - response, err := c.Request("GetDefaultQoS", defaultQoSReq, NewReqID()) - if err != nil { - log.Errorf("error detected in GetDefaultQoS API response: %+v", err) - return nil, errors.New("device API error") - } - if err := json.Unmarshal([]byte(response), &defaultQoSResult); err != nil { - log.Errorf("error detected unmarshalling json response: %+v", err) - return nil, errors.New("json decode error") - } - - return &QoS{ - BurstIOPS: defaultQoSResult.Result.BurstIOPS, - MaxIOPS: defaultQoSResult.Result.MaxIOPS, - MinIOPS: defaultQoSResult.Result.MinIOPS, - }, err -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/snapshot.go b/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/snapshot.go deleted file mode 100644 index 66cc32bed..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/snapshot.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. 
- -package api - -import ( - "encoding/json" - "errors" - - log "github.com/sirupsen/logrus" -) - -func (c *Client) CreateSnapshot(req *CreateSnapshotRequest) (snapshot Snapshot, err error) { - response, err := c.Request("CreateSnapshot", req, NewReqID()) - var result CreateSnapshotResult - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling CreateSnapshot json response: %+v", err) - return Snapshot{}, errors.New("json decode error") - } - return c.GetSnapshot(result.Result.SnapshotID, req.VolumeID, "") -} - -func (c *Client) GetSnapshot(snapID, volID int64, sfName string) (s Snapshot, err error) { - var listReq ListSnapshotsRequest - listReq.VolumeID = volID - snapshots, err := c.ListSnapshots(&listReq) - if err != nil { - log.Errorf("Error in GetSnapshot from ListSnapshots: %+v", err) - return Snapshot{}, errors.New("failed to perform ListSnapshots") - } - for _, snap := range snapshots { - if snapID == snap.SnapshotID { - s = snap - break - } else if sfName != "" && sfName == snap.Name { - s = snap - break - } - } - return s, err -} - -func (c *Client) ListSnapshots(req *ListSnapshotsRequest) (snapshots []Snapshot, err error) { - response, err := c.Request("ListSnapshots", req, NewReqID()) - if err != nil { - log.Errorf("Error in ListSnapshots: %+v", err) - return nil, errors.New("failed to retrieve snapshots") - } - var result ListSnapshotsResult - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling ListSnapshots json response: %+v", err) - return nil, errors.New("json decode error") - } - snapshots = result.Result.Snapshots - return - -} - -func (c *Client) RollbackToSnapshot(req *RollbackToSnapshotRequest) (newSnapID int64, err error) { - response, err := c.Request("RollbackToSnapshot", req, NewReqID()) - if err != nil { - log.Errorf("Error in RollbackToSnapshot: %+v", err) - return 0, errors.New("failed to rollback snapshot") - } - var result RollbackToSnapshotResult - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling RollbackToSnapshot json response: %+v", err) - return 0, errors.New("json decode error") - } - newSnapID = result.Result.SnapshotID - err = nil - return - -} - -func (c *Client) DeleteSnapshot(snapshotID int64) (err error) { - // TODO(jdg): Add options like purge=True|False, range, ALL etc - var req DeleteSnapshotRequest - req.SnapshotID = snapshotID - _, err = c.Request("DeleteSnapshot", req, NewReqID()) - if err != nil { - log.Errorf("Error in DeleteSnapshot: %+v", err) - return errors.New("failed to delete snapshot") - } - return -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/types.go b/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/types.go deleted file mode 100644 index 2891575aa..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/types.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. 
- -package api - -import "fmt" - -// Error wrapper -type Error struct { - ID int `json:"id"` - Fields struct { - Code int `json:"code"` - Message string `json:"message"` - Name string `json:"name"` - } `json:"error"` -} - -func (e Error) Error() string { - return fmt.Sprintf("device API error: %+v", e.Fields.Name) -} - -// QoS settings -type QoS struct { - MinIOPS int64 `json:"minIOPS,omitempty"` - MaxIOPS int64 `json:"maxIOPS,omitempty"` - BurstIOPS int64 `json:"burstIOPS,omitempty"` - BurstTime int64 `json:"-"` -} - -// VolumePair settings -type VolumePair struct { - ClusterPairID int64 `json:"clusterPairID"` - RemoteVolumeID int64 `json:"remoteVolumeID"` - RemoteSliceID int64 `json:"remoteSliceID"` - RemoteVolumeName string `json:"remoteVolumeName"` - VolumePairUUID string `json:"volumePairUUID"` -} - -// Volume settings -type Volume struct { - VolumeID int64 `json:"volumeID"` - Name string `json:"name"` - AccountID int64 `json:"accountID"` - CreateTime string `json:"createTime"` - Status string `json:"status"` - Access string `json:"access"` - Enable512e bool `json:"enable512e"` - Iqn string `json:"iqn"` - ScsiEUIDeviceID string `json:"scsiEUIDeviceID"` - ScsiNAADeviceID string `json:"scsiNAADeviceID"` - Qos QoS `json:"qos"` - VolumeAccessGroups []int64 `json:"volumeAccessGroups"` - VolumePairs []VolumePair `json:"volumePairs"` - DeleteTime string `json:"deleteTime"` - PurgeTime string `json:"purgeTime"` - SliceCount int64 `json:"sliceCount"` - TotalSize int64 `json:"totalSize"` - BlockSize int64 `json:"blockSize"` - VirtualVolumeID string `json:"virtualVolumeID"` - Attributes interface{} `json:"attributes"` -} - -type Snapshot struct { - SnapshotID int64 `json:"snapshotID"` - VolumeID int64 `json:"volumeID"` - Name string `json:"name"` - Checksum string `json:"checksum"` - Status string `json:"status"` - TotalSize int64 `json:"totalSize"` - GroupID int64 `json:"groupID"` - CreateTime string `json:"createTime"` - Attributes interface{} `json:"attributes"` -} - -// ListVolumesRequest -type ListVolumesRequest struct { - Accounts []int64 `json:"accounts"` - StartVolumeID *int64 `json:"startVolumeID,omitempty"` - Limit *int64 `json:"limit,omitempty"` -} - -// ListVolumesForAccountRequest -type ListVolumesForAccountRequest struct { - AccountID int64 `json:"accountID"` -} - -// ListActiveVolumesRequest -type ListActiveVolumesRequest struct { - StartVolumeID int64 `json:"startVolumeID"` - Limit int64 `json:"limit"` -} - -// ListVolumesResult -type ListVolumesResult struct { - ID int `json:"id"` - Result struct { - Volumes []Volume `json:"volumes"` - } `json:"result"` -} - -// CreateVolumeRequest -type CreateVolumeRequest struct { - Name string `json:"name"` - AccountID int64 `json:"accountID"` - TotalSize int64 `json:"totalSize"` - Enable512e bool `json:"enable512e"` - Qos QoS `json:"qos,omitempty"` - Attributes interface{} `json:"attributes"` -} - -// CreateVolumeResult -type CreateVolumeResult struct { - ID int `json:"id"` - Result struct { - VolumeID int64 `json:"volumeID"` - } `json:"result"` -} - -// DeleteVolumeRequest -type DeleteVolumeRequest struct { - VolumeID int64 `json:"volumeID"` -} - -type CloneVolumeRequest struct { - VolumeID int64 `json:"volumeID"` - Name string `json:"name"` - SnapshotID int64 `json:"snapshotID"` - Attributes interface{} `json:"attributes"` -} - -type CloneVolumeResult struct { - ID int `json:"id"` - Result struct { - CloneID int64 `json:"cloneID"` - VolumeID int64 `json:"volumeID"` - AsyncHandle int64 `json:"asyncHandle"` - } `json:"result"` -} - -type 
CreateSnapshotRequest struct { - VolumeID int64 `json:"volumeID"` - SnapshotID int64 `json:"snapshotID"` - Name string `json:"name"` - EnableRemoteReplication bool `json:"enableRemoteReplication"` - Retention string `json:"retention"` - Attributes interface{} `json:"attributes"` -} - -type CreateSnapshotResult struct { - ID int `json:"id"` - Result struct { - SnapshotID int64 `json:"snapshotID"` - Checksum string `json:"checksum"` - } `json:"result"` -} - -type ListSnapshotsRequest struct { - VolumeID int64 `json:"volumeID"` -} - -type ListSnapshotsResult struct { - ID int `json:"id"` - Result struct { - Snapshots []Snapshot `json:"snapshots"` - } `json:"result"` -} - -type RollbackToSnapshotRequest struct { - VolumeID int64 `json:"volumeID"` - SnapshotID int64 `json:"snapshotID"` - SaveCurrentState bool `json:"saveCurrentState"` - Name string `json:"name"` - Attributes interface{} `json:"attributes"` -} - -type RollbackToSnapshotResult struct { - ID int `json:"id"` - Result struct { - Checksum string `json:"checksum"` - SnapshotID int64 `json:"snapshotID"` - } `json:"result"` -} - -type DeleteSnapshotRequest struct { - SnapshotID int64 `json:"snapshotID"` -} - -// AddVolumesToVolumeAccessGroupRequest -type AddVolumesToVolumeAccessGroupRequest struct { - VolumeAccessGroupID int64 `json:"volumeAccessGroupID"` - Volumes []int64 `json:"volumes"` -} - -// CreateVolumeAccessGroupRequest -type CreateVolumeAccessGroupRequest struct { - Name string `json:"name"` - Volumes []int64 `json:"volumes,omitempty"` - Initiators []string `json:"initiators,omitempty"` -} - -// CreateVolumeAccessGroupResult -type CreateVolumeAccessGroupResult struct { - ID int `json:"id"` - Result struct { - VagID int64 `json:"volumeAccessGroupID"` - } `json:"result"` -} - -// AddInitiatorsToVolumeAccessGroupRequest -type AddInitiatorsToVolumeAccessGroupRequest struct { - Initiators []string `json:"initiators"` - VAGID int64 `json:"volumeAccessGroupID"` -} - -// ListVolumeAccessGroupsRequest -type ListVolumeAccessGroupsRequest struct { - StartVAGID int64 `json:"startVolumeAccessGroupID,omitempty"` - Limit int64 `json:"limit,omitempty"` -} - -// ListVolumesAccessGroupsResult -type ListVolumesAccessGroupsResult struct { - ID int `json:"id"` - Result struct { - Vags []VolumeAccessGroup `json:"volumeAccessGroups"` - } `json:"result"` -} - -// EmptyResponse -type EmptyResponse struct { - ID int `json:"id"` - Result struct { - } `json:"result"` -} - -// VolumeAccessGroup -type VolumeAccessGroup struct { - Initiators []string `json:"initiators"` - Attributes interface{} `json:"attributes"` - DeletedVolumes []int64 `json:"deletedVolumes"` - Name string `json:"name"` - VAGID int64 `json:"volumeAccessGroupID"` - Volumes []int64 `json:"volumes"` -} - -// GetAccountByNameRequest -type GetAccountByNameRequest struct { - Name string `json:"username"` -} - -// GetAccountByIDRequest -type GetAccountByIDRequest struct { - AccountID int64 `json:"accountID"` -} - -// GetAccountResult -type GetAccountResult struct { - ID int `json:"id"` - Result struct { - Account Account `json:"account"` - } `json:"result"` -} - -// Account -type Account struct { - AccountID int64 `json:"accountID,omitempty"` - Username string `json:"username,omitempty"` - Status string `json:"status,omitempty"` - Volumes []int64 `json:"volumes,omitempty"` - InitiatorSecret string `json:"initiatorSecret,omitempty"` - TargetSecret string `json:"targetSecret,omitempty"` - Attributes interface{} `json:"attributes,omitempty"` -} - -// AddAccountRequest -type AddAccountRequest 
struct { - Username string `json:"username"` - InitiatorSecret string `json:"initiatorSecret,omitempty"` - TargetSecret string `json:"targetSecret,omitempty"` - Attributes interface{} `json:"attributes,omitempty"` -} - -// AddAccountResult -type AddAccountResult struct { - ID int `json:"id"` - Result struct { - AccountID int64 `json:"accountID"` - } `json:"result"` -} - -type ClusterCapacity struct { - ActiveBlockSpace int64 `json:"activeBlockSpace"` - ActiveSessions int64 `json:"activeSessions"` - AverageIOPS int64 `json:"averageIOPS"` - ClusterRecentIOSize int64 `json:"clusterRecentIOSize"` - CurrentIOPS int64 `json:"currentIOPS"` - MaxIOPS int64 `json:"maxIOPS"` - MaxOverProvisionableSpace int64 `json:"maxOverProvisionableSpace"` - MaxProvisionedSpace int64 `json:"maxProvisionedSpace"` - MaxUsedMetadataSpace int64 `json:"maxUsedMetadataSpace"` - MaxUsedSpace int64 `json:"maxUsedSpace"` - NonZeroBlocks int64 `json:"nonZeroBlocks"` - PeakActiveSessions int64 `json:"peakActiveSessions"` - PeakIOPS int64 `json:"peakIOPS"` - ProvisionedSpace int64 `json:"provisionedSpace"` - Timestamp string `json:"timestamp"` - TotalOps int64 `json:"totalOps"` - UniqueBlocks int64 `json:"uniqueBlocks"` - UniqueBlocksUsedSpace int64 `json:"uniqueBlocksUsedSpace"` - UsedMetadataSpace int64 `json:"usedMetadataSpace"` - UsedMetadataSpaceInSnapshots int64 `json:"usedMetadataSpaceInSnapshots"` - UsedSpace int64 `json:"usedSpace"` - ZeroBlocks int64 `json:"zeroBlocks"` -} - -type GetClusterCapacityRequest struct { -} - -type GetClusterCapacityResult struct { - ID int `json:"id"` - Result struct { - ClusterCapacity ClusterCapacity `json:"clusterCapacity"` - } `json:"result"` -} - -type GetClusterHardwareInfoResult struct { - ID int `json:"id"` - Result struct { - ClusterHardwareInfo ClusterHardwareInfo `json:"clusterHardwareInfo"` - } `json:"result"` -} - -type DefaultQoSRequest struct { -} - -type DefaultQoSResult struct { - ID int `json:"id"` - Result struct { - BurstIOPS int64 `json:"burstIOPS"` - MaxIOPS int64 `json:"maxIOPS"` - MinIOPS int64 `json:"minIOPS"` - } `json:"result"` -} - -type ClusterHardwareInfo struct { - Drives interface{} `json:"drives"` - Nodes interface{} `json:"nodes"` -} - -type ModifyVolumeRequest struct { - VolumeID int64 `json:"volumeID"` - AccountID int64 `json:"accountID,omitempty"` - Access string `json:"access,omitempty"` - Qos QoS `json:"qos,omitempty"` - TotalSize int64 `json:"totalSize,omitempty"` - Attributes interface{} `json:"attributes,omitempty"` -} - -type ModifyVolumeResult struct { - Volume Volume `json:"volume,omitempty"` - Curve QoS `json:"curve,omitempty"` -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/vag.go b/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/vag.go deleted file mode 100644 index 926252202..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/vag.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. 
- -package api - -import ( - "encoding/json" - "errors" - - log "github.com/sirupsen/logrus" -) - -// CreateVolumeAccessGroup tbd -func (c *Client) CreateVolumeAccessGroup(r *CreateVolumeAccessGroupRequest) (vagID int64, err error) { - var result CreateVolumeAccessGroupResult - response, err := c.Request("CreateVolumeAccessGroup", r, NewReqID()) - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling CreateVolumeAccessGroupResult API response: %+v", err) - return 0, errors.New("json-decode error") - } - vagID = result.Result.VagID - return - -} - -// ListVolumeAccessGroups tbd -func (c *Client) ListVolumeAccessGroups(r *ListVolumeAccessGroupsRequest) (vags []VolumeAccessGroup, err error) { - response, err := c.Request("ListVolumeAccessGroups", r, NewReqID()) - if err != nil { - log.Errorf("Error in ListVolumeAccessGroupResult API response: %+v", err) - return nil, errors.New("failed to retrieve VAG list") - } - var result ListVolumesAccessGroupsResult - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling ListVolumeAccessGroupResult API response: %+v", err) - return nil, errors.New("json-decode error") - } - vags = result.Result.Vags - return -} - -// AddInitiatorsToVolumeAccessGroup tbd -func (c *Client) AddInitiatorsToVolumeAccessGroup(r *AddInitiatorsToVolumeAccessGroupRequest) error { - _, err := c.Request("AddInitiatorsToVolumeAccessGroup", r, NewReqID()) - if err != nil { - log.Errorf("Error in AddInitiator to VAG API response: %+v", err) - return errors.New("failed to add initiator to VAG") - } - return nil -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/volume.go b/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/volume.go deleted file mode 100644 index 23ff4c791..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/solidfire/api/volume.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. - -package api - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "github.com/cenkalti/backoff" - log "github.com/sirupsen/logrus" - - "github.com/netapp/trident/utils" -) - -// ListVolumesForAccount tbd -func (c *Client) ListVolumesForAccount(listReq *ListVolumesForAccountRequest) (volumes []Volume, err error) { - response, err := c.Request("ListVolumesForAccount", listReq, NewReqID()) - if err != nil { - log.Errorf("Error detected in ListVolumesForAccount API response: %+v", err) - return nil, errors.New("device API error") - } - var result ListVolumesResult - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling ListVolumesForAccount API response: %+v", err) - return nil, errors.New("json-decode error") - } - volumes = result.Result.Volumes - return volumes, err -} - -// GetVolumeByID returns the volume with the specified ID. -func (c *Client) GetVolumeByID(volID int64) (Volume, error) { - - var limit int64 = 1 - - listRequest := &ListVolumesRequest{ - Accounts: []int64{c.AccountID}, - StartVolumeID: &volID, - Limit: &limit, - } - - volumes, err := c.ListVolumes(listRequest) - if err != nil { - return Volume{}, err - } - - // This API isn't guaranteed to return the volume being sought, so make sure the result matches the request! 
- if volumes == nil || len(volumes) == 0 || volumes[0].VolumeID != volID { - return Volume{}, fmt.Errorf("volume %d not found", volID) - } - - return volumes[0], nil -} - -// WaitForVolumeByID polls for the volume with specified ID to appear, with backoff retry logic. -func (c *Client) WaitForVolumeByID(volID int64) (Volume, error) { - - volume := Volume{} - - checkVolumeExists := func() error { - var err error - volume, err = c.GetVolumeByID(volID) - if err != nil { - return fmt.Errorf("volume %d does not yet exist; %v", volID, err) - } - return nil - } - volumeExistsNotify := func(err error, duration time.Duration) { - log.WithField("increment", duration).Debug("Volume not yet present, waiting.") - } - volumeBackoff := backoff.NewExponentialBackOff() - volumeBackoff.InitialInterval = 2 * time.Second - volumeBackoff.Multiplier = 1.414 - volumeBackoff.RandomizationFactor = 0.1 - volumeBackoff.MaxElapsedTime = 30 * time.Second - - // Run the volume check using an exponential backoff - if err := backoff.RetryNotify(checkVolumeExists, volumeBackoff, volumeExistsNotify); err != nil { - log.WithField("volumeID", volID).Warnf( - "Could not find volume after %3.2f seconds.", volumeBackoff.MaxElapsedTime.Seconds()) - return volume, fmt.Errorf("volume %d does not exist", volID) - } else { - log.WithField("volumeID", volID).Debug("Volume found.") - return volume, nil - } -} - -// ListVolumes returns all volumes using the specified request object. -func (c *Client) ListVolumes(listVolReq *ListVolumesRequest) (volumes []Volume, err error) { - response, err := c.Request("ListVolumes", listVolReq, NewReqID()) - if err != nil { - log.Errorf("Error response from ListVolumes request: %v ", err) - return nil, errors.New("device API error") - } - var result ListVolumesResult - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling ListVolumes API response: %v", err) - return nil, errors.New("json-decode error") - } - volumes = result.Result.Volumes - return volumes, err -} - -// CloneVolume invokes the supplied clone volume request. It waits for the source volume -// (which itself may be new in a test scenario) to be ready to be cloned, and it waits for -// the clone to exist. -func (c *Client) CloneVolume(req *CloneVolumeRequest) (Volume, error) { - var cloneError error - var response []byte - var result CloneVolumeResult - - // We use this loop to deal with things like trying to immediately clone - // from a volume that was just created. Sometimes it can take a few - // seconds for the Slice to finalize even though the Volume reports ready. 
- // We'll do a backoff retry loop here, at some point would be handy go have - // a global util for us to use for any call - retry := 0 - for retry < 10 { - response, cloneError = c.Request("CloneVolume", req, NewReqID()) - if cloneError != nil { - errorMessage := cloneError.Error() - if strings.Contains(errorMessage, "SliceNotRegistered") { - log.Warningf("detected SliceNotRegistered on Clone operation, retrying in %d seconds", 2+retry) - time.Sleep(time.Second * time.Duration(2+retry)) - retry++ - } else if strings.Contains(errorMessage, "xInvalidParameter") { - log.Warningf("detected xInvalidParameter on Clone operation, retrying in %d seconds", 2+retry) - time.Sleep(time.Second * time.Duration(2+retry)) - retry++ - } else { - break - } - } else { - break - } - } - - if cloneError != nil { - log.Errorf("Failed to clone volume: %+v", cloneError) - return Volume{}, cloneError - } - log.Info("Clone request succeeded") - - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling CloneVolume API response: %v", err) - return Volume{}, errors.New("json-decode error") - } - - return c.WaitForVolumeByID(result.Result.VolumeID) -} - -// CreateVolume tbd -func (c *Client) CreateVolume(createReq *CreateVolumeRequest) (Volume, error) { - response, err := c.Request("CreateVolume", createReq, NewReqID()) - if err != nil { - log.Errorf("Error response from CreateVolume request: %v ", err) - return Volume{}, errors.New("device API error") - } - var result CreateVolumeResult - if err := json.Unmarshal([]byte(response), &result); err != nil { - log.Errorf("Error detected unmarshalling CreateVolume API response: %v", err) - return Volume{}, errors.New("json-decode error") - } - - return c.WaitForVolumeByID(result.Result.VolumeID) -} - -// AddVolumesToAccessGroup tbd -func (c *Client) AddVolumesToAccessGroup(req *AddVolumesToVolumeAccessGroupRequest) (err error) { - _, err = c.Request("AddVolumesToVolumeAccessGroup", req, NewReqID()) - if err != nil { - if apiErr, ok := err.(Error); ok && apiErr.Fields.Name == "xAlreadyInVolumeAccessGroup" { - return nil - } - log.Errorf("error response from Add to VAG request: %+v ", err) - return errors.New("device API error") - } - return err -} - -// DeleteRange tbd -func (c *Client) DeleteRange(startID, endID int64) { - idx := startID - for idx < endID { - c.DeleteVolume(idx) - } - return -} - -// DeleteVolume tbd -func (c *Client) DeleteVolume(volumeID int64) (err error) { - // TODO(jdg): Add options like purge=True|False, range, ALL etc - var req DeleteVolumeRequest - req.VolumeID = volumeID - _, err = c.Request("DeleteVolume", req, NewReqID()) - if err != nil { - // TODO: distinguish what the error was? 
- log.Errorf("Error response from DeleteVolume request: %+v ", err) - return errors.New("device API error") - } - _, err = c.Request("PurgeDeletedVolume", req, NewReqID()) - return -} - -// DetachVolume tbd -func (c *Client) DetachVolume(v Volume) (err error) { - if c.SVIP == "" { - log.Errorf("error response from DetachVolume request: %+v ", err) - return errors.New("detach volume error") - } - - err = utils.ISCSIDisableDelete(v.Iqn, c.SVIP) - return -} - -func (c *Client) ModifyVolume(req *ModifyVolumeRequest) (err error) { - _, err = c.Request("ModifyVolume", req, NewReqID()) - if err != nil { - log.Errorf("Error response from ModifyVolume request: %+v ", err) - return errors.New("device API error") - } - return err -} diff --git a/vendor/github.com/netapp/trident/storage_drivers/types.go b/vendor/github.com/netapp/trident/storage_drivers/types.go deleted file mode 100644 index 31f617a90..000000000 --- a/vendor/github.com/netapp/trident/storage_drivers/types.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. -package storagedrivers - -import ( - "encoding/json" - "fmt" - "strings" - - trident "github.com/netapp/trident/config" - "github.com/netapp/trident/storage/fake" - sfapi "github.com/netapp/trident/storage_drivers/solidfire/api" -) - -// CommonStorageDriverConfig holds settings in common across all StorageDrivers -type CommonStorageDriverConfig struct { - Version int `json:"version"` - StorageDriverName string `json:"storageDriverName"` - BackendName string `json:"backendName"` - Debug bool `json:"debug"` // Unsupported! - DebugTraceFlags map[string]bool `json:"debugTraceFlags"` // Example: {"api":false, "method":true} - DisableDelete bool `json:"disableDelete"` - StoragePrefixRaw json.RawMessage `json:"storagePrefix,string"` - StoragePrefix *string `json:"-"` - SerialNumbers []string `json:"serialNumbers,omitEmpty"` - DriverContext trident.DriverContext `json:"-"` - LimitVolumeSize string `json:"limitVolumeSize"` -} - -type CommonStorageDriverConfigDefaults struct { - Size string `json:"size"` -} - -// ESeriesStorageDriverConfig holds settings for ESeriesStorageDriver -type ESeriesStorageDriverConfig struct { - *CommonStorageDriverConfig - - // Web Proxy Services Info - WebProxyHostname string `json:"webProxyHostname"` - WebProxyPort string `json:"webProxyPort"` // optional - WebProxyUseHTTP bool `json:"webProxyUseHTTP"` // optional - WebProxyVerifyTLS bool `json:"webProxyVerifyTLS"` // optional - Username string `json:"username"` - Password string `json:"password"` - - // Array Info - ControllerA string `json:"controllerA"` - ControllerB string `json:"controllerB"` - PasswordArray string `json:"passwordArray"` //optional - - // Options - PoolNameSearchPattern string `json:"poolNameSearchPattern"` //optional - - // Host Networking - HostDataIPDeprecated string `json:"hostData_IP,omitempty"` // for backward compatibility only - HostDataIP string `json:"hostDataIP"` // for iSCSI can be either port if multipathing is setup - AccessGroup string `json:"accessGroupName"` // name for host group - HostType string `json:"hostType"` // host type, default is 'linux_dm_mp' - - EseriesStorageDriverPool - Storage []EseriesStorageDriverPool `json:"storage"` -} - -type EseriesStorageDriverPool struct { - Labels map[string]string `json:"labels"` - Region string `json:"region"` - Zone string `json:"zone"` - EseriesStorageDriverConfigDefaults `json:"defaults"` -} - -type EseriesStorageDriverConfigDefaults struct { - CommonStorageDriverConfigDefaults -} - -// 
OntapStorageDriverConfig holds settings for OntapStorageDrivers -type OntapStorageDriverConfig struct { - *CommonStorageDriverConfig // embedded types replicate all fields - ManagementLIF string `json:"managementLIF"` - DataLIF string `json:"dataLIF"` - IgroupName string `json:"igroupName"` - SVM string `json:"svm"` - Username string `json:"username"` - Password string `json:"password"` - Aggregate string `json:"aggregate"` - UsageHeartbeat string `json:"usageHeartbeat"` // in hours, default to 24.0 - QtreePruneFlexvolsPeriod string `json:"qtreePruneFlexvolsPeriod"` // in seconds, default to 600 - QtreeQuotaResizePeriod string `json:"qtreeQuotaResizePeriod"` // in seconds, default to 60 - EmptyFlexvolDeferredDeletePeriod string `json:"emptyFlexvolDeferredDeletePeriod"` // in seconds, default to 28800 - NfsMountOptions string `json:"nfsMountOptions"` - LimitAggregateUsage string `json:"limitAggregateUsage"` - OntapStorageDriverConfigDefaults `json:"defaults"` -} - -type OntapStorageDriverConfigDefaults struct { - SpaceAllocation string `json:"spaceAllocation"` - SpaceReserve string `json:"spaceReserve"` - SnapshotPolicy string `json:"snapshotPolicy"` - SnapshotReserve string `json:"snapshotReserve"` - SnapshotDir string `json:"snapshotDir"` - UnixPermissions string `json:"unixPermissions"` - ExportPolicy string `json:"exportPolicy"` - SecurityStyle string `json:"securityStyle"` - SplitOnClone string `json:"splitOnClone"` - FileSystemType string `json:"fileSystemType"` - Encryption string `json:"encryption"` - CommonStorageDriverConfigDefaults -} - -// SolidfireStorageDriverConfig holds settings for SolidfireStorageDrivers -type SolidfireStorageDriverConfig struct { - *CommonStorageDriverConfig // embedded types replicate all fields - TenantName string - EndPoint string - SVIP string - InitiatorIFace string //iface to use of iSCSI initiator - Types *[]sfapi.VolType - LegacyNamePrefix string //name prefix used in earlier ndvp versions - AccessGroups []int64 - UseCHAP bool - DefaultBlockSize int64 //blocksize to use on create when not specified (512|4096, 512 is default) - - SolidfireStorageDriverPool - Storage []SolidfireStorageDriverPool `json:"storage"` -} - -type SolidfireStorageDriverPool struct { - Labels map[string]string `json:"labels"` - Region string `json:"region"` - Zone string `json:"zone"` - Type string `json:"type"` - SolidfireStorageDriverConfigDefaults `json:"defaults"` -} - -type SolidfireStorageDriverConfigDefaults struct { - CommonStorageDriverConfigDefaults -} - -type AWSNFSStorageDriverConfig struct { - *CommonStorageDriverConfig - APIURL string `json:"apiURL"` - APIKey string `json:"apiKey"` - APIRegion string `json:"apiRegion"` - SecretKey string `json:"secretKey"` - ProxyURL string `json:"proxyURL"` - NfsMountOptions string `json:"nfsMountOptions"` - AWSNFSStorageDriverPool - Storage []AWSNFSStorageDriverPool `json:"storage"` -} - -type AWSNFSStorageDriverPool struct { - Labels map[string]string `json:"labels"` - Region string `json:"region"` - Zone string `json:"zone"` - ServiceLevel string `json:"serviceLevel"` - AWSNFSStorageDriverConfigDefaults `json:"defaults"` -} - -type AWSNFSStorageDriverConfigDefaults struct { - ExportRule string `json:"exportRule"` - SnapshotDir string `json:"snapshotDir"` - SnapshotReserve string `json:"snapshotReserve"` - CommonStorageDriverConfigDefaults -} - -type AzureNFSStorageDriverConfig struct { - *CommonStorageDriverConfig - SubscriptionID string `json:"subscriptionID"` - TenantID string `json:"tenantID"` - ClientID string 
`json:"clientID"` - ClientSecret string `json:"clientSecret"` - NfsMountOptions string `json:"nfsMountOptions"` - AzureNFSStorageDriverPool - Storage []AzureNFSStorageDriverPool `json:"storage"` -} - -// Note that 'Region' and 'Zone' are internal specifiers, not related to Azure's -// 'Location' field. -type AzureNFSStorageDriverPool struct { - Labels map[string]string `json:"labels"` - Region string `json:"region"` - Zone string `json:"zone"` - Location string `json:"location"` - ServiceLevel string `json:"serviceLevel"` - VirtualNetwork string `json:"virtualNetwork"` - Subnet string `json:"subnet"` - AzureNFSStorageDriverConfigDefaults `json:"defaults"` -} - -type AzureNFSStorageDriverConfigDefaults struct { - ExportRule string `json:"exportRule"` - CommonStorageDriverConfigDefaults -} - -type GCPNFSStorageDriverConfig struct { - *CommonStorageDriverConfig - ProjectNumber string `json:"projectNumber"` - APIKey GCPPrivateKey `json:"apiKey"` - APIRegion string `json:"apiRegion"` - ProxyURL string `json:"proxyURL"` - NfsMountOptions string `json:"nfsMountOptions"` - GCPNFSStorageDriverPool - Storage []GCPNFSStorageDriverPool `json:"storage"` -} - -type GCPNFSStorageDriverPool struct { - Labels map[string]string `json:"labels"` - Region string `json:"region"` - Zone string `json:"zone"` - ServiceLevel string `json:"serviceLevel"` - Network string `json:"network"` - GCPNFSStorageDriverConfigDefaults `json:"defaults"` -} - -type GCPNFSStorageDriverConfigDefaults struct { - ExportRule string `json:"exportRule"` - SnapshotDir string `json:"snapshotDir"` - SnapshotReserve string `json:"snapshotReserve"` - CommonStorageDriverConfigDefaults -} - -type GCPPrivateKey struct { - Type string `json:"type"` - ProjectID string `json:"project_id"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - ClientEmail string `json:"client_email"` - ClientID string `json:"client_id"` - AuthURI string `json:"auth_uri"` - TokenURI string `json:"token_uri"` - AuthProviderX509CertURL string `json:"auth_provider_x509_cert_url"` - ClientX509CertURL string `json:"client_x509_cert_url"` -} - -type FakeStorageDriverConfig struct { - *CommonStorageDriverConfig - Protocol trident.Protocol `json:"protocol"` - // Pools are the modeled physical pools. At least one is required. - Pools map[string]*fake.StoragePool `json:"pools"` - // Volumes are the modeled backend volumes that exist when the driver starts. Optional. 
- Volumes []fake.Volume `json:"volumes"` - InstanceName string `json:"instanceName"` - FakeStorageDriverPool - Storage []FakeStorageDriverPool `json:"storage"` -} - -type FakeStorageDriverPool struct { - Labels map[string]string `json:"labels"` - Region string `json:"region"` - Zone string `json:"zone"` - FakeStorageDriverConfigDefaults `json:"defaults"` -} - -type FakeStorageDriverConfigDefaults struct { - CommonStorageDriverConfigDefaults -} - -type BackendIneligibleError struct { - message string -} - -func (e *BackendIneligibleError) Error() string { return e.message } - -func NewBackendIneligibleError(volumeName string, errors []error) error { - messages := make([]string, 0) - for _, err := range errors { - messages = append(messages, err.Error()) - } - - return &BackendIneligibleError{ - message: fmt.Sprintf("backend cannot satisfy create request for volume %s: (%s)", - volumeName, strings.Join(messages, "; ")), - } -} - -func IsBackendIneligibleError(err error) bool { - if err == nil { - return false - } - _, ok := err.(*BackendIneligibleError) - return ok -} - -type VolumeExistsError struct { - message string -} - -func (e *VolumeExistsError) Error() string { return e.message } - -func NewVolumeExistsError(name string) error { - return &VolumeExistsError{ - message: fmt.Sprintf("volume %s already exists", name), - } -} - -func IsVolumeExistsError(err error) bool { - if err == nil { - return false - } - _, ok := err.(*VolumeExistsError) - return ok -} - -type SnapshotsNotSupportedError struct { - message string -} - -func (e *SnapshotsNotSupportedError) Error() string { return e.message } - -func NewSnapshotsNotSupportedError(backendType string) error { - return &SnapshotsNotSupportedError{ - message: fmt.Sprintf("snapshots are not supported by backend type %s", backendType), - } -} diff --git a/vendor/github.com/netapp/trident/utils/crypto.go b/vendor/github.com/netapp/trident/utils/crypto.go deleted file mode 100644 index ca9b87762..000000000 --- a/vendor/github.com/netapp/trident/utils/crypto.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package utils - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "encoding/pem" - "math/big" - "time" -) - -type CertInfo struct { - CAKey string - CACert string - ServerKey string - ServerCert string - ClientKey string - ClientCert string -} - -// makeHTTPCertInfo generates a CA key and cert, then uses that key to sign two -// other keys and certs, one for a TLS server and one for a TLS client. None of -// the parameters are configurable...the serial numbers and principal names are -// hardcoded, the validity period is hardcoded to 1970-2070, and the algorithm -// and key size are hardcoded to 521-bit elliptic curve. 
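// Illustrative sketch: one way the base64-encoded PEM strings carried in CertInfo above could
// be consumed to build a mutual-TLS server configuration. The package name and the helper
// tlsConfigFromCertInfo are assumptions for illustration; only CertInfo's field semantics
// (base64-wrapped PEM key and certificate material) come from the code above.
package example

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"fmt"
)

func tlsConfigFromCertInfo(caCertB64, serverCertB64, serverKeyB64 string) (*tls.Config, error) {
	caPEM, err := base64.StdEncoding.DecodeString(caCertB64)
	if err != nil {
		return nil, err
	}
	certPEM, err := base64.StdEncoding.DecodeString(serverCertB64)
	if err != nil {
		return nil, err
	}
	keyPEM, err := base64.StdEncoding.DecodeString(serverKeyB64)
	if err != nil {
		return nil, err
	}

	serverCert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return nil, err
	}
	caPool := x509.NewCertPool()
	if !caPool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("could not parse CA certificate")
	}
	// Serve with the generated server certificate and require clients to present a
	// certificate signed by the same generated CA.
	return &tls.Config{
		Certificates: []tls.Certificate{serverCert},
		ClientCAs:    caPool,
		ClientAuth:   tls.RequireAndVerifyClientCert,
	}, nil
}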
-func MakeHTTPCertInfo(caCertName, serverCertName, clientCertName string) (*CertInfo, error) { - - certInfo := &CertInfo{} - - notBefore := time.Unix(0, 0) // The Epoch (1970 Jan 1) - notAfter := notBefore.Add(time.Hour * 24 * 36525) // 100 years (365.25 days per year) - - // Create CA key - caKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return nil, err - } - caKeyBase64, err := keyToBase64String(caKey) - if err != nil { - return nil, err - } - certInfo.CAKey = caKeyBase64 - - // Create CA cert - caCert := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(1), - Subject: pkix.Name{ - Country: []string{"US"}, - Province: []string{"NC"}, - Locality: []string{"RTP"}, - Organization: []string{"NetApp"}, - CommonName: caCertName, - }, - NotBefore: notBefore, - NotAfter: notAfter, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - IsCA: true, - SubjectKeyId: bigIntHash(caKey.D), - BasicConstraintsValid: true, - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &caCert, &caCert, &caKey.PublicKey, caKey) - if err != nil { - return nil, err - } - certInfo.CACert = certToBase64String(derBytes) - - // Create HTTPS server key - serverKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return nil, err - } - serverKeyBase64, err := keyToBase64String(serverKey) - if err != nil { - return nil, err - } - certInfo.ServerKey = serverKeyBase64 - - // Create HTTPS server cert - serverCert := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(2), - Subject: pkix.Name{ - Country: []string{"US"}, - Province: []string{"NC"}, - Locality: []string{"RTP"}, - Organization: []string{"NetApp"}, - CommonName: serverCertName, - }, - NotBefore: notBefore, - NotAfter: notAfter, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - AuthorityKeyId: caCert.SubjectKeyId, - SubjectKeyId: bigIntHash(serverKey.D), - } - - derBytes, err = x509.CreateCertificate(rand.Reader, &serverCert, &caCert, &serverKey.PublicKey, caKey) - if err != nil { - return nil, err - } - certInfo.ServerCert = certToBase64String(derBytes) - - // Create HTTPS client key - clientKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return nil, err - } - clientKeyBase64, err := keyToBase64String(clientKey) - if err != nil { - return nil, err - } - certInfo.ClientKey = clientKeyBase64 - - // Create HTTPS client cert - clientCert := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(3), - Subject: pkix.Name{ - Country: []string{"US"}, - Province: []string{"NC"}, - Locality: []string{"RTP"}, - Organization: []string{"NetApp"}, - CommonName: clientCertName, - }, - NotBefore: notBefore, - NotAfter: notAfter, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - AuthorityKeyId: caCert.SubjectKeyId, - SubjectKeyId: bigIntHash(clientKey.D), - } - - derBytes, err = x509.CreateCertificate(rand.Reader, &clientCert, &caCert, &clientKey.PublicKey, caKey) - if err != nil { - return nil, err - } - certInfo.ClientCert = certToBase64String(derBytes) - - return certInfo, nil -} - -func keyToBase64String(key *ecdsa.PrivateKey) (string, error) { - b, err := x509.MarshalECPrivateKey(key) - if err != nil { - return "", err - } - keyBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) - return base64.StdEncoding.EncodeToString(keyBytes), nil -} - -func certToBase64String(derBytes []byte) string { - certBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - return 
base64.StdEncoding.EncodeToString(certBytes) -} - -func bigIntHash(n *big.Int) []byte { - hash := sha1.New() - hash.Write(n.Bytes()) - return hash.Sum(nil) -} diff --git a/vendor/github.com/netapp/trident/utils/k8s_utils.go b/vendor/github.com/netapp/trident/utils/k8s_utils.go deleted file mode 100644 index 734101b27..000000000 --- a/vendor/github.com/netapp/trident/utils/k8s_utils.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2017 The Kubernetes Authors. -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package utils - -import ( - "bytes" - "fmt" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - - "os" - "syscall" - - log "github.com/sirupsen/logrus" -) - -const ( - // How many times to retry for a consistent read of /proc/mounts. - maxListTries = 3 - // Number of fields per line in /proc/mounts as per the fstab man page. - expectedNumProcMntFieldsPerLine = 6 - // Number of fields per line in /proc/self/mountinfo as per the fstab man page. - expectedNumProcSelfMntInfoFieldsPerLine = 11 - // Location of the mount file to use - procMountsPath = "/proc/mounts" - // Location of the mount file to use - procSelfMountinfoPath = "/proc/self/mountinfo" -) - -// This represents a single line in /proc/mounts or /etc/fstab. -type MountPoint struct { - Device string - Path string - Type string - Opts []string - Freq int - Pass int -} - -// This represents a single line in /proc/self/mountinfo. -type MountInfo struct { - MountId int - ParentId int - DeviceId string - Root string - MountPoint string - MountOptions []string - //OptionalFields []string - FsType string - MountSource string - SuperOptions []string -} - -// IsLikelyDir determines if mountpoint is a directory -func IsLikelyDir(mountpoint string) (bool, error) { - stat, err := os.Stat(mountpoint) - if err != nil { - return false, err - } - - return stat.IsDir(), nil -} - -// IsLikelyNotMountPoint determines if a directory is not a mountpoint. -func IsLikelyNotMountPoint(mountpoint string) (bool, error) { - stat, err := os.Stat(mountpoint) - if err != nil { - return true, err - } - rootStat, err := os.Lstat(filepath.Dir(strings.TrimSuffix(mountpoint, "/"))) - if err != nil { - return true, err - } - // If the directory has a different device as parent, then it is a mountpoint. - if stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev { - return false, nil - } - - return true, nil -} - -func GetDeviceNameFromMount(mountpath string) (string, int, error) { - - fields := log.Fields{"mountpath": mountpath} - log.WithFields(fields).Debug(">>>> k8s_utils.GetDeviceNameFromMount") - defer log.WithFields(fields).Debug("<<<< k8s_utils.GetDeviceNameFromMount") - - mps, err := listProcMounts(procMountsPath) - if err != nil { - return "", 0, err - } - - // Find the device name. - // FIXME if multiple devices mounted on the same mount path, only the first one is returned - device := "" - // If mountPath is symlink, need get its target path. - slTarget, err := filepath.EvalSymlinks(mountpath) - if err != nil { - slTarget = mountpath - } - for i := range mps { - if mps[i].Path == slTarget { - device = mps[i].Device - break - } - } - - // Find all references to the device. 
- refCount := 0 - for i := range mps { - if mps[i].Device == device { - refCount++ - } - } - - log.WithFields(log.Fields{ - "mountpath": mountpath, - "device": device, - "refCount": refCount, - }).Debug("Found device from mountpath.") - - return device, refCount, nil -} - -// listProcSelfMountinfo (Available since Linux 2.6.26) lists information about mount points -// in the process's mount namespace. Ref: http://man7.org/linux/man-pages/man5/proc.5.html -// for /proc/[pid]/mountinfo -func listProcSelfMountinfo(mountFilePath string) ([]MountInfo, error) { - content, err := ConsistentRead(mountFilePath, maxListTries) - if err != nil { - return nil, err - } - return parseProcSelfMountinfo(content) -} - -// parseProcSelfMountinfo parses the output of /proc/self/mountinfo file into a slice of MountInfo struct -func parseProcSelfMountinfo(content []byte) ([]MountInfo, error) { - out := make([]MountInfo, 0) - lines := strings.Split(string(content), "\n") - for _, line := range lines { - if line == "" { - // The last split() item is empty string following the last \n - continue - } - fields := strings.Fields(line) - fieldLines := len(fields) - expectedFieldsPerLine := expectedNumProcSelfMntInfoFieldsPerLine - if fieldLines > expectedFieldsPerLine || fieldLines < (expectedFieldsPerLine-1) { - return nil, fmt.Errorf("wrong number of fields (expected %d or %d, got %d): %s", expectedFieldsPerLine, - (expectedFieldsPerLine - 1), len(fields), line) - } - - // If root value is marked deleted, skip the entry - if strings.Contains(fields[3], "deleted") { - continue - } - - mp := MountInfo{ - DeviceId: fields[2], - Root: fields[3], - MountPoint: fields[4], - MountOptions: strings.Split(fields[5], ","), - } - - mountId, err := strconv.Atoi(fields[0]) - if err != nil { - return nil, err - } - mp.MountId = mountId - - parentId, err := strconv.Atoi(fields[1]) - if err != nil { - return nil, err - } - mp.ParentId = parentId - - mp.FsType = fields[fieldLines-3] - mp.MountSource = fields[fieldLines-2] - mp.SuperOptions = strings.Split(fields[fieldLines-1], ",") - - out = append(out, mp) - } - return out, nil -} - -func listProcMounts(mountFilePath string) ([]MountPoint, error) { - content, err := ConsistentRead(mountFilePath, maxListTries) - if err != nil { - return nil, err - } - return parseProcMounts(content) -} - -func parseProcMounts(content []byte) ([]MountPoint, error) { - out := make([]MountPoint, 0) - lines := strings.Split(string(content), "\n") - for _, line := range lines { - if line == "" { - // the last split() item is empty string following the last \n - continue - } - fields := strings.Fields(line) - if len(fields) != expectedNumProcMntFieldsPerLine { - return nil, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", - expectedNumProcMntFieldsPerLine, len(fields), line) - } - - mp := MountPoint{ - Device: fields[0], - Path: fields[1], - Type: fields[2], - Opts: strings.Split(fields[3], ","), - } - - freq, err := strconv.Atoi(fields[4]) - if err != nil { - return nil, err - } - mp.Freq = freq - - pass, err := strconv.Atoi(fields[5]) - if err != nil { - return nil, err - } - mp.Pass = pass - - out = append(out, mp) - } - return out, nil -} - -// ConsistentRead repeatedly reads a file until it gets the same content twice. -// This is useful when reading files in /proc that are larger than page size -// and kernel may modify them between individual read() syscalls. 
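// Worked example (illustrative values) of the 6-field /proc/mounts format handled by
// parseProcMounts above. A line such as
//
//	/dev/mapper/mpathb /mnt/vol1 ext4 rw,relatime 0 0
//
// parses into a MountPoint with Device "/dev/mapper/mpathb", Path "/mnt/vol1",
// Type "ext4", Opts []string{"rw", "relatime"}, Freq 0 and Pass 0.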
-func ConsistentRead(filename string, attempts int) ([]byte, error) { - oldContent, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - for i := 0; i < attempts; i++ { - newContent, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - if bytes.Compare(oldContent, newContent) == 0 { - return newContent, nil - } - // Files are different, continue reading - oldContent = newContent - } - return nil, fmt.Errorf("could not get consistent content of %s after %d attempts", filename, attempts) -} diff --git a/vendor/github.com/netapp/trident/utils/locks.go b/vendor/github.com/netapp/trident/utils/locks.go deleted file mode 100644 index 11f00cff0..000000000 --- a/vendor/github.com/netapp/trident/utils/locks.go +++ /dev/null @@ -1,60 +0,0 @@ -package utils - -import ( - "sync" - - log "github.com/sirupsen/logrus" -) - -type locks struct { - lockMap map[string]*sync.Mutex - createLock *sync.Mutex -} - -var sharedLocks *locks - -// init initializes the shared locks struct exactly once per runtime. -func init() { - sharedLocks = &locks{ - lockMap: map[string]*sync.Mutex{}, - createLock: &sync.Mutex{}, - } -} - -// getLock returns a mutex with the specified ID. If the lock does not exist, one is created. -// This method uses the check-lock-check pattern to defend against race conditions where multiple -// callers try to get a non-existent lock at the same time. -func getLock(lockID string) *sync.Mutex { - - var lock *sync.Mutex - var ok bool - - if lock, ok = sharedLocks.lockMap[lockID]; !ok { - - sharedLocks.createLock.Lock() - defer sharedLocks.createLock.Unlock() - - if lock, ok = sharedLocks.lockMap[lockID]; !ok { - lock = &sync.Mutex{} - sharedLocks.lockMap[lockID] = lock - log.WithField("lock", lockID).Debug("Created shared lock.") - } - } - - return lock -} - -// Lock acquires a mutex with the specified ID. The mutex does not need to exist before -// calling this method. The semantics of this method are intentionally identical to sync.Mutex.Lock(). -func Lock(ctx, lockID string) { - log.WithField("lock", lockID).Debugf("Attempting to acquire shared lock (%s).", ctx) - getLock(lockID).Lock() - log.WithField("lock", lockID).Debugf("Acquired shared lock (%s).", ctx) -} - -// Unlock releases a mutex with the specified ID. The semantics of this method are intentionally -// identical to sync.Mutex.Unlock(). -func Unlock(ctx, lockID string) { - getLock(lockID).Unlock() - log.WithField("lock", lockID).Debugf("Released shared lock (%s).", ctx) -} diff --git a/vendor/github.com/netapp/trident/utils/osutils.go b/vendor/github.com/netapp/trident/utils/osutils.go deleted file mode 100644 index 0be140337..000000000 --- a/vendor/github.com/netapp/trident/utils/osutils.go +++ /dev/null @@ -1,2275 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. 
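// Usage sketch for the shared named locks defined in locks.go above: callers serialize work
// on a single resource (for example, one volume) by using its name as the lock ID, while
// unrelated resources proceed in parallel. The wrapper below and its package are
// illustrative assumptions, not part of the Trident API.
package example

import "github.com/netapp/trident/utils"

func withVolumeLock(volumeName string, work func() error) error {
	utils.Lock("example-op", volumeName) // the first argument is only a context tag for log output
	defer utils.Unlock("example-op", volumeName)
	return work()
}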
- -package utils - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "strconv" - "strings" - "syscall" - "time" - - "github.com/cenkalti/backoff" - log "github.com/sirupsen/logrus" -) - -const ( - iSCSIErrNoObjsFound = 21 - iSCSIDeviceDiscoveryTimeoutSecs = 90 - multipathDeviceDiscoveryTimeoutSecs = 90 - resourceDeletionTimeoutSecs = 40 - fsRaw = "raw" - temporaryMountDir = "/tmp_mnt" -) - -var xtermControlRegex = regexp.MustCompile(`\x1B\[[0-9;]*[a-zA-Z]`) -var pidRunningRegex = regexp.MustCompile(`pid \d+ running`) -var pidRegex = regexp.MustCompile(`^\d+$`) -var chrootPathPrefix string - -func init() { - if os.Getenv("DOCKER_PLUGIN_MODE") != "" { - chrootPathPrefix = "/host" - } else { - chrootPathPrefix = "" - } -} - -// Attach the volume to the local host. This method must be able to accomplish its task using only the data passed in. -// It may be assumed that this method always runs on the host to which the volume will be attached. -func AttachNFSVolume(name, mountpoint string, publishInfo *VolumePublishInfo) error { - - log.Debug(">>>> osutils.AttachNFSVolume") - defer log.Debug("<<<< osutils.AttachNFSVolume") - - var exportPath = fmt.Sprintf("%s:%s", publishInfo.NfsServerIP, publishInfo.NfsPath) - var options = publishInfo.MountOptions - - log.WithFields(log.Fields{ - "volume": name, - "exportPath": exportPath, - "mountpoint": mountpoint, - "options": options, - }).Debug("Publishing NFS volume.") - - return mountNFSPath(exportPath, mountpoint, options) -} - -// Attach the volume to the local host. This method must be able to accomplish its task using only the data passed in. -// It may be assumed that this method always runs on the host to which the volume will be attached. If the mountpoint -// parameter is specified, the volume will be mounted. The device path is set on the in-out publishInfo parameter -// so that it may be mounted later instead. 
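// Usage sketch for AttachNFSVolume above, assuming a VolumePublishInfo (defined elsewhere in
// this package) populated with the fields the function reads: NfsServerIP, NfsPath and
// MountOptions. The values and function name below are placeholders for illustration.
package example

import "github.com/netapp/trident/utils"

func attachExampleNFSVolume() error {
	publishInfo := &utils.VolumePublishInfo{}
	publishInfo.NfsServerIP = "10.0.0.10"
	publishInfo.NfsPath = "/trident_vol1"
	publishInfo.MountOptions = "nfsvers=4.1"

	// Mounts 10.0.0.10:/trident_vol1 at /mnt/vol1 with the given options.
	return utils.AttachNFSVolume("vol1", "/mnt/vol1", publishInfo)
}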
-func AttachISCSIVolume(name, mountpoint string, publishInfo *VolumePublishInfo) error { - - log.Debug(">>>> osutils.AttachISCSIVolume") - defer log.Debug("<<<< osutils.AttachISCSIVolume") - - var err error - var lunID = int(publishInfo.IscsiLunNumber) - - var bkportal []string - var portalIps []string - bkportal = append(bkportal, publishInfo.IscsiTargetPortal) - portalIps = append(portalIps, strings.Split(publishInfo.IscsiTargetPortal, ":")[0]) - for _, p := range publishInfo.IscsiPortals { - bkportal = append(bkportal, p) - portalIps = append(portalIps, strings.Split(p, ":")[0]) - } - - var targetIQN = publishInfo.IscsiTargetIQN - var username = publishInfo.IscsiUsername - var initiatorSecret = publishInfo.IscsiInitiatorSecret - var iscsiInterface = publishInfo.IscsiInterface - var fstype = publishInfo.FilesystemType - var options = publishInfo.MountOptions - - log.WithFields(log.Fields{ - "volume": name, - "mountpoint": mountpoint, - "lunID": lunID, - "targetPortals": bkportal, - "targetIQN": targetIQN, - "fstype": fstype, - }).Debug("Attaching iSCSI volume.") - - if ISCSISupported() == false { - err := errors.New("unable to attach: open-iscsi tools not found on host") - log.Errorf("Unable to attach volume: open-iscsi utils not found") - return err - } - - // If not logged in, login first - sessionExists, err := iSCSISessionExistsToTargetIQN(targetIQN) - if err != nil { - return err - } - if !sessionExists { - if publishInfo.UseCHAP { - for _, portal := range bkportal { - err = loginWithChap(targetIQN, portal, username, initiatorSecret, iscsiInterface, false) - if err != nil { - log.Errorf("Failed to login with CHAP credentials: %+v ", err) - return fmt.Errorf("iSCSI login error: %v", err) - } - } - } else { - err = EnsureISCSISessions(portalIps) - if err != nil { - return fmt.Errorf("iSCSI session error: %v", err) - } - } - } - - // If LUN isn't present, scan the target and wait for the device(s) to appear - // if not attached need to scan - shouldScan := !IsAlreadyAttached(lunID, targetIQN) - err = waitForDeviceScanIfNeeded(lunID, targetIQN, shouldScan) - if err != nil { - log.Errorf("Could not find iSCSI device: %+v", err) - return err - } - - err = waitForMultipathDeviceForLUN(lunID, targetIQN) - if err != nil { - return err - } - - // Lookup all the SCSI device information - deviceInfo, err := getDeviceInfoForLUN(lunID, targetIQN) - if err != nil { - return fmt.Errorf("error getting iSCSI device information: %v", err) - } else if deviceInfo == nil { - return fmt.Errorf("could not get iSCSI device information for LUN %d", lunID) - } - - log.WithFields(log.Fields{ - "scsiLun": deviceInfo.LUN, - "multipathDevice": deviceInfo.MultipathDevice, - "devices": deviceInfo.Devices, - "fsType": deviceInfo.Filesystem, - "iqn": deviceInfo.IQN, - }).Debug("Found device.") - - // Make sure we use the proper device (multipath if in use) - deviceToUse := deviceInfo.Devices[0] - if deviceInfo.MultipathDevice != "" { - deviceToUse = deviceInfo.MultipathDevice - } - if deviceToUse == "" { - return fmt.Errorf("could not determine device to use for %v", name) - } - devicePath := "/dev/" + deviceToUse - if err := waitForDevice(devicePath); err != nil { - return fmt.Errorf("could not find device %v; %s", devicePath, err) - } - - // Return the device in the publish info in case the mount will be done later - publishInfo.DevicePath = devicePath - - if fstype == fsRaw { - return nil - } - - existingFstype := deviceInfo.Filesystem - if existingFstype == "" { - log.WithFields(log.Fields{"volume": name, 
"fstype": fstype}).Debug("Formatting LUN.") - err := formatVolume(devicePath, fstype) - if err != nil { - return fmt.Errorf("error formatting LUN %s, device %s: %v", name, deviceToUse, err) - } - } else if existingFstype != fstype { - log.WithFields(log.Fields{ - "volume": name, - "existingFstype": existingFstype, - "requestedFstype": fstype, - }).Error("LUN already formatted with a different file system type.") - return fmt.Errorf("LUN %s, device %s already formatted with other filesystem: %s", - name, deviceToUse, existingFstype) - } else { - log.WithFields(log.Fields{ - "volume": name, - "fstype": deviceInfo.Filesystem, - }).Debug("LUN already formatted.") - } - - // Optionally mount the device - if mountpoint != "" { - if err := MountDevice(devicePath, mountpoint, options, false); err != nil { - return fmt.Errorf("error mounting LUN %v, device %v, mountpoint %v; %s", - name, deviceToUse, mountpoint, err) - } - } - - return nil -} - -// DFInfo data structure for wrapping the parsed output from the 'df' command -type DFInfo struct { - Target string - Source string -} - -// GetDFOutput returns parsed DF output -func GetDFOutput() ([]DFInfo, error) { - - log.Debug(">>>> osutils.GetDFOutput") - defer log.Debug("<<<< osutils.GetDFOutput") - - var result []DFInfo - out, err := execCommand("df", "--output=target,source") - if err != nil { - // df returns an error if there's a stale file handle that we can - // safely ignore. There may be other reasons. Consider it a warning if - // it printed anything to stdout. - if len(out) == 0 { - log.Error("Error encountered gathering df output.") - return nil, err - } - } - - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - for _, l := range lines { - - a := strings.Fields(l) - if len(a) > 1 { - result = append(result, DFInfo{ - Target: a[0], - Source: a[1], - }) - } - } - if len(result) > 1 { - return result[1:], nil - } - return result, nil -} - -// GetInitiatorIqns returns parsed contents of /etc/iscsi/initiatorname.iscsi -func GetInitiatorIqns() ([]string, error) { - - log.Debug(">>>> osutils.GetInitiatorIqns") - defer log.Debug("<<<< osutils.GetInitiatorIqns") - - var iqns []string - out, err := execCommand("cat", "/etc/iscsi/initiatorname.iscsi") - if err != nil { - log.Error("Error gathering initiator names.") - return nil, err - } - lines := strings.Split(string(out), "\n") - for _, l := range lines { - if strings.Contains(l, "InitiatorName=") { - iqns = append(iqns, strings.Split(l, "=")[1]) - } - } - return iqns, nil -} - -// PathExists returns true if the file/directory at the specified path exists, -// false otherwise or if an error occurs. 
-func PathExists(path string) bool { - if _, err := os.Stat(path); err == nil { - return true - } - return false -} - -// EnsureFileExists makes sure that file of given name exists -func EnsureFileExists(path string) error { - fields := log.Fields{"path": path} - if info, err := os.Stat(path); err == nil { - if info.IsDir() { - log.WithFields(fields).Error("Path exists but is a directory") - return fmt.Errorf("path exists but is a directory: %s", path) - } - return nil - } else if !os.IsNotExist(err) { - log.WithFields(fields).Errorf("Can't determine if file exists; %s", err) - return fmt.Errorf("can't determine if file %s exists; %s", path, err) - } - - file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC, 0600) - if nil != err { - log.WithFields(fields).Errorf("OpenFile failed; %s", err) - return fmt.Errorf("failed to create file %s; %s", path, err) - } - file.Close() - - return nil -} - -// DeleteResourceAtPath makes sure that given named file or (empty) directory is removed -func DeleteResourceAtPath(resource string) error { - return waitForResourceDeletionAtPath(resource) -} - -// waitForResourceDeletionAtPath accepts a resource name and waits until it is deleted and returns error if it times out -func waitForResourceDeletionAtPath(resource string) error { - - fields := log.Fields{"resource": resource} - log.WithFields(fields).Debug(">>>> osutils.waitForResourceDeletionAtPath") - defer log.WithFields(fields).Debug("<<<< osutils.waitForResourceDeletionAtPath") - - maxDuration := resourceDeletionTimeoutSecs * time.Second - - checkResourceDeletion := func() error { - if _, err := os.Stat(resource); err == nil { - if err = os.Remove(resource); err != nil { - log.WithFields(fields).Debugf("Failed to remove resource, %s", err) - return fmt.Errorf("Failed to remove resource %s; %s", resource, err) - } - return nil - } else if !os.IsNotExist(err) { - log.WithFields(fields).Debugf("Can't determine if resource exists; %s", err) - return fmt.Errorf("can't determine if resource %s exists; %s", resource, err) - } - - return nil - } - - deleteNotify := func(err error, duration time.Duration) { - log.WithField("increment", duration).Debug("Resource not deleted yet, waiting.") - } - - deleteBackoff := backoff.NewExponentialBackOff() - deleteBackoff.InitialInterval = 1 * time.Second - deleteBackoff.Multiplier = 1.414 // approx sqrt(2) - deleteBackoff.RandomizationFactor = 0.1 - deleteBackoff.MaxElapsedTime = maxDuration - - // Run the check using an exponential backoff - if err := backoff.RetryNotify(checkResourceDeletion, deleteBackoff, deleteNotify); err != nil { - return fmt.Errorf("could not delete resource after %3.2f seconds", maxDuration.Seconds()) - } else { - log.WithField("resource", resource).Debug("Resource deleted.") - return nil - } -} - -// EnsureDirExists makes sure that given directory structure exists -func EnsureDirExists(path string) error { - fields := log.Fields{ - "path": path, - } - if info, err := os.Stat(path); err == nil { - if !info.IsDir() { - log.WithFields(fields).Error("Path exists but is not a directory") - return fmt.Errorf("path exists but is not a directory: %s", path) - } - return nil - } else if !os.IsNotExist(err) { - log.WithFields(fields).Errorf("Can't determine if directory exists; %s", err) - return fmt.Errorf("can't determine if directory %s exists; %s", path, err) - } - - err := os.MkdirAll(path, 0755) - if err != nil { - log.WithFields(fields).Errorf("Mkdir failed; %s", err) - return fmt.Errorf("failed to mkdir %s; %s", path, err) - } - - return nil -} - 
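// Condensed sketch of the retry idiom used throughout this file (as in
// waitForResourceDeletionAtPath above): an operation is retried with exponential backoff
// until it succeeds or MaxElapsedTime elapses, with a notify callback invoked before each
// wait. The wrapper name and package are illustrative; the backoff parameters mirror the
// values used above.
package example

import (
	"errors"
	"os"
	"time"

	"github.com/cenkalti/backoff"
)

func waitUntilPathExists(path string, maxWait time.Duration) error {
	checkPathExists := func() error {
		if _, err := os.Stat(path); err != nil {
			return errors.New("path not present yet")
		}
		return nil
	}
	notify := func(err error, next time.Duration) {
		// next is how long RetryNotify will wait before the following attempt.
	}

	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 1 * time.Second
	b.Multiplier = 1.414 // approx sqrt(2)
	b.RandomizationFactor = 0.1
	b.MaxElapsedTime = maxWait

	return backoff.RetryNotify(checkPathExists, b, notify)
}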
-// getSysfsBlockDirsForLUN returns the list of directories in sysfs where the block devices should appear -// after the scan is successful. One directory is returned for each path in the host session map. -func getSysfsBlockDirsForLUN(lunID int, hostSessionMap map[int]int) []string { - - paths := make([]string, 0) - for hostNumber, sessionNumber := range hostSessionMap { - path := fmt.Sprintf(chrootPathPrefix+"/sys/class/scsi_host/host%d/device/session%d/iscsi_session/session%d/device/target%d:0:0/%d:0:0:%d", - hostNumber, sessionNumber, sessionNumber, hostNumber, hostNumber, lunID) - paths = append(paths, path) - } - return paths -} - -// getDevicesForLUN find the /dev/sd* device names for an iSCSI LUN. -func getDevicesForLUN(paths []string) ([]string, error) { - - devices := make([]string, 0) - for _, path := range paths { - dirname := path + "/block" - if !PathExists(dirname) { - continue - } - dirFd, err := os.Open(dirname) - if err != nil { - return nil, err - } - list, err := dirFd.Readdir(1) - dirFd.Close() - if err != nil { - return nil, err - } - if 0 == len(list) { - continue - } - devices = append(devices, list[0].Name()) - } - return devices, nil -} - -// waitForDeviceScanIfNeeded scans all paths to a specific LUN and waits until all -// SCSI disk-by-path devices for that LUN are present on the host. -func waitForDeviceScanIfNeeded(lunID int, iSCSINodeName string, shouldScan bool) error { - - fields := log.Fields{ - "lunID": lunID, - "iSCSINodeName": iSCSINodeName, - } - log.WithFields(fields).Debug(">>>> osutils.waitForDeviceScanIfNeeded") - defer log.WithFields(fields).Debug("<<<< osutils.waitForDeviceScanIfNeeded") - - hostSessionMap := getISCSIHostSessionMapForTarget(iSCSINodeName) - if len(hostSessionMap) == 0 { - return fmt.Errorf("no iSCSI hosts found for target %s", iSCSINodeName) - } - - log.WithField("hostSessionMap", hostSessionMap).Debug("Built iSCSI host/session map.") - hosts := make([]int, 0) - for hostNumber := range hostSessionMap { - hosts = append(hosts, hostNumber) - } - - if shouldScan { - if err := iSCSIScanTargetLUN(lunID, hosts); err != nil { - log.WithField("scanError", err).Error("Could not scan for new LUN.") - } - } - - paths := getSysfsBlockDirsForLUN(lunID, hostSessionMap) - log.Debugf("Scanning paths: %v", paths) - found := make([]string, 0) - - checkAllDevicesExist := func() error { - - found := make([]string, 0) - // Check if any paths present, and return nil (success) if so - for _, path := range paths { - dirname := path + "/block" - if !PathExists(dirname) { - return errors.New("device not present yet") - } - found = append(found, dirname) - } - return nil - } - - devicesNotify := func(err error, duration time.Duration) { - log.WithField("increment", duration).Debug("All devices not yet present, waiting.") - } - - deviceBackoff := backoff.NewExponentialBackOff() - deviceBackoff.InitialInterval = 1 * time.Second - deviceBackoff.Multiplier = 1.414 // approx sqrt(2) - deviceBackoff.RandomizationFactor = 0.1 - deviceBackoff.MaxElapsedTime = 5 * time.Second - - if err := backoff.RetryNotify(checkAllDevicesExist, deviceBackoff, devicesNotify); err == nil { - log.Debugf("Paths found: %v", found) - return nil - } - - log.Debugf("Paths found so far: %v", found) - - checkAnyDeviceExists := func() error { - - found := make([]string, 0) - // Check if any paths present, and return nil (success) if so - for _, path := range paths { - dirname := path + "/block" - if PathExists(dirname) { - found = append(found, dirname) - } - } - if 0 == len(found) { - 
return errors.New("no devices present yet") - } - return nil - } - - devicesNotify = func(err error, duration time.Duration) { - log.WithField("increment", duration).Debug("No devices present yet, waiting.") - } - - deviceBackoff = backoff.NewExponentialBackOff() - deviceBackoff.InitialInterval = 1 * time.Second - deviceBackoff.Multiplier = 1.414 // approx sqrt(2) - deviceBackoff.RandomizationFactor = 0.1 - deviceBackoff.MaxElapsedTime = (iSCSIDeviceDiscoveryTimeoutSecs - 5) * time.Second - - // Run the check/scan using an exponential backoff - if err := backoff.RetryNotify(checkAnyDeviceExists, deviceBackoff, devicesNotify); err != nil { - log.Warnf("Could not find all devices after %d seconds.", iSCSIDeviceDiscoveryTimeoutSecs) - - // In the case of a failure, log info about what devices are present - execCommand("ls", "-al", "/dev") - execCommand("ls", "-al", "/dev/mapper") - execCommand("ls", "-al", "/dev/disk/by-path") - execCommand("lsscsi") - execCommand("lsscsi", "-t") - execCommand("free") - return err - } - - log.Debugf("Paths found: %v", found) - return nil -} - -// ScsiDeviceInfo contains information about SCSI devices -type ScsiDeviceInfo struct { - Host string - Channel string - Target string - LUN string - Devices []string - MultipathDevice string - Filesystem string - IQN string - HostSessionMap map[int]int -} - -// getDeviceInfoForLUN finds iSCSI devices using /dev/disk/by-path values. This method should be -// called after calling waitForDeviceScanIfNeeded so that the device paths are known to exist. -func getDeviceInfoForLUN(lunID int, iSCSINodeName string) (*ScsiDeviceInfo, error) { - - fields := log.Fields{ - "lunID": lunID, - "iSCSINodeName": iSCSINodeName, - } - log.WithFields(fields).Debug(">>>> osutils.getDeviceInfoForLUN") - defer log.WithFields(fields).Debug("<<<< osutils.getDeviceInfoForLUN") - - hostSessionMap := getISCSIHostSessionMapForTarget(iSCSINodeName) - if len(hostSessionMap) == 0 { - return nil, fmt.Errorf("no iSCSI hosts found for target %s", iSCSINodeName) - } - - paths := getSysfsBlockDirsForLUN(lunID, hostSessionMap) - - devices, err := getDevicesForLUN(paths) - if nil != err { - return nil, err - } else if 0 == len(devices) { - return nil, fmt.Errorf("scan not completed for LUN %d on target %s", lunID, iSCSINodeName) - } - - multipathDevice := "" - for _, device := range devices { - multipathDevice = findMultipathDeviceForDevice(device) - if multipathDevice != "" { - break - } - } - - fsType := "" - if multipathDevice != "" { - fsType = getFSType("/dev/" + multipathDevice) - } else { - fsType = getFSType("/dev/" + devices[0]) - } - - log.WithFields(log.Fields{ - "LUN": strconv.Itoa(lunID), - "multipathDevice": multipathDevice, - "fsType": fsType, - "deviceNames": devices, - "hostSessionMap": hostSessionMap, - }).Debug("Found SCSI device.") - - info := &ScsiDeviceInfo{ - LUN: strconv.Itoa(lunID), - MultipathDevice: multipathDevice, - Devices: devices, - Filesystem: fsType, - IQN: iSCSINodeName, - HostSessionMap: hostSessionMap, - } - - return info, nil -} - -// getDeviceInfoForMountPath discovers the device that is currently mounted at the specified mount path. It -// uses the ScsiDeviceInfo struct so that it may return a multipath device (if any) plus one or more underlying -// physical devices. 
-func getDeviceInfoForMountPath(mountpath string) (*ScsiDeviceInfo, error) { - - fields := log.Fields{"mountpath": mountpath} - log.WithFields(fields).Debug(">>>> osutils.getDeviceInfoForMountPath") - defer log.WithFields(fields).Debug("<<<< osutils.getDeviceInfoForMountPath") - - device, _, err := GetDeviceNameFromMount(mountpath) - if err != nil { - return nil, err - } - - device, err = filepath.EvalSymlinks(device) - if err != nil { - return nil, err - } - - device = strings.TrimPrefix(device, "/dev/") - - var deviceInfo *ScsiDeviceInfo - - if !strings.HasPrefix(device, "dm-") { - deviceInfo = &ScsiDeviceInfo{ - Devices: []string{device}, - } - } else { - deviceInfo = &ScsiDeviceInfo{ - Devices: findDevicesForMultipathDevice(device), - MultipathDevice: device, - } - } - - log.WithFields(log.Fields{ - "multipathDevice": deviceInfo.MultipathDevice, - "devices": deviceInfo.Devices, - }).Debug("Found SCSI device.") - - return deviceInfo, nil -} - -// waitForMultipathDeviceForLUN -func waitForMultipathDeviceForLUN(lunID int, iSCSINodeName string) error { - fields := log.Fields{ - "lunID": lunID, - "iSCSINodeName": iSCSINodeName, - } - log.WithFields(fields).Debug(">>>> osutils.waitForMultipathDeviceForLUN") - defer log.WithFields(fields).Debug("<<<< osutils.waitForMultipathDeviceForLUN") - - hostSessionMap := getISCSIHostSessionMapForTarget(iSCSINodeName) - if len(hostSessionMap) == 0 { - return fmt.Errorf("no iSCSI hosts found for target %s", iSCSINodeName) - } - - paths := getSysfsBlockDirsForLUN(lunID, hostSessionMap) - - devices, err := getDevicesForLUN(paths) - if nil != err { - return err - } - - waitForMultipathDeviceForDevices(devices) - return nil -} - -// waitForMultipathDeviceForDevices accepts a list of sd* device names and waits until -// a multipath device is present for at least one of those. It returns the name of the -// multipath device, or an empty string if multipathd isn't running or there is only one path. 
-func waitForMultipathDeviceForDevices(devices []string) string { - - fields := log.Fields{"devices": devices} - log.WithFields(fields).Debug(">>>> osutils.waitForMultipathDeviceForDevices") - defer log.WithFields(fields).Debug("<<<< osutils.waitForMultipathDeviceForDevices") - - if len(devices) <= 1 { - log.Debugf("Skipping multipath discovery, %d device(s) specified.", len(devices)) - return "" - } else if !multipathdIsRunning() { - log.Debug("Skipping multipath discovery, multipathd isn't running.") - return "" - } - - maxDuration := multipathDeviceDiscoveryTimeoutSecs * time.Second - multipathDevice := "" - - checkMultipathDeviceExists := func() error { - - for _, device := range devices { - multipathDevice = findMultipathDeviceForDevice(device) - if multipathDevice != "" { - return nil - } - } - if multipathDevice == "" { - return errors.New("multipath device not yet present") - } - return nil - } - - deviceNotify := func(err error, duration time.Duration) { - log.WithField("increment", duration).Debug("Multipath device not yet present, waiting.") - } - - multipathDeviceBackoff := backoff.NewExponentialBackOff() - multipathDeviceBackoff.InitialInterval = 1 * time.Second - multipathDeviceBackoff.Multiplier = 1.414 // approx sqrt(2) - multipathDeviceBackoff.RandomizationFactor = 0.1 - multipathDeviceBackoff.MaxElapsedTime = maxDuration - - // Run the check/scan using an exponential backoff - if err := backoff.RetryNotify(checkMultipathDeviceExists, multipathDeviceBackoff, deviceNotify); err != nil { - log.Warnf("Could not find multipath device after %3.2f seconds.", maxDuration.Seconds()) - } else { - log.WithField("multipathDevice", multipathDevice).Debug("Multipath device found.") - } - return multipathDevice -} - -// waitForDevice accepts a device name and waits until it is present and returns error if it times out -func waitForDevice(device string) error { - - fields := log.Fields{"device": device} - log.WithFields(fields).Debug(">>>> osutils.waitForDevice") - defer log.WithFields(fields).Debug("<<<< osutils.waitForDevice") - - maxDuration := multipathDeviceDiscoveryTimeoutSecs * time.Second - - checkDeviceExists := func() error { - if !PathExists(device) { - return errors.New("device not yet present") - } - return nil - } - - deviceNotify := func(err error, duration time.Duration) { - log.WithField("increment", duration).Debug("Device not yet present, waiting.") - } - - deviceBackoff := backoff.NewExponentialBackOff() - deviceBackoff.InitialInterval = 1 * time.Second - deviceBackoff.Multiplier = 1.414 // approx sqrt(2) - deviceBackoff.RandomizationFactor = 0.1 - deviceBackoff.MaxElapsedTime = maxDuration - - // Run the check using an exponential backoff - if err := backoff.RetryNotify(checkDeviceExists, deviceBackoff, deviceNotify); err != nil { - return fmt.Errorf("could not find device after %3.2f seconds", maxDuration.Seconds()) - } else { - log.WithField("device", device).Debug("Device found.") - return nil - } -} - -// findMultipathDeviceForDevice finds the devicemapper parent of a device name like /dev/sdx. 
-func findMultipathDeviceForDevice(device string) string { - - log.WithField("device", device).Debug(">>>> osutils.findMultipathDeviceForDevice") - defer log.WithField("device", device).Debug("<<<< osutils.findMultipathDeviceForDevice") - - holdersDir := chrootPathPrefix + "/sys/block/" + device + "/holders" - if dirs, err := ioutil.ReadDir(holdersDir); err == nil { - for _, f := range dirs { - name := f.Name() - if strings.HasPrefix(name, "dm-") { - return name - } - } - } - - log.WithField("device", device).Debug("Could not find multipath device for device.") - return "" -} - -// findDevicesForMultipathDevice finds the constituent devices for a devicemapper parent device like /dev/dm-0. -func findDevicesForMultipathDevice(device string) []string { - - log.WithField("device", device).Debug(">>>> osutils.findDevicesForMultipathDevice") - defer log.WithField("device", device).Debug("<<<< osutils.findDevicesForMultipathDevice") - - devices := make([]string, 0) - - slavesDir := chrootPathPrefix + "/sys/block/" + device + "/slaves" - if dirs, err := ioutil.ReadDir(slavesDir); err == nil { - for _, f := range dirs { - name := f.Name() - if strings.HasPrefix(name, "sd") { - devices = append(devices, name) - } - } - } - - if len(devices) == 0 { - log.WithField("device", device).Debug("Could not find devices for multipath device.") - } else { - log.WithFields(log.Fields{ - "device": device, - "devices": devices, - }).Debug("Found devices for multipath device.") - } - - return devices -} - -// PrepareDeviceForRemoval informs Linux that a device will be removed. -func PrepareDeviceForRemoval(lunID int, iSCSINodeName string) { - - fields := log.Fields{ - "lunID": lunID, - "iSCSINodeName": iSCSINodeName, - "chrootPathPrefix": chrootPathPrefix, - } - log.WithFields(fields).Debug(">>>> osutils.PrepareDeviceForRemoval") - defer log.WithFields(fields).Debug("<<<< osutils.PrepareDeviceForRemoval") - - deviceInfo, err := getDeviceInfoForLUN(lunID, iSCSINodeName) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "lunID": lunID, - }).Info("Could not get device info for removal, skipping host removal steps.") - return - } - - removeSCSIDevice(deviceInfo) -} - -// PrepareDeviceAtMountPathForRemoval informs Linux that a device will be removed. -func PrepareDeviceAtMountPathForRemoval(mountpoint string, unmount bool) error { - - fields := log.Fields{"mountpoint": mountpoint} - log.WithFields(fields).Debug(">>>> osutils.PrepareDeviceAtMountPathForRemoval") - defer log.WithFields(fields).Debug("<<<< osutils.PrepareDeviceAtMountPathForRemoval") - - deviceInfo, err := getDeviceInfoForMountPath(mountpoint) - if err != nil { - return err - } - - if unmount { - if err := Umount(mountpoint); err != nil { - return err - } - } - - removeSCSIDevice(deviceInfo) - return nil -} - -// removeSCSIDevice informs Linux that a device will be removed. The deviceInfo provided only needs -// the devices and multipathDevice fields set. -func removeSCSIDevice(deviceInfo *ScsiDeviceInfo) { - - // Flush multipath device - multipathFlushDevice(deviceInfo) - - // Flush devices - flushDevice(deviceInfo) - - // Remove device - removeDevice(deviceInfo) - - // Give the host a chance to fully process the removal - time.Sleep(time.Second) -} - -// ISCSISupported returns true if iscsiadm is installed and in the PATH. 
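// The exec helpers referenced throughout this file (execCommand, execIscsiadmCommand,
// execCommandWithTimeout) are defined elsewhere in osutils.go. A minimal sketch of the
// timeout variant, assuming it simply shells out and captures combined output; the actual
// implementation may differ.
package example

import (
	"context"
	"os/exec"
	"time"
)

func execCommandWithTimeout(name string, timeoutSeconds time.Duration, args ...string) ([]byte, error) {
	// Callers pass a bare count of seconds (e.g. 30), so scale it into a Duration here.
	ctx, cancel := context.WithTimeout(context.Background(), timeoutSeconds*time.Second)
	defer cancel()
	return exec.CommandContext(ctx, name, args...).CombinedOutput()
}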
-func ISCSISupported() bool { - - log.Debug(">>>> osutils.ISCSISupported") - defer log.Debug("<<<< osutils.ISCSISupported") - - _, err := execIscsiadmCommand("-V") - if err != nil { - log.Debug("iscsiadm tools not found on this host.") - return false - } - return true -} - -// ISCSIDiscoveryInfo contains information about discovered iSCSI targets. -type ISCSIDiscoveryInfo struct { - Portal string - PortalIP string - TargetName string -} - -// iSCSIDiscovery uses the 'iscsiadm' command to perform discovery. -func iSCSIDiscovery(portal string) ([]ISCSIDiscoveryInfo, error) { - - log.WithField("portal", portal).Debug(">>>> osutils.iSCSIDiscovery") - defer log.Debug("<<<< osutils.iSCSIDiscovery") - - out, err := execIscsiadmCommand("-m", "discovery", "-t", "sendtargets", "-p", portal) - if err != nil { - return nil, err - } - - /* - iscsiadm -m discovery -t st -p 10.63.152.249:3260 - - 10.63.152.249:3260,1 iqn.1992-08.com.netapp:2752.600a0980006074c20000000056b32c4d - 10.63.152.250:3260,2 iqn.1992-08.com.netapp:2752.600a0980006074c20000000056b32c4d - - a[0]==10.63.152.249:3260,1 - a[1]==iqn.1992-08.com.netapp:2752.600a0980006074c20000000056b32c4d - */ - - var discoveryInfo []ISCSIDiscoveryInfo - - lines := strings.Split(string(out), "\n") - for _, l := range lines { - a := strings.Fields(l) - if len(a) >= 2 { - - portalIP := strings.Split(a[0], ":")[0] - - discoveryInfo = append(discoveryInfo, ISCSIDiscoveryInfo{ - Portal: a[0], - PortalIP: portalIP, - TargetName: a[1], - }) - - log.WithFields(log.Fields{ - "Portal": a[0], - "PortalIP": portalIP, - "TargetName": a[1], - }).Debug("Adding iSCSI discovery info.") - } - } - return discoveryInfo, nil -} - -// ISCSISessionInfo contains information about iSCSI sessions. -type ISCSISessionInfo struct { - SID string - Portal string - PortalIP string - TargetName string -} - -// getISCSISessionInfo parses output from 'iscsiadm -m session' and returns the parsed output. 
-func getISCSISessionInfo() ([]ISCSISessionInfo, error) { - - log.Debug(">>>> osutils.getISCSISessionInfo") - defer log.Debug("<<<< osutils.getISCSISessionInfo") - - out, err := execIscsiadmCommand("-m", "session") - if err != nil { - exitErr, ok := err.(*exec.ExitError) - if ok && exitErr.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() == iSCSIErrNoObjsFound { - log.Debug("No iSCSI session found.") - return []ISCSISessionInfo{}, nil - } else { - log.WithField("error", err).Error("Problem checking iSCSI sessions.") - return nil, err - } - } - - /* - # iscsiadm -m session - - tcp: [3] 10.0.207.7:3260,1028 iqn.1992-08.com.netapp:sn.afbb1784f77411e582f8080027e22798:vs.3 (non-flash) - tcp: [4] 10.0.207.9:3260,1029 iqn.1992-08.com.netapp:sn.afbb1784f77411e582f8080027e22798:vs.3 (non-flash) - - a[0]==tcp: - a[1]==[4] - a[2]==10.0.207.9:3260,1029 - a[3]==iqn.1992-08.com.netapp:sn.afbb1784f77411e582f8080027e22798:vs.3 - a[4]==(non-flash) - */ - - var sessionInfo []ISCSISessionInfo - - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - for _, l := range lines { - - a := strings.Fields(l) - if len(a) > 3 { - sid := a[1] - sid = sid[1 : len(sid)-1] - - portalIP := strings.Split(a[2], ":")[0] - sessionInfo = append(sessionInfo, ISCSISessionInfo{ - SID: sid, - Portal: a[2], - PortalIP: portalIP, - TargetName: a[3], - }) - - log.WithFields(log.Fields{ - "SID": sid, - "Portal": a[2], - "PortalIP": portalIP, - "TargetName": a[3], - }).Debug("Adding iSCSI session info.") - } - } - - return sessionInfo, nil -} - -// ISCSIDisableDelete logs out from the supplied target and removes the iSCSI device. -func ISCSIDisableDelete(targetIQN, targetPortal string) error { - - logFields := log.Fields{ - "targetIQN": targetIQN, - "targetPortal": targetPortal, - } - log.WithFields(logFields).Debug(">>>> osutils.ISCSIDisableDelete") - defer log.WithFields(logFields).Debug("<<<< osutils.ISCSIDisableDelete") - - _, err := execIscsiadmCommand("-m", "node", "-T", targetIQN, "--portal", targetPortal, "-u") - if err != nil { - log.WithField("error", err).Debug("Error during iSCSI logout.") - } - - _, err = execIscsiadmCommand("-m", "node", "-o", "delete", "-T", targetIQN) - return err -} - -// UmountAndRemoveTemporaryMountPoint unmounts and removes the TemporaryMountDir -func UmountAndRemoveTemporaryMountPoint(mountPath string) error { - log.Debug(">>>> osutils.UmountAndRemoveTemporaryMountPoint") - defer log.Debug("<<<< osutils.UmountAndRemoveTemporaryMountPoint") - - // Delete the temporary mount point if it exists. 
- tmpDir := path.Join(mountPath, temporaryMountDir) - if _, err := os.Stat(tmpDir); err == nil { - if err = removeMountPoint(tmpDir); err != nil { - return fmt.Errorf("failed to remove directory in staging target path %s; %s", tmpDir, err) - } - } else if !os.IsNotExist(err) { - log.WithField("temporaryMountPoint", tmpDir).Errorf("Can't determine if temporary dir path exists; %s", err) - return fmt.Errorf("can't determine if temporary dir path %s exists; %s", tmpDir, err) - } - - return nil -} - -// removeMountPoint attempts to unmount and remove the directory of the mountPointPath -func removeMountPoint(mountPointPath string) error { - log.Debug(">>>> osutils.removeMountPoint") - defer log.Debug("<<<< osutils.removeMountPoint") - - err := Umount(mountPointPath) - if err != nil { - log.WithField("mountPointPath", mountPointPath).Errorf("Umount failed; %s", err) - return err - } - - err = os.Remove(mountPointPath) - if err != nil { - log.WithField("mountPointPath", mountPointPath).Errorf("Remove dir failed; %s", err) - return fmt.Errorf("failed to remove dir %s; %s", mountPointPath, err) - } - return nil -} - -// mountFilesystemForResize expands a filesystem. The xfs_growfs utility requires a mount point to expand the -// filesystem. Determining the size of the filesystem requires that the filesystem be mounted. -func mountFilesystemForResize(devicePath string, stagedTargetPath string, mountOptions string) (string, error) { - logFields := log.Fields{ - "devicePath": devicePath, - "stagedTargetPath": stagedTargetPath, - "mountOptions": mountOptions, - } - log.WithFields(logFields).Debug(">>>> osutils.mountAndExpandFilesystem") - defer log.WithFields(logFields).Debug("<<<< osutils.mountAndExpandFilesystem") - - tmpMountPoint := path.Join(stagedTargetPath, temporaryMountDir) - err := MountDevice(devicePath, tmpMountPoint, mountOptions, false) - if err != nil { - return "", fmt.Errorf("unable to mount device; %s", err) - } - return tmpMountPoint, nil -} - -// ExpandISCSIFilesystem will expand the filesystem of an already expanded volume. -func ExpandISCSIFilesystem(publishInfo *VolumePublishInfo, stagedTargetPath string) (int64, error) { - devicePath := publishInfo.DevicePath - logFields := log.Fields{ - "devicePath": devicePath, - "stagedTargetPath": stagedTargetPath, - "mountOptions": publishInfo.MountOptions, - "filesystemType": publishInfo.FilesystemType, - } - log.WithFields(logFields).Debug(">>>> osutils.ExpandISCSIFilesystem") - defer log.WithFields(logFields).Debug("<<<< osutils.ExpandISCSIFilesystem") - - tmpMountPoint, err := mountFilesystemForResize(publishInfo.DevicePath, stagedTargetPath, publishInfo.MountOptions) - if err != nil { - return 0, err - } - defer removeMountPoint(tmpMountPoint) - - // Don't need to verify the filesystem type as the resize utilities will throw an error if the filesystem - // is not the correct type. 
- var size int64 - switch publishInfo.FilesystemType { - case "xfs": - size, err = expandFilesystem("xfs_growfs", tmpMountPoint, tmpMountPoint) - if err != nil { - return 0, err - } - case "ext3", "ext4": - size, err = expandFilesystem("resize2fs", devicePath, tmpMountPoint) - if err != nil { - return 0, err - } - default: - err = fmt.Errorf("unsupported file system type: %s", publishInfo.FilesystemType) - } - - return size, err -} - -func expandFilesystem(cmd string, cmdArguments string, tmpMountPoint string) (int64, error) { - logFields := log.Fields{ - "cmd": cmd, - "cmdArguments": cmdArguments, - "tmpMountPoint": tmpMountPoint, - } - log.WithFields(logFields).Debug(">>>> osutils.expandFilesystem") - defer log.WithFields(logFields).Debug("<<<< osutils.expandFilesystem") - - preExpandSize, err := getFilesystemSize(tmpMountPoint) - if err != nil { - return 0, err - } - _, err = execCommand(cmd, cmdArguments) - if err != nil { - log.Errorf("Expanding filesystem failed; %s", err) - return 0, err - } - - postExpandSize, err := getFilesystemSize(tmpMountPoint) - if err != nil { - return 0, err - } - - if postExpandSize == preExpandSize { - log.Warnf("Failed to expand filesystem; size=%d", postExpandSize) - } - - return postExpandSize, nil -} - -// iSCSISessionExists checks to see if a session exists to the specified portal. -func iSCSISessionExists(portal string) (bool, error) { - - log.Debug(">>>> osutils.iSCSISessionExists") - defer log.Debug("<<<< osutils.iSCSISessionExists") - - sessionInfo, err := getISCSISessionInfo() - if err != nil { - log.WithField("error", err).Error("Problem checking iSCSI sessions.") - return false, err - } - - for _, e := range sessionInfo { - if e.PortalIP == portal { - return true, nil - } - } - - return false, nil -} - -// iSCSISessionExistsToTargetIQN checks to see if a session exists to the specified target. 
-func iSCSISessionExistsToTargetIQN(targetIQN string) (bool, error) { - - log.Debug(">>>> osutils.iSCSISessionExistsToTargetIQN") - defer log.Debug("<<<< osutils.iSCSISessionExistsToTargetIQN") - - sessionInfo, err := getISCSISessionInfo() - if err != nil { - log.WithField("error", err).Error("Problem checking iSCSI sessions.") - return false, err - } - - for _, e := range sessionInfo { - if e.TargetName == targetIQN { - return true, nil - } - } - - return false, nil -} - -func ISCSIRescanDevices(targetIQN string, lunID int32, minSize int64) error { - fields := log.Fields{"targetIQN": targetIQN, "lunID": lunID} - log.WithFields(fields).Debug(">>>> osutils.ISCSIRescanDevices") - defer log.WithFields(fields).Debug("<<<< osutils.ISCSIRescanDevices") - - deviceInfo, err := getDeviceInfoForLUN(int(lunID), targetIQN) - if err != nil { - return fmt.Errorf("error getting iSCSI device information: %s", err) - } else if deviceInfo == nil { - return fmt.Errorf("could not get iSCSI device information for LUN: %d", lunID) - } - - allLargeEnough := true - for _, diskDevice := range deviceInfo.Devices { - size, err := getISCSIDiskSize("/dev/" + diskDevice) - if err != nil { - return err - } - if size < minSize { - allLargeEnough = false - } else { - continue - } - - err = iSCSIRescanDisk(diskDevice) - if err != nil { - log.WithField("diskDevice", diskDevice).Error("Failed to rescan disk.") - return fmt.Errorf("failed to rescan disk %s: %s", diskDevice, err) - } - } - - if !allLargeEnough { - time.Sleep(time.Second) - for _, diskDevice := range deviceInfo.Devices { - size, err := getISCSIDiskSize("/dev/" + diskDevice) - if err != nil { - return err - } - if size < minSize { - log.Error("Disk size not large enough after resize.") - return fmt.Errorf("disk size not large enough after resize: %d, %d", size, minSize) - } - } - } - - if deviceInfo.MultipathDevice != "" { - multipathDevice := deviceInfo.MultipathDevice - size, err := getISCSIDiskSize("/dev/" + multipathDevice) - if err != nil { - return err - } - - if size < minSize { - err := reloadMultipathDevice(multipathDevice) - if err != nil { - return err - } - time.Sleep(time.Second) - size, err = getISCSIDiskSize("/dev/" + multipathDevice) - if err != nil { - return err - } - if size < minSize { - log.Error("Multipath device not large enough after resize.") - return fmt.Errorf("multipath device not large enough after resize: %d < %d", size, minSize) - } - } - } - - return nil -} - -func reloadMultipathDevice(multipathDevice string) error { - fields := log.Fields{"multipathDevice": multipathDevice} - log.WithFields(fields).Debug(">>>> osutils.reloadMultipathDevice") - defer log.WithFields(fields).Debug("<<<< osutils.reloadMultipathDevice") - - if multipathDevice == "" { - return fmt.Errorf("cannot reload an empty multipathDevice") - } - - _, err := execCommandWithTimeout("multipath", 30, "-r", "/dev/"+multipathDevice) - if err != nil { - log.WithFields(log.Fields{ - "device": multipathDevice, - "error": err, - }).Error("Failed to reload multipathDevice.") - return fmt.Errorf("failed to reload multipathDevice %s: %s", multipathDevice, err) - } - - log.WithFields(fields).Debug("Multipath device reloaded.") - return nil -} - -// iSCSIRescanDisk causes the kernel to rescan a single iSCSI disk/block device. -// This is how size changes are found when expanding a volume. 
-func iSCSIRescanDisk(deviceName string) error { - fields := log.Fields{"deviceName": deviceName} - log.WithFields(fields).Debug(">>>> osutils.iSCSIRescanDisk") - defer log.WithFields(fields).Debug("<<<< osutils.iSCSIRescanDisk") - - filename := fmt.Sprintf(chrootPathPrefix+"/sys/block/%s/device/rescan", deviceName) - log.WithField("filename", filename).Debug("Opening file for writing.") - - f, err := os.OpenFile(filename, os.O_WRONLY, 0) - if err != nil { - log.WithField("file", filename).Warning("Could not open file for writing.") - return err - } - defer f.Close() - - written, err := f.WriteString("1") - if err != nil { - log.WithFields(log.Fields{ - "file": filename, - "error": err, - }).Warning("Could not write to file.") - return err - } else if written == 0 { - log.WithField("file", filename).Warning("Zero bytes written to file.") - return fmt.Errorf("no data written to %s", filename) - } - - return nil -} - -// iSCSIScanTargetLUN scans a single LUN on an iSCSI target to discover it. -func iSCSIScanTargetLUN(lunID int, hosts []int) error { - - fields := log.Fields{"hosts": hosts, "lunID": lunID} - log.WithFields(fields).Debug(">>>> osutils.iSCSIScanTargetLUN") - defer log.WithFields(fields).Debug("<<<< osutils.iSCSIScanTargetLUN") - - var ( - f *os.File - err error - ) - - for _, hostNumber := range hosts { - - filename := fmt.Sprintf(chrootPathPrefix+"/sys/class/scsi_host/host%d/scan", hostNumber) - if f, err = os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0200); err != nil { - log.WithField("file", filename).Warning("Could not open file for writing.") - return err - } - - scanCmd := fmt.Sprintf("0 0 %d", lunID) - if written, err := f.WriteString(scanCmd); err != nil { - log.WithFields(log.Fields{"file": filename, "error": err}).Warning("Could not write to file.") - f.Close() - return err - } else if written == 0 { - log.WithField("file", filename).Warning("No data written to file.") - f.Close() - return fmt.Errorf("no data written to %s", filename) - } - - f.Close() - - log.WithFields(log.Fields{ - "scanCmd": scanCmd, - "scanFile": filename, - }).Debug("Invoked single-LUN scan.") - } - - return nil -} - -// IsAlreadyAttached checks if there is already an established iSCSI session to the specified LUN. -func IsAlreadyAttached(lunID int, targetIqn string) bool { - - hostSessionMap := getISCSIHostSessionMapForTarget(targetIqn) - if len(hostSessionMap) == 0 { - return false - } - - paths := getSysfsBlockDirsForLUN(lunID, hostSessionMap) - - devices, err := getDevicesForLUN(paths) - if nil != err { - return false - } - - return 0 < len(devices) -} - -// getISCSIHostSessionMapForTarget returns a map of iSCSI host numbers to iSCSI session numbers -// for a given iSCSI target. 
-func getISCSIHostSessionMapForTarget(iSCSINodeName string) map[int]int { - - fields := log.Fields{"iSCSINodeName": iSCSINodeName} - log.WithFields(fields).Debug(">>>> osutils.getISCSIHostSessionMapForTarget") - defer log.WithFields(fields).Debug("<<<< osutils.getISCSIHostSessionMapForTarget") - - var ( - hostNumber int - sessionNumber int - ) - - hostSessionMap := make(map[int]int) - - sysPath := chrootPathPrefix + "/sys/class/iscsi_host/" - if hostDirs, err := ioutil.ReadDir(sysPath); err != nil { - log.WithField("error", err).Errorf("Could not read %s", sysPath) - return hostSessionMap - } else { - for _, hostDir := range hostDirs { - - hostName := hostDir.Name() - if !strings.HasPrefix(hostName, "host") { - continue - } else if hostNumber, err = strconv.Atoi(strings.TrimPrefix(hostName, "host")); err != nil { - log.WithField("host", hostName).Error("Could not parse host number") - continue - } - - devicePath := sysPath + hostName + "/device/" - if deviceDirs, err := ioutil.ReadDir(devicePath); err != nil { - log.WithFields(log.Fields{ - "error": err, - "devicePath": devicePath, - }).Error("Could not read device path.") - return hostSessionMap - } else { - for _, deviceDir := range deviceDirs { - - sessionName := deviceDir.Name() - if !strings.HasPrefix(sessionName, "session") { - continue - } else if sessionNumber, err = strconv.Atoi(strings.TrimPrefix(sessionName, "session")); err != nil { - log.WithField("session", sessionName).Error("Could not parse session number") - continue - } - - targetNamePath := devicePath + sessionName + "/iscsi_session/" + sessionName + "/targetname" - if targetName, err := ioutil.ReadFile(targetNamePath); err != nil { - - log.WithFields(log.Fields{ - "path": targetNamePath, - "error": err, - }).Error("Could not read targetname file") - - } else if strings.TrimSpace(string(targetName)) == iSCSINodeName { - - log.WithFields(log.Fields{ - "hostNumber": hostNumber, - "sessionNumber": sessionNumber, - }).Debug("Found iSCSI host/session.") - - hostSessionMap[hostNumber] = sessionNumber - } - } - } - } - } - - return hostSessionMap -} - -// GetISCSIDevices returns a list of iSCSI devices that are attached to (but not necessarily mounted on) this host. 
-func GetISCSIDevices() ([]*ScsiDeviceInfo, error) { - - log.Debug(">>>> osutils.GetISCSIDevices") - defer log.Debug("<<<< osutils.GetISCSIDevices") - - devices := make([]*ScsiDeviceInfo, 0) - hostSessionMapCache := make(map[string]map[int]int) - - // Start by reading the sessions from /sys/class/iscsi_session - sysPath := chrootPathPrefix + "/sys/class/iscsi_session/" - sessionDirs, err := ioutil.ReadDir(sysPath) - if err != nil { - log.WithField("error", err).Errorf("Could not read %s", sysPath) - return nil, err - } - - // Loop through each of the iSCSI sessions - for _, sessionDir := range sessionDirs { - - sessionName := sessionDir.Name() - if !strings.HasPrefix(sessionName, "session") { - continue - } else if _, err = strconv.Atoi(strings.TrimPrefix(sessionName, "session")); err != nil { - log.WithField("session", sessionName).Error("Could not parse session number") - return nil, err - } - - // Find the target IQN from the session at /sys/class/iscsi_session/sessionXXX/targetname - sessionPath := sysPath + sessionName - targetNamePath := sessionPath + "/targetname" - targetNameBytes, err := ioutil.ReadFile(targetNamePath) - if err != nil { - log.WithFields(log.Fields{ - "path": targetNamePath, - "error": err, - }).Error("Could not read targetname file") - return nil, err - } - - targetIQN := strings.TrimSpace(string(targetNameBytes)) - - log.WithFields(log.Fields{ - "targetIQN": targetIQN, - "sessionName": sessionName, - }).Debug("Found iSCSI session / target IQN.") - - // Find the one target at /sys/class/iscsi_session/sessionXXX/device/targetHH:BB:DD (host:bus:device) - sessionDevicePath := sessionPath + "/device/" - targetDirs, err := ioutil.ReadDir(sessionDevicePath) - if err != nil { - log.WithField("error", err).Errorf("Could not read %s", sessionDevicePath) - return nil, err - } - - // Get the one target directory - hostBusDeviceName := "" - targetDirName := "" - for _, targetDir := range targetDirs { - - targetDirName = targetDir.Name() - - if strings.HasPrefix(targetDirName, "target") { - hostBusDeviceName = strings.TrimPrefix(targetDirName, "target") - break - } - } - - if hostBusDeviceName == "" { - log.Warningf("Could not find a host:bus:device directory at %s", sessionDevicePath) - continue - } - - sessionDeviceHBDPath := sessionDevicePath + targetDirName + "/" - - log.WithFields(log.Fields{ - "hbdPath": sessionDeviceHBDPath, - "hbdName": hostBusDeviceName, - }).Debug("Found host/bus/device path.") - - // Find the devices at /sys/class/iscsi_session/sessionXXX/device/targetHH:BB:DD/HH:BB:DD:LL (host:bus:device:lun) - hostBusDeviceLunDirs, err := ioutil.ReadDir(sessionDeviceHBDPath) - if err != nil { - log.WithField("error", err).Errorf("Could not read %s", sessionDeviceHBDPath) - return nil, err - } - - for _, hostBusDeviceLunDir := range hostBusDeviceLunDirs { - - hostBusDeviceLunDirName := hostBusDeviceLunDir.Name() - if !strings.HasPrefix(hostBusDeviceLunDirName, hostBusDeviceName) { - continue - } - - sessionDeviceHBDLPath := sessionDeviceHBDPath + hostBusDeviceLunDirName + "/" - - log.WithFields(log.Fields{ - "hbdlPath": sessionDeviceHBDLPath, - "hbdlName": hostBusDeviceLunDirName, - }).Debug("Found host/bus/device/LUN path.") - - hbdlValues := strings.Split(hostBusDeviceLunDirName, ":") - if len(hbdlValues) != 4 { - log.Errorf("Could not parse values from %s", hostBusDeviceLunDirName) - return nil, err - } - - hostNum := hbdlValues[0] - busNum := hbdlValues[1] - deviceNum := hbdlValues[2] - lunNum := hbdlValues[3] - - blockPath := sessionDeviceHBDLPath + "/block/" 
- - // Find the block device at /sys/class/iscsi_session/sessionXXX/device/targetHH:BB:DD/HH:BB:DD:LL/block - blockDeviceDirs, err := ioutil.ReadDir(blockPath) - if err != nil { - log.WithField("error", err).Errorf("Could not read %s", blockPath) - return nil, err - } - - for _, blockDeviceDir := range blockDeviceDirs { - - blockDeviceName := blockDeviceDir.Name() - - log.WithField("blockDeviceName", blockDeviceName).Debug("Found block device.") - - // Find multipath device, if any - var slaveDevices []string - multipathDevice := findMultipathDeviceForDevice(blockDeviceName) - if multipathDevice != "" { - slaveDevices = findDevicesForMultipathDevice(multipathDevice) - } else { - slaveDevices = []string{blockDeviceName} - } - - // Get the host/session map, using a cached value if available - hostSessionMap, ok := hostSessionMapCache[targetIQN] - if !ok { - hostSessionMap = getISCSIHostSessionMapForTarget(targetIQN) - hostSessionMapCache[targetIQN] = hostSessionMap - } - - log.WithFields(log.Fields{ - "host": hostNum, - "lun": lunNum, - "devices": slaveDevices, - "multipathDevice": multipathDevice, - "iqn": targetIQN, - "hostSessionMap": hostSessionMap, - }).Debug("Found iSCSI device.") - - device := &ScsiDeviceInfo{ - Host: hostNum, - Channel: busNum, - Target: deviceNum, - LUN: lunNum, - Devices: slaveDevices, - MultipathDevice: multipathDevice, - IQN: targetIQN, - HostSessionMap: hostSessionMap, - } - - devices = append(devices, device) - } - } - } - - return devices, nil -} - -// IsMounted verifies if the supplied device is attached at the supplied location. -func IsMounted(sourceDevice, mountpoint string) (bool, error) { - - fields := log.Fields{ - "source": sourceDevice, - "target": mountpoint, - } - log.WithFields(fields).Debug(">>>> osutils.IsMounted") - defer log.WithFields(fields).Debug("<<<< osutils.IsMounted") - - procSelfMountinfo, err := listProcSelfMountinfo(procSelfMountinfoPath) - - if err != nil { - log.WithFields(fields).Errorf("checking mounted failed; %s", err) - return false, fmt.Errorf("checking mounted failed; %s", err) - } - - var sourceDeviceName string - if sourceDevice != "" && strings.HasPrefix(sourceDevice, "/dev/") { - sourceDeviceName = strings.TrimPrefix(sourceDevice, "/dev/") - } - - for _, procMount := range procSelfMountinfo { - - if !strings.Contains(procMount.MountPoint, mountpoint) { - continue - } - - log.Debugf("Mountpoint found: %v", procMount) - - if sourceDevice == "" { - log.Debugf("Source device: none, Target: %s, is mounted: true", mountpoint) - return true, nil - } - - hasDevMountSourcePrefix := strings.HasPrefix(procMount.MountSource, "/dev/") - - var mountedDevice string - // Resolve any symlinks to get the real device - if hasDevMountSourcePrefix { - device, err := filepath.EvalSymlinks(procMount.MountSource) - if err != nil { - log.Error(err) - continue - } - mountedDevice = strings.TrimPrefix(device, "/dev/") - } else { - mountedDevice = strings.TrimPrefix(procMount.Root, "/") - } - - if sourceDeviceName == mountedDevice { - log.Debugf("Source device: %s, Target: %s, is mounted: true", sourceDeviceName, mountpoint) - return true, nil - } - } - - log.Debugf("Source device: %s, Target: %s, is mounted: false", sourceDevice, mountpoint) - return false, nil -} - -// GetMountedISCSIDevices returns a list of iSCSI devices that are *mounted* on this host. 
-func GetMountedISCSIDevices() ([]*ScsiDeviceInfo, error) { - - log.Debug(">>>> osutils.GetMountedISCSIDevices") - defer log.Debug("<<<< osutils.GetMountedISCSIDevices") - - procSelfMountinfo, err := listProcSelfMountinfo(procSelfMountinfoPath) - if err != nil { - return nil, err - } - - // Get a list of all mounted /dev devices - mountedDevices := make([]string, 0) - for _, procMount := range procSelfMountinfo { - - hasDevMountSourcePrefix := strings.HasPrefix(procMount.MountSource, "/dev/") - hasPvcMountPoint := strings.Contains(procMount.MountPoint, "/pvc-") - - if !hasPvcMountPoint { - continue - } - - var mountedDevice string - // Resolve any symlinks to get the real device - if hasDevMountSourcePrefix { - device, err := filepath.EvalSymlinks(procMount.MountSource) - if err != nil { - log.Error(err) - continue - } - mountedDevice = strings.TrimPrefix(device, "/dev/") - } else { - mountedDevice = strings.TrimPrefix(procMount.Root, "/") - } - - mountedDevices = append(mountedDevices, mountedDevice) - } - - // Get all known iSCSI devices - iscsiDevices, err := GetISCSIDevices() - if err != nil { - return nil, err - } - - mountedISCSIDevices := make([]*ScsiDeviceInfo, 0) - - // For each mounted device, look for a matching iSCSI device - for _, mountedDevice := range mountedDevices { - iSCSIDeviceLoop: - for _, iscsiDevice := range iscsiDevices { - - // First look for a multipath device match - if mountedDevice == iscsiDevice.MultipathDevice { - mountedISCSIDevices = append(mountedISCSIDevices, iscsiDevice) - break iSCSIDeviceLoop - - } else { - - // Then look for a slave device match - for _, iscsiSlaveDevice := range iscsiDevice.Devices { - if mountedDevice == iscsiSlaveDevice { - mountedISCSIDevices = append(mountedISCSIDevices, iscsiDevice) - break iSCSIDeviceLoop - } - } - } - } - } - - for _, md := range mountedISCSIDevices { - log.WithFields(log.Fields{ - "host": md.Host, - "lun": md.LUN, - "devices": md.Devices, - "multipathDevice": md.MultipathDevice, - "iqn": md.IQN, - "hostSessionMap": md.HostSessionMap, - }).Debug("Found mounted iSCSI device.") - } - - return mountedISCSIDevices, nil -} - -// ISCSITargetHasMountedDevice returns true if this host has any mounted devices on the specified target. -func ISCSITargetHasMountedDevice(targetIQN string) (bool, error) { - - mountedISCSIDevices, err := GetMountedISCSIDevices() - if err != nil { - return false, err - } - - for _, device := range mountedISCSIDevices { - if device.IQN == targetIQN { - return true, nil - } - } - - return false, nil -} - -// multipathFlushDevice invokes the 'multipath' commands to flush paths for a single device. -func multipathFlushDevice(deviceInfo *ScsiDeviceInfo) { - - log.WithField("device", deviceInfo.MultipathDevice).Debug(">>>> osutils.multipathFlushDevice") - defer log.Debug("<<<< osutils.multipathFlushDevice") - - if deviceInfo.MultipathDevice == "" { - return - } - - _, err := execCommandWithTimeout("multipath", 30, "-f", "/dev/"+deviceInfo.MultipathDevice) - if err != nil { - // nothing to do if it generates an error but log it - log.WithFields(log.Fields{ - "device": deviceInfo.MultipathDevice, - "error": err, - }).Warning("Error encountered in multipath flush device command.") - } -} - -// flushDevice flushes any outstanding I/O to all paths to a device. 
-func flushDevice(deviceInfo *ScsiDeviceInfo) { - - log.Debug(">>>> osutils.flushDevice") - defer log.Debug("<<<< osutils.flushDevice") - - for _, device := range deviceInfo.Devices { - _, err := execCommandWithTimeout("blockdev", 5, "--flushbufs", "/dev/"+device) - if err != nil { - // nothing to do if it generates an error but log it - log.WithFields(log.Fields{ - "device": device, - "error": err, - }).Warning("Error encountered in blockdev --flushbufs command.") - } - } -} - -// removeDevice tells Linux that a device will be removed. -func removeDevice(deviceInfo *ScsiDeviceInfo) { - - log.Debug(">>>> osutils.removeDevice") - defer log.Debug("<<<< osutils.removeDevice") - - var ( - f *os.File - err error - ) - - for _, deviceName := range deviceInfo.Devices { - - filename := fmt.Sprintf(chrootPathPrefix+"/sys/block/%s/device/delete", deviceName) - if f, err = os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0200); err != nil { - log.WithField("file", filename).Warning("Could not open file for writing.") - return - } - - if written, err := f.WriteString("1"); err != nil { - log.WithFields(log.Fields{"file": filename, "error": err}).Warning("Could not write to file.") - f.Close() - return - } else if written == 0 { - log.WithField("file", filename).Warning("No data written to file.") - f.Close() - return - } - - f.Close() - - log.WithField("scanFile", filename).Debug("Invoked device delete.") - } -} - -// multipathdIsRunning returns true if the multipath daemon is running. -func multipathdIsRunning() bool { - - log.Debug(">>>> osutils.multipathdIsRunning") - defer log.Debug("<<<< osutils.multipathdIsRunning") - - out, err := execCommand("pgrep", "multipathd") - if err == nil { - pid := strings.TrimSpace(string(out)) - if pidRegex.MatchString(pid) { - log.WithField("pid", pid).Debug("multipathd is running") - return true - } - } else { - log.Error(err) - } - - out, err = execCommand("multipathd", "show", "daemon") - if err == nil { - if pidRunningRegex.MatchString(string(out)) { - log.Debug("multipathd is running") - return true - } - } else { - log.Error(err) - } - - return false -} - -// getFSType returns the filesystem for the supplied device. -func getFSType(device string) string { - - log.WithField("device", device).Debug(">>>> osutils.getFSType") - defer log.Debug("<<<< osutils.getFSType") - - fsType := "" - out, err := execCommand("blkid", device) - if err != nil { - log.WithField("device", device).Debug("Could not get FSType for device.") - return fsType - } - - if strings.Contains(string(out), "TYPE=") { - for _, v := range strings.Split(string(out), " ") { - if strings.Contains(v, "TYPE=") { - fsType = strings.Split(v, "=")[1] - fsType = strings.Replace(fsType, "\"", "", -1) - fsType = strings.TrimSpace(fsType) - } - } - } - return fsType -} - -// formatVolume creates a filesystem for the supplied device of the supplied type. 
-func formatVolume(device, fstype string) error { - - logFields := log.Fields{"device": device, "fsType": fstype} - log.WithFields(logFields).Debug(">>>> osutils.formatVolume") - defer log.WithFields(logFields).Debug("<<<< osutils.formatVolume") - - maxDuration := 30 * time.Second - - formatVolume := func() error { - - var err error - - switch fstype { - case "xfs": - _, err = execCommand("mkfs.xfs", "-f", device) - case "ext3": - _, err = execCommand("mkfs.ext3", "-F", device) - case "ext4": - _, err = execCommand("mkfs.ext4", "-F", device) - default: - return fmt.Errorf("unsupported file system type: %s", fstype) - } - - return err - } - - formatNotify := func(err error, duration time.Duration) { - log.WithField("increment", duration).Debug("Format failed, retrying.") - } - - formatBackoff := backoff.NewExponentialBackOff() - formatBackoff.InitialInterval = 2 * time.Second - formatBackoff.Multiplier = 2 - formatBackoff.RandomizationFactor = 0.1 - formatBackoff.MaxElapsedTime = maxDuration - - // Run the check/scan using an exponential backoff - if err := backoff.RetryNotify(formatVolume, formatBackoff, formatNotify); err != nil { - log.Warnf("Could not format device after %3.2f seconds.", maxDuration.Seconds()) - return err - } - - log.WithFields(logFields).Info("Device formatted.") - return nil -} - -// MountDevice attaches the supplied device at the supplied location. Use this for iSCSI devices. -func MountDevice(device, mountpoint, options string, isMountPointFile bool) (err error) { - - log.WithFields(log.Fields{ - "device": device, - "mountpoint": mountpoint, - "options": options, - }).Debug(">>>> osutils.MountDevice") - defer log.Debug("<<<< osutils.MountDevice") - - // Build the command - var args []string - if len(options) > 0 { - args = []string{"-o", strings.TrimPrefix(options, "-o "), device, mountpoint} - } else { - args = []string{device, mountpoint} - } - - mounted, _ := IsMounted(device, mountpoint) - exists := PathExists(mountpoint) - - log.Debugf("Already mounted: %v, mountpoint exists: %v", mounted, exists) - - if !exists { - if isMountPointFile { - if err = EnsureFileExists(mountpoint); err != nil { - log.WithField("error", err).Warning("File check failed.") - } - } else { - if err = EnsureDirExists(mountpoint); err != nil { - log.WithField("error", err).Warning("Mkdir failed.") - } - } - } - - if !mounted { - if _, err = execCommand("mount", args...); err != nil { - log.WithField("error", err).Error("Mount failed.") - } - } - - return -} - -// mountNFSPath attaches the supplied NFS share at the supplied location with options. 
-func mountNFSPath(exportPath, mountpoint, options string) (err error) { - - log.WithFields(log.Fields{ - "exportPath": exportPath, - "mountpoint": mountpoint, - "options": options, - }).Debug(">>>> osutils.mountNFSPath") - defer log.Debug("<<<< osutils.mountNFSPath") - - // Build the command - var args []string - if len(options) > 0 { - args = []string{"-t", "nfs", "-o", strings.TrimPrefix(options, "-o "), exportPath, mountpoint} - } else { - args = []string{"-t", "nfs", exportPath, mountpoint} - } - - // Create the mount point dir if necessary - if _, err = execCommand("mkdir", "-p", mountpoint); err != nil { - log.WithField("error", err).Warning("Mkdir failed.") - } - - if out, err := execCommand("mount", args...); err != nil { - log.WithField("output", string(out)).Debug("Mount failed.") - return fmt.Errorf("error mounting NFS volume %v on mountpoint %v: %v", exportPath, mountpoint, err) - } - - return nil -} - -// Umount detaches from the supplied location. -func Umount(mountpoint string) (err error) { - - log.WithField("mountpoint", mountpoint).Debug(">>>> osutils.Umount") - defer log.Debug("<<<< osutils.Umount") - - if _, err = execCommand("umount", mountpoint); err != nil { - log.WithField("error", err).Error("Umount failed.") - } - return -} - -// loginISCSITarget logs in to an iSCSI target. -func loginISCSITarget(iqn, portal string) error { - - log.WithFields(log.Fields{ - "IQN": iqn, - "Portal": portal, - }).Debug(">>>> osutils.loginISCSITarget") - defer log.Debug("<<<< osutils.loginISCSITarget") - - args := []string{"-m", "node", "-T", iqn, "-l", "-p", portal + ":3260"} - - if _, err := execIscsiadmCommand(args...); err != nil { - log.WithField("error", err).Error("Error logging in to iSCSI target.") - return err - } - return nil -} - -// loginWithChap will login to the iSCSI target with the supplied credentials. -func loginWithChap(tiqn, portal, username, password, iface string, logSensitiveInfo bool) error { - - logFields := log.Fields{ - "IQN": tiqn, - "portal": portal, - "username": username, - "password": "****", - "iface": iface, - } - if logSensitiveInfo { - logFields["password"] = password - } - log.WithFields(logFields).Debug(">>>> osutils.loginWithChap") - defer log.Debug("<<<< osutils.loginWithChap") - - args := []string{"-m", "node", "-T", tiqn, "-p", portal + ":3260"} - - createArgs := append(args, []string{"--interface", iface, "--op", "new"}...) - if _, err := execIscsiadmCommand(createArgs...); err != nil { - log.Error("Error running iscsiadm node create.") - return err - } - - authMethodArgs := append(args, []string{"--op=update", "--name", "node.session.auth.authmethod", "--value=CHAP"}...) - if _, err := execIscsiadmCommand(authMethodArgs...); err != nil { - log.Error("Error running iscsiadm set authmethod.") - return err - } - - authUserArgs := append(args, []string{"--op=update", "--name", "node.session.auth.username", "--value=" + username}...) - if _, err := execIscsiadmCommand(authUserArgs...); err != nil { - log.Error("Error running iscsiadm set authuser.") - return err - } - - authPasswordArgs := append(args, []string{"--op=update", "--name", "node.session.auth.password", "--value=" + password}...) - if _, err := execIscsiadmCommand(authPasswordArgs...); err != nil { - log.Error("Error running iscsiadm set authpassword.") - return err - } - - loginArgs := append(args, []string{"--login"}...) 
- if _, err := execIscsiadmCommand(loginArgs...); err != nil { - log.Error("Error running iscsiadm login.") - return err - } - - return nil -} - -func EnsureISCSISessions(hostDataIPs []string) error { - for _, ip := range hostDataIPs { - if err := EnsureISCSISession(ip); nil != err { - return err - } - } - return nil -} - -func EnsureISCSISession(hostDataIP string) error { - - log.WithField("hostDataIP", hostDataIP).Debug(">>>> osutils.EnsureISCSISession") - defer log.Debug("<<<< osutils.EnsureISCSISession") - - // Ensure iSCSI is supported on system - if !ISCSISupported() { - return errors.New("iSCSI support not detected") - } - - // Ensure iSCSI session exists for the specified iSCSI portal - sessionExists, err := iSCSISessionExists(hostDataIP) - if err != nil { - return fmt.Errorf("could not check for iSCSI session: %v", err) - } - if !sessionExists { - - // Run discovery in case we haven't seen this target from this host - targets, err := iSCSIDiscovery(hostDataIP) - if err != nil { - return fmt.Errorf("could not run iSCSI discovery: %v", err) - } - if len(targets) == 0 { - return errors.New("iSCSI discovery found no targets") - } - - log.WithFields(log.Fields{ - "Targets": targets, - }).Debug("Found matching iSCSI targets.") - - // Determine which target matches the portal we requested - targetIndex := -1 - for i, target := range targets { - if target.PortalIP == hostDataIP { - targetIndex = i - break - } - } - - if targetIndex == -1 { - return fmt.Errorf("iSCSI discovery found no targets with portal %s", hostDataIP) - } - - // To enable multipath, log in to each discovered target with the same IQN (target name) - targetName := targets[targetIndex].TargetName - for _, target := range targets { - if target.TargetName == targetName { - - // Log in to target - err = loginISCSITarget(target.TargetName, target.PortalIP) - if err != nil { - return fmt.Errorf("login to iSCSI target failed: %v", err) - } - } - } - - // Recheck to ensure a session is now open - sessionExists, err = iSCSISessionExists(hostDataIP) - if err != nil { - return fmt.Errorf("could not recheck for iSCSI session: %v", err) - } - if !sessionExists { - return fmt.Errorf("expected iSCSI session %v NOT found, please login to the iSCSI portal", hostDataIP) - } - } - - log.WithField("hostDataIP", hostDataIP).Debug("Found session to iSCSI portal.") - - return nil -} - -// execIscsiadmCommand uses the 'iscsiadm' command to perform operations -func execIscsiadmCommand(args ...string) ([]byte, error) { - return execCommand("iscsiadm", args...) -} - -// execCommand invokes an external process -func execCommand(name string, args ...string) ([]byte, error) { - - log.WithFields(log.Fields{ - "command": name, - "args": args, - }).Debug(">>>> osutils.execCommand.") - - out, err := exec.Command(name, args...).CombinedOutput() - - log.WithFields(log.Fields{ - "command": name, - "output": sanitizeString(string(out)), - "error": err, - }).Debug("<<<< osutils.execCommand.") - - return out, err -} - -// execCommandResult is used to return shell command results via channels between goroutines -type execCommandResult struct { - Output []byte - Error error -} - -// execCommand invokes an external shell command -func execCommandWithTimeout(name string, timeoutSeconds time.Duration, args ...string) ([]byte, error) { - - timeout := timeoutSeconds * time.Second - - log.WithFields(log.Fields{ - "command": name, - "timeoutSeconds": timeout, - "args": args, - }).Debug(">>>> osutils.execCommandWithTimeout.") - - cmd := exec.Command(name, args...) 
- done := make(chan execCommandResult, 1) - var result execCommandResult - - go func() { - out, err := cmd.CombinedOutput() - done <- execCommandResult{Output: out, Error: err} - }() - - select { - case <-time.After(timeout): - if err := cmd.Process.Kill(); err != nil { - log.WithFields(log.Fields{ - "process": name, - "error": err, - }).Error("failed to kill process") - result = execCommandResult{Output: nil, Error: err} - } else { - log.WithFields(log.Fields{ - "process": name, - }).Error("process killed after timeout") - result = execCommandResult{Output: nil, Error: errors.New("process killed after timeout")} - } - case result = <-done: - break - } - - log.WithFields(log.Fields{ - "command": name, - "output": sanitizeString(string(result.Output)), - "error": result.Error, - }).Debug("<<<< osutils.execCommandWithTimeout.") - - return result.Output, result.Error -} - -func sanitizeString(s string) string { - // Strip xterm color & movement characters - s = xtermControlRegex.ReplaceAllString(s, "") - // Strip trailing newline - s = strings.TrimSuffix(s, "\n") - return s -} diff --git a/vendor/github.com/netapp/trident/utils/osutils_darwin.go b/vendor/github.com/netapp/trident/utils/osutils_darwin.go deleted file mode 100644 index 84da95710..000000000 --- a/vendor/github.com/netapp/trident/utils/osutils_darwin.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package utils - -import ( - "errors" - - log "github.com/sirupsen/logrus" -) - -// The Trident build process builds the Trident CLI client for both linux and darwin. -// At compile time golang will type checks the entire code base. Since the CLI is part -// of the Trident code base this file exists to handle darwin specific code. - -func getFilesystemSize(path string) (int64, error) { - log.Debug(">>>> osutils_darwin.getFilesystemSize") - defer log.Debug("<<<< osutils_darwin.getFilesystemSize") - return 0, errors.New("getFilesystemSize is not supported for darwin") -} - -func getISCSIDiskSize(devicePath string) (int64, error) { - log.Debug(">>>> osutils_darwin.getISCSIDiskSize") - defer log.Debug("<<<< osutils_darwin.getISCSIDiskSize") - return 0, errors.New("getBlockSize is not supported for darwin") -} diff --git a/vendor/github.com/netapp/trident/utils/osutils_linux.go b/vendor/github.com/netapp/trident/utils/osutils_linux.go deleted file mode 100644 index 9428a8bdc..000000000 --- a/vendor/github.com/netapp/trident/utils/osutils_linux.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package utils - -import ( - "fmt" - "os" - "syscall" - "unsafe" - - log "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// getFilesystemSize returns the size of the filesystem for the given path. -// The caller of the func is responsible for verifying the mountPoint existence and readiness. -func getFilesystemSize(path string) (int64, error) { - log.Debug(">>>> osutils_linux.getFilesystemSize") - defer log.Debug("<<<< osutils_linux.getFilesystemSize") - - // Warning: syscall.Statfs_t uses types that are OS and arch dependent. The following code has been - // confirmed to work with Linux/amd64 and Darwin/amd64. 
- var buf syscall.Statfs_t - err := syscall.Statfs(path, &buf) - if err != nil { - log.WithField("path", path).Errorf("Failed to statfs: %s", err) - return 0, fmt.Errorf("couldn't get filesystem stats %s: %s", path, err) - } - - size := int64(buf.Blocks) * buf.Bsize - log.WithFields(log.Fields{ - "path": path, - "size": size, - "bsize": buf.Bsize, - "blocks": buf.Blocks, - "avail": buf.Bavail, - "free": buf.Bfree, - }).Debug("Filesystem size information") - return size, nil -} - -// getISCSIDiskSize queries the current block size in bytes -func getISCSIDiskSize(devicePath string) (int64, error) { - fields := log.Fields{"devicePath": devicePath} - log.WithFields(fields).Debug(">>>> osutils_linux.getISCSIDiskSize") - defer log.WithFields(fields).Debug("<<<< osutils_linux.getISCSIDiskSize") - - disk, err := os.Open(devicePath) - if err != nil { - log.Error("Failed to open disk.") - return 0, fmt.Errorf("failed to open disk %s: %s", devicePath, err) - } - defer disk.Close() - - var size int64 - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, disk.Fd(), unix.BLKGETSIZE64, uintptr(unsafe.Pointer(&size))) - if errno != 0 { - err := os.NewSyscallError("ioctl", errno) - log.Error("BLKGETSIZE64 ioctl failed") - return 0, fmt.Errorf("BLKGETSIZE64 ioctl failed %s: %s", devicePath, err) - } - - return size, nil -} diff --git a/vendor/github.com/netapp/trident/utils/types.go b/vendor/github.com/netapp/trident/utils/types.go deleted file mode 100644 index e70df80cb..000000000 --- a/vendor/github.com/netapp/trident/utils/types.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019 NetApp, Inc. All Rights Reserved. - -package utils - -type VolumeAccessInfo struct { - IscsiAccessInfo - NfsAccessInfo - MountOptions string `json:"mountOptions,omitempty"` -} - -type IscsiAccessInfo struct { - IscsiTargetPortal string `json:"iscsiTargetPortal,omitempty"` - IscsiPortals []string `json:"iscsiPortals,omitempty"` - IscsiTargetIQN string `json:"iscsiTargetIqn,omitempty"` - IscsiLunNumber int32 `json:"iscsiLunNumber,omitempty"` - IscsiInterface string `json:"iscsiInterface,omitempty"` - IscsiIgroup string `json:"iscsiIgroup,omitempty"` - IscsiVAGs []int64 `json:"iscsiVags,omitempty"` - IscsiUsername string `json:"iscsiUsername,omitempty"` - IscsiInitiatorSecret string `json:"iscsiInitiatorSecret,omitempty"` - IscsiTargetSecret string `json:"iscsiTargetSecret,omitempty"` -} - -type NfsAccessInfo struct { - NfsServerIP string `json:"nfsServerIp,omitempty"` - NfsPath string `json:"nfsPath,omitempty"` -} - -type VolumePublishInfo struct { - Localhost bool `json:"localhost,omitempty"` - HostIQN []string `json:"hostIQN,omitempty"` - HostIP []string `json:"hostIP,omitempty"` - HostName string `json:"hostName,omitempty"` - FilesystemType string `json:"fstype,omitempty"` - UseCHAP bool `json:"useCHAP,omitempty"` - SharedTarget bool `json:"sharedTarget,omitempty"` - DevicePath string `json:"devicePath,omitempty"` - VolumeAccessInfo -} - -type VolumeTrackingPublishInfo struct { - StagingTargetPath string `json:"stagingTargetPath` -} - -type Node struct { - Name string `json:"name"` - IQN string `json:"iqn,omitempty"` - IPs []string `json:"ips,omitempty"` -} diff --git a/vendor/github.com/netapp/trident/utils/utils.go b/vendor/github.com/netapp/trident/utils/utils.go deleted file mode 100644 index 18ebb9e62..000000000 --- a/vendor/github.com/netapp/trident/utils/utils.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2018 NetApp, Inc. All Rights Reserved. 
- -package utils - -import ( - "fmt" - "math/rand" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - - log "github.com/sirupsen/logrus" -) - -// Linux is a constant value for the runtime.GOOS that represents the Linux OS -const Linux = "linux" - -// Windows is a constant value for the runtime.GOOS that represents the Windows OS -const Windows = "windows" - -// Darwin is a constant value for the runtime.GOOS that represents Apple MacOS -const Darwin = "darwin" - -///////////////////////////////////////////////////////////////////////////// -// -// Binary units -// -///////////////////////////////////////////////////////////////////////////// - -type sizeUnit2 []string - -func (s sizeUnit2) Len() int { - return len(s) -} -func (s sizeUnit2) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s sizeUnit2) Less(i, j int) bool { - return len(s[i]) > len(s[j]) -} - -var lookupTable2 = make(map[string]int) -var units2 = sizeUnit2{} - -func init() { - // populate the lookup table for binary suffixes - lookupTable2["k"] = 1 - lookupTable2["ki"] = 1 - lookupTable2["kib"] = 1 - lookupTable2["m"] = 2 - lookupTable2["mi"] = 2 - lookupTable2["mib"] = 2 - lookupTable2["g"] = 3 - lookupTable2["gi"] = 3 - lookupTable2["gib"] = 3 - lookupTable2["t"] = 4 - lookupTable2["ti"] = 4 - lookupTable2["tib"] = 4 - lookupTable2["p"] = 5 - lookupTable2["pi"] = 5 - lookupTable2["pib"] = 5 - lookupTable2["e"] = 6 - lookupTable2["ei"] = 6 - lookupTable2["eib"] = 6 - lookupTable2["z"] = 7 - lookupTable2["zi"] = 7 - lookupTable2["zib"] = 7 - lookupTable2["y"] = 8 - lookupTable2["yi"] = 8 - lookupTable2["yib"] = 8 - - // The slice of units is used to ensure that they are accessed by suffix from longest to - // shortest, i.e. match 'tib' before matching 'b'. - for unit := range lookupTable2 { - units2 = append(units2, unit) - } - sort.Sort(units2) -} - -///////////////////////////////////////////////////////////////////////////// -// -// SI units -// -///////////////////////////////////////////////////////////////////////////// - -type sizeUnit10 []string - -func (s sizeUnit10) Len() int { - return len(s) -} -func (s sizeUnit10) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s sizeUnit10) Less(i, j int) bool { - return len(s[i]) > len(s[j]) -} - -var lookupTable10 = make(map[string]int) -var units10 = sizeUnit10{} - -func init() { - // populate the lookup table for SI suffixes - lookupTable10["b"] = 0 - lookupTable10["bytes"] = 0 - lookupTable10["kb"] = 1 - lookupTable10["mb"] = 2 - lookupTable10["gb"] = 3 - lookupTable10["tb"] = 4 - lookupTable10["pb"] = 5 - lookupTable10["eb"] = 6 - lookupTable10["zb"] = 7 - lookupTable10["yb"] = 8 - - // The slice of units is used to ensure that they are accessed by suffix from longest to - // shortest, i.e. match 'tib' before matching 'b'. - for unit := range lookupTable10 { - units10 = append(units10, unit) - } - sort.Sort(units10) -} - -// Pow is an integer version of exponentiation; existing builtin is float, we needed an int version. 
-func Pow(x int64, y int) int64 { - if y == 0 { - return 1 - } - - result := x - for n := 1; n < y; n++ { - result = result * x - } - return result -} - -// ConvertSizeToBytes converts size to bytes; see also https://en.wikipedia.org/wiki/Kilobyte -func ConvertSizeToBytes(s string) (string, error) { - - // make lowercase so units detection always works - s = strings.TrimSpace(strings.ToLower(s)) - - // first look for binary units - for _, unit := range units2 { - if strings.HasSuffix(s, unit) { - s = strings.TrimSuffix(s, unit) - if i, err := strconv.ParseInt(s, 10, 0); err != nil { - return "", fmt.Errorf("invalid size value '%s': %v", s, err) - } else { - i = i * Pow(1024, lookupTable2[unit]) - s = strconv.FormatInt(i, 10) - return s, nil - } - } - } - - // fall back to SI units - for _, unit := range units10 { - if strings.HasSuffix(s, unit) { - s = strings.TrimSuffix(s, unit) - if i, err := strconv.ParseInt(s, 10, 0); err != nil { - return "", fmt.Errorf("invalid size value '%s': %v", s, err) - } else { - i = i * Pow(1000, lookupTable10[unit]) - s = strconv.FormatInt(i, 10) - return s, nil - } - } - } - - // no valid units found, so ensure the value is a number - if _, err := strconv.ParseUint(s, 10, 64); err != nil { - return "", fmt.Errorf("invalid size value '%s': %v", s, err) - } - - return s, nil -} - -// GetVolumeSizeBytes determines the size, in bytes, of a volume from the "size" opt value. If "size" has a units -// suffix, that is handled here. If there are no units, the default is GiB. If size is not in opts, the specified -// default value is parsed identically and used instead. -func GetVolumeSizeBytes(opts map[string]string, defaultVolumeSize string) (uint64, error) { - - usingDefaultSize := false - usingDefaultUnits := false - - // Use the size if specified, else use the configured default size - size := GetV(opts, "size", "") - if size == "" { - size = defaultVolumeSize - usingDefaultSize = true - } - - // Default to GiB if no units are present - if !sizeHasUnits(size) { - size += "G" - usingDefaultUnits = true - } - - // Ensure the size is valid - sizeBytesStr, err := ConvertSizeToBytes(size) - if err != nil { - return 0, err - } - sizeBytes, _ := strconv.ParseUint(sizeBytesStr, 10, 64) - - log.WithFields(log.Fields{ - "sizeBytes": sizeBytes, - "size": size, - "usingDefaultSize": usingDefaultSize, - "usingDefaultUnits": usingDefaultUnits, - }).Debug("Determined volume size.") - - return sizeBytes, nil -} - -// sizeHasUnits checks whether a size string includes a units suffix. -func sizeHasUnits(size string) bool { - - // make lowercase so units detection always works - size = strings.TrimSpace(strings.ToLower(size)) - - for _, unit := range units2 { - if strings.HasSuffix(size, unit) { - return true - } - } - for _, unit := range units10 { - if strings.HasSuffix(size, unit) { - return true - } - } - return false -} - -// VolumeSizeWithinTolerance checks to see if requestedSize is within the delta of the currentSize. -// If within the delta true is returned. If not within the delta and requestedSize is less than the -// currentSize false is returned. -func VolumeSizeWithinTolerance(requestedSize int64, currentSize int64, delta int64) (bool, error) { - - sizeDiff := requestedSize - currentSize - if sizeDiff < 0 { - sizeDiff = -sizeDiff - } - - if sizeDiff <= delta { - return true, nil - } - return false, nil -} - -// GetV takes a map, key(s), and a defaultValue; will return the value of the key or defaultValue if none is set. 
-// If keys is a string of key values separated by "|", then each key is tried in turn. This allows compatibility -// with deprecated values, i.e. "fstype|fileSystemType". -func GetV(opts map[string]string, keys string, defaultValue string) string { - - for _, key := range strings.Split(keys, "|") { - // Try key first, then do a case-insensitive search - if value, ok := opts[key]; ok { - return value - } else { - for k, v := range opts { - if strings.EqualFold(k, key) { - return v - } - } - } - } - return defaultValue -} - -// RandomString returns a string of the specified length consisting only of alphabetic characters. -func RandomString(strSize int) string { - chars := "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - var bytes = make([]byte, strSize) - rand.Read(bytes) - for i, b := range bytes { - bytes[i] = chars[b%byte(len(chars))] - } - return string(bytes) -} - -// StringInSlice checks whether a string is in a list of strings -func StringInSlice(s string, list []string) bool { - for _, item := range list { - if item == s { - return true - } - } - return false -} - -func LogHTTPRequest(request *http.Request, requestBody []byte) { - header := ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" - footer := "--------------------------------------------------------------------------------" - - requestURL, _ := url.Parse(request.URL.String()) - requestURL.User = nil - - headers := make(map[string][]string) - for k, v := range request.Header { - headers[k] = v - } - delete(headers, "Authorization") - delete(headers, "Api-Key") - delete(headers, "Secret-Key") - - var body string - if requestBody == nil { - body = "" - } else { - body = string(requestBody) - } - - log.Debugf("\n%s\n%s %s\nHeaders: %v\nBody: %s\n%s", - header, request.Method, requestURL, headers, body, footer) -} - -func LogHTTPResponse(response *http.Response, responseBody []byte) { - header := "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" - footer := "================================================================================" - - headers := make(map[string][]string) - for k, v := range response.Header { - headers[k] = v - } - delete(headers, "Authorization") - delete(headers, "Api-Key") - delete(headers, "Secret-Key") - - var body string - if responseBody == nil { - body = "" - } else { - body = string(responseBody) - } - log.Debugf("\n%s\nStatus: %s\nHeaders: %v\nBody: %s\n%s", - header, response.Status, headers, body, footer) -} - -type HTTPError struct { - Status string - StatusCode int -} - -func (e HTTPError) Error() string { - return fmt.Sprintf("HTTP error: %s", e.Status) -} - -func NewHTTPError(response *http.Response) *HTTPError { - if response.StatusCode < 300 { - return nil - } - return &HTTPError{response.Status, response.StatusCode} -} - -// SliceContainsString checks to see if a []string contains a string -func SliceContainsString(slice []string, s string) bool { - for _, item := range slice { - if item == s { - return true - } - } - return false -} - -// RemoveStringFromSlice removes a string from a []string -func RemoveStringFromSlice(slice []string, s string) (result []string) { - for _, item := range slice { - if item == s { - continue - } - result = append(result, item) - } - return -} diff --git a/vendor/github.com/netapp/trident/utils/version.go b/vendor/github.com/netapp/trident/utils/version.go deleted file mode 100644 index a125ca801..000000000 --- a/vendor/github.com/netapp/trident/utils/version.go +++ /dev/null @@ -1,328 +0,0 @@ -/* - * 
Copyright 2018 NetApp, Inc. All Rights Reserved. - */ - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "bytes" - "fmt" - "regexp" - "strconv" - "strings" -) - -// Version is an opaque representation of a version number -type Version struct { - components []uint - semver bool - datever bool - preRelease string - buildMetadata string -} - -var ( - // versionMatchRE splits a version string into numeric and "extra" parts - versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`) - // extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not - // validate the "no leading zeroes" constraint for pre-release - extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`) -) - -func parse(str string, semver bool, datever bool) (*Version, error) { - parts := versionMatchRE.FindStringSubmatch(str) - if parts == nil { - return nil, fmt.Errorf("could not parse %q as version", str) - } - numbers, extra := parts[1], parts[2] - - components := strings.Split(numbers, ".") - if ((semver || datever) && len(components) != 3) || (!semver && !datever && len(components) < 2) { - return nil, fmt.Errorf("illegal version string %q", str) - } - - v := &Version{ - components: make([]uint, len(components)), - semver: semver, - datever: datever, - } - for i, comp := range components { - if (i == 0 || semver || (i != 1 && datever)) && strings.HasPrefix(comp, "0") && comp != "0" { - return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) - } - num, err := strconv.ParseUint(comp, 10, 0) - if err != nil { - return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err) - } - if i == 1 && datever && (num < 1 || num > 12) { - return nil, fmt.Errorf("illegal month component %q in %q", comp, str) - } - v.components[i] = uint(num) - } - - if (semver || datever) && extra != "" { - extraParts := extraMatchRE.FindStringSubmatch(extra) - if extraParts == nil { - return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str) - } - v.preRelease, v.buildMetadata = extraParts[1], extraParts[2] - - for _, comp := range strings.Split(v.preRelease, ".") { - if _, err := strconv.ParseUint(comp, 10, 0); err == nil { - if strings.HasPrefix(comp, "0") && comp != "0" { - return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) - } - } - } - } - - return v, nil -} - -// ParseGeneric parses a "generic" version string. The version string must consist of two -// or more dot-separated numeric fields (the first of which can't have leading zeroes), -// followed by arbitrary uninterpreted data (which need not be separated from the final -// numeric field by punctuation). For convenience, leading and trailing whitespace is -// ignored, and the version can be preceded by the letter "v". See also ParseSemantic. 
-func ParseGeneric(str string) (*Version, error) { - return parse(str, false, false) -} - -// MustParseGeneric is like ParseGeneric except that it panics on error -func MustParseGeneric(str string) *Version { - v, err := ParseGeneric(str) - if err != nil { - panic(err) - } - return v -} - -// ParseSemantic parses a version string that exactly obeys the syntax and semantics of -// the "Semantic Versioning" specification (http://semver.org/) (although it ignores -// leading and trailing whitespace, and allows the version to be preceded by "v"). For -// version strings that are not guaranteed to obey the Semantic Versioning syntax, use -// ParseGeneric. -func ParseSemantic(str string) (*Version, error) { - return parse(str, true, false) -} - -// MustParseSemantic is like ParseSemantic except that it panics on error -func MustParseSemantic(str string) *Version { - v, err := ParseSemantic(str) - if err != nil { - panic(err) - } - return v -} - -// ParseDate parses a version string that mostly obeys the syntax and semantics of -// the "Semantic Versioning" specification (http://semver.org/) (although it ignores -// leading and trailing whitespace, and allows the version to be preceded by "v"). -// Furthermore, it allows leading zeroes on the second numerical component so that -// the version may represent a two-digit month (i.e. 17.07.1). A date-based version -// string must be of the form ..[-preRelease][+buildMetadata], where -// the year/month/rev values are numerical, and the month must be in the range 1..12. -func ParseDate(str string) (*Version, error) { - return parse(str, false, true) -} - -// MustParseDate is like ParseDate except that it panics on error -func MustParseDate(str string) *Version { - v, err := ParseDate(str) - if err != nil { - panic(err) - } - return v -} - -func (v *Version) MajorVersion() uint { - return v.components[0] -} - -func (v *Version) MajorVersionString() string { - return strconv.FormatUint(uint64(v.components[0]), 10) -} - -func (v *Version) MinorVersion() uint { - return v.components[1] -} - -func (v *Version) MinorVersionString() string { - return strconv.FormatUint(uint64(v.components[1]), 10) -} - -func (v *Version) PatchVersion() uint { - if len(v.components) < 3 { - return 0 - } - return v.components[2] -} - -func (v *Version) PreRelease() string { - return v.preRelease -} - -// BuildMetadata returns the build metadata, if v is a Semantic Version, or "" -func (v *Version) BuildMetadata() string { - return v.buildMetadata -} - -// String converts a Version back to a string; note that for versions parsed with -// ParseGeneric, this will not include the trailing uninterpreted portion of the version -// number. -func (v *Version) String() string { - var buffer bytes.Buffer - - for i, comp := range v.components { - if i > 0 { - buffer.WriteString(".") - } - if v.datever && i == 1 { - buffer.WriteString(fmt.Sprintf("%02d", comp)) - } else { - buffer.WriteString(fmt.Sprintf("%d", comp)) - } - } - if v.preRelease != "" { - buffer.WriteString("-") - buffer.WriteString(v.preRelease) - } - if v.buildMetadata != "" { - buffer.WriteString("+") - buffer.WriteString(v.buildMetadata) - } - - return buffer.String() -} - -// ShortString converts a Version back to a string, including only the major/minor/patch components. 
-func (v *Version) ShortString() string { - var buffer bytes.Buffer - - for i, comp := range v.components { - if i > 0 { - buffer.WriteString(".") - } - if v.datever && i == 1 { - buffer.WriteString(fmt.Sprintf("%02d", comp)) - } else { - buffer.WriteString(fmt.Sprintf("%d", comp)) - } - } - - return buffer.String() -} - -// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0 -// if they are equal -func (v *Version) compareInternal(other *Version) int { - for i := range v.components { - switch { - case i >= len(other.components): - if v.components[i] != 0 { - return 1 - } - case other.components[i] < v.components[i]: - return 1 - case other.components[i] > v.components[i]: - return -1 - } - } - - if !(v.semver || v.datever) || !(other.semver || other.datever) { - return 0 - } - - switch { - case v.preRelease == "" && other.preRelease != "": - return 1 - case v.preRelease != "" && other.preRelease == "": - return -1 - case v.preRelease == other.preRelease: // includes case where both are "" - return 0 - } - - vPR := strings.Split(v.preRelease, ".") - oPR := strings.Split(other.preRelease, ".") - for i := range vPR { - if i >= len(oPR) { - return 1 - } - vNum, err := strconv.ParseUint(vPR[i], 10, 0) - if err == nil { - oNum, err := strconv.ParseUint(oPR[i], 10, 0) - if err == nil { - switch { - case oNum < vNum: - return 1 - case oNum > vNum: - return -1 - default: - continue - } - } - } - if oPR[i] < vPR[i] { - return 1 - } else if oPR[i] > vPR[i] { - return -1 - } - } - - return 0 -} - -// AtLeast tests if a version is at least equal to a given minimum version. If both -// Versions are Semantic Versions, this will use the Semantic Version comparison -// algorithm. Otherwise, it will compare only the numeric components, with non-present -// components being considered "0" (ie, "1.4" is equal to "1.4.0"). -func (v *Version) AtLeast(min *Version) bool { - return v.compareInternal(min) != -1 -} - -// LessThan tests if a version is less than a given version. (It is exactly the opposite -// of AtLeast, for situations where asking "is v too old?" makes more sense than asking -// "is v new enough?".) -func (v *Version) LessThan(other *Version) bool { - return v.compareInternal(other) == -1 -} - -// GreaterThan tests if a version is greater than a given version. -func (v *Version) GreaterThan(other *Version) bool { - return v.compareInternal(other) == 1 -} - -// Compare compares v against a version string (which will be parsed as either Semantic -// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if -// it is greater than other, or 0 if they are equal. 
-func (v *Version) Compare(other string) (int, error) { - ov, err := parse(other, v.semver, v.datever) - if err != nil { - return 0, err - } - return v.compareInternal(ov), nil -} - -func (v *Version) ToMajorMinorVersion() *Version { - return MustParseGeneric(fmt.Sprintf("%d.%d", v.MajorVersion(), v.MinorVersion())) -} - -func (v *Version) ToMajorMinorString() string { - return fmt.Sprintf("%d.%d", v.MajorVersion(), v.MinorVersion()) -} diff --git a/vendor/github.com/philhofer/fwd/LICENSE.md b/vendor/github.com/philhofer/fwd/LICENSE.md deleted file mode 100644 index 1ac6a81f6..000000000 --- a/vendor/github.com/philhofer/fwd/LICENSE.md +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2014-2015, Philip Hofer - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md deleted file mode 100644 index 38349af34..000000000 --- a/vendor/github.com/philhofer/fwd/README.md +++ /dev/null @@ -1,315 +0,0 @@ - -# fwd - import "github.com/philhofer/fwd" - -The `fwd` package provides a buffered reader -and writer. Each has methods that help improve -the encoding/decoding performance of some binary -protocols. - -The `fwd.Writer` and `fwd.Reader` type provide similar -functionality to their counterparts in `bufio`, plus -a few extra utility methods that simplify read-ahead -and write-ahead. I wrote this package to improve serialization -performance for http://github.com/tinylib/msgp, -where it provided about a 2x speedup over `bufio` for certain -workloads. However, care must be taken to understand the semantics of the -extra methods provided by this package, as they allow -the user to access and manipulate the buffer memory -directly. - -The extra methods for `fwd.Reader` are `Peek`, `Skip` -and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, -will re-allocate the read buffer in order to accommodate arbitrarily -large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes -in the stream, and uses the `io.Seeker` interface if the underlying -stream implements it. `(*fwd.Reader).Next` returns a slice pointing -to the next `n` bytes in the read buffer (like `Peek`), but also -increments the read position. This allows users to process streams -in arbitrary block sizes without having to manage appropriately-sized -slices. Additionally, obviating the need to copy the data from the -buffer to another location in memory can improve performance dramatically -in CPU-bound applications. 
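As a rough usage sketch (assuming only the `Reader` API documented below — `NewReader`, `Peek`, `Skip`, and `Next` — and a hypothetical length-prefixed payload), the read-ahead helpers described above might be used like this:

``` go
package main

import (
	"bytes"
	"fmt"

	"github.com/philhofer/fwd"
)

func main() {
	// Wrap any io.Reader in a buffered look-ahead reader.
	r := fwd.NewReader(bytes.NewReader([]byte("\x05hello, world")))

	// Peek at the one-byte length prefix without consuming it.
	hdr, err := r.Peek(1)
	if err != nil {
		panic(err)
	}
	n := int(hdr[0])

	// Skip the prefix, then take the next n bytes straight from the buffer.
	if _, err := r.Skip(1); err != nil {
		panic(err)
	}
	body, err := r.Next(n)
	if err != nil {
		panic(err)
	}

	// Next returns a slice aliasing the internal buffer, so copy it before
	// making any further calls on the reader if it needs to outlive them.
	fmt.Printf("%s\n", append([]byte(nil), body...))
}
```

Because `Next` advances the read position and hands back buffer memory directly, this avoids the intermediate copy a `bufio.Reader` would require, at the cost of the aliasing caveat noted in the comment.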
- -`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which -returns a slice pointing to the next `n` bytes of the writer, and increments -the write position by the length of the returned slice. This allows users -to write directly to the end of the buffer. - - - - -## Constants -``` go -const ( - // DefaultReaderSize is the default size of the read buffer - DefaultReaderSize = 2048 -) -``` -``` go -const ( - // DefaultWriterSize is the - // default write buffer size. - DefaultWriterSize = 2048 -) -``` - - - -## type Reader -``` go -type Reader struct { - // contains filtered or unexported fields -} -``` -Reader is a buffered look-ahead reader - - - - - - - - - -### func NewReader -``` go -func NewReader(r io.Reader) *Reader -``` -NewReader returns a new *Reader that reads from 'r' - - -### func NewReaderSize -``` go -func NewReaderSize(r io.Reader, n int) *Reader -``` -NewReaderSize returns a new *Reader that -reads from 'r' and has a buffer size 'n' - - - - -### func (\*Reader) BufferSize -``` go -func (r *Reader) BufferSize() int -``` -BufferSize returns the total size of the buffer - - - -### func (\*Reader) Buffered -``` go -func (r *Reader) Buffered() int -``` -Buffered returns the number of bytes currently in the buffer - - - -### func (\*Reader) Next -``` go -func (r *Reader) Next(n int) ([]byte, error) -``` -Next returns the next 'n' bytes in the stream. -Unlike Peek, Next advances the reader position. -The returned bytes point to the same -data as the buffer, so the slice is -only valid until the next reader method call. -An EOF is considered an unexpected error. -If an the returned slice is less than the -length asked for, an error will be returned, -and the reader position will not be incremented. - - - -### func (\*Reader) Peek -``` go -func (r *Reader) Peek(n int) ([]byte, error) -``` -Peek returns the next 'n' buffered bytes, -reading from the underlying reader if necessary. -It will only return a slice shorter than 'n' bytes -if it also returns an error. Peek does not advance -the reader. EOF errors are *not* returned as -io.ErrUnexpectedEOF. - - - -### func (\*Reader) Read -``` go -func (r *Reader) Read(b []byte) (int, error) -``` -Read implements `io.Reader` - - - -### func (\*Reader) ReadByte -``` go -func (r *Reader) ReadByte() (byte, error) -``` -ReadByte implements `io.ByteReader` - - - -### func (\*Reader) ReadFull -``` go -func (r *Reader) ReadFull(b []byte) (int, error) -``` -ReadFull attempts to read len(b) bytes into -'b'. It returns the number of bytes read into -'b', and an error if it does not return len(b). -EOF is considered an unexpected error. - - - -### func (\*Reader) Reset -``` go -func (r *Reader) Reset(rd io.Reader) -``` -Reset resets the underlying reader -and the read buffer. - - - -### func (\*Reader) Skip -``` go -func (r *Reader) Skip(n int) (int, error) -``` -Skip moves the reader forward 'n' bytes. -Returns the number of bytes skipped and any -errors encountered. It is analogous to Seek(n, 1). -If the underlying reader implements io.Seeker, then -that method will be used to skip forward. - -If the reader encounters -an EOF before skipping 'n' bytes, it -returns io.ErrUnexpectedEOF. If the -underlying reader implements io.Seeker, then -those rules apply instead. (Many implementations -will not return `io.EOF` until the next call -to Read.) 
- - - -### func (\*Reader) WriteTo -``` go -func (r *Reader) WriteTo(w io.Writer) (int64, error) -``` -WriteTo implements `io.WriterTo` - - - -## type Writer -``` go -type Writer struct { - // contains filtered or unexported fields -} -``` -Writer is a buffered writer - - - - - - - - - -### func NewWriter -``` go -func NewWriter(w io.Writer) *Writer -``` -NewWriter returns a new writer -that writes to 'w' and has a buffer -that is `DefaultWriterSize` bytes. - - -### func NewWriterSize -``` go -func NewWriterSize(w io.Writer, size int) *Writer -``` -NewWriterSize returns a new writer -that writes to 'w' and has a buffer -that is 'size' bytes. - - - - -### func (\*Writer) BufferSize -``` go -func (w *Writer) BufferSize() int -``` -BufferSize returns the maximum size of the buffer. - - - -### func (\*Writer) Buffered -``` go -func (w *Writer) Buffered() int -``` -Buffered returns the number of buffered bytes -in the reader. - - - -### func (\*Writer) Flush -``` go -func (w *Writer) Flush() error -``` -Flush flushes any buffered bytes -to the underlying writer. - - - -### func (\*Writer) Next -``` go -func (w *Writer) Next(n int) ([]byte, error) -``` -Next returns the next 'n' free bytes -in the write buffer, flushing the writer -as necessary. Next will return `io.ErrShortBuffer` -if 'n' is greater than the size of the write buffer. -Calls to 'next' increment the write position by -the size of the returned buffer. - - - -### func (\*Writer) ReadFrom -``` go -func (w *Writer) ReadFrom(r io.Reader) (int64, error) -``` -ReadFrom implements `io.ReaderFrom` - - - -### func (\*Writer) Write -``` go -func (w *Writer) Write(p []byte) (int, error) -``` -Write implements `io.Writer` - - - -### func (\*Writer) WriteByte -``` go -func (w *Writer) WriteByte(b byte) error -``` -WriteByte implements `io.ByteWriter` - - - -### func (\*Writer) WriteString -``` go -func (w *Writer) WriteString(s string) (int, error) -``` -WriteString is analogous to Write, but it takes a string. - - - - - - - - - -- - - -Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go deleted file mode 100644 index 75be62ab0..000000000 --- a/vendor/github.com/philhofer/fwd/reader.go +++ /dev/null @@ -1,383 +0,0 @@ -// The `fwd` package provides a buffered reader -// and writer. Each has methods that help improve -// the encoding/decoding performance of some binary -// protocols. -// -// The `fwd.Writer` and `fwd.Reader` type provide similar -// functionality to their counterparts in `bufio`, plus -// a few extra utility methods that simplify read-ahead -// and write-ahead. I wrote this package to improve serialization -// performance for http://github.com/tinylib/msgp, -// where it provided about a 2x speedup over `bufio` for certain -// workloads. However, care must be taken to understand the semantics of the -// extra methods provided by this package, as they allow -// the user to access and manipulate the buffer memory -// directly. -// -// The extra methods for `fwd.Reader` are `Peek`, `Skip` -// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, -// will re-allocate the read buffer in order to accommodate arbitrarily -// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes -// in the stream, and uses the `io.Seeker` interface if the underlying -// stream implements it. 
`(*fwd.Reader).Next` returns a slice pointing -// to the next `n` bytes in the read buffer (like `Peek`), but also -// increments the read position. This allows users to process streams -// in arbitrary block sizes without having to manage appropriately-sized -// slices. Additionally, obviating the need to copy the data from the -// buffer to another location in memory can improve performance dramatically -// in CPU-bound applications. -// -// `fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which -// returns a slice pointing to the next `n` bytes of the writer, and increments -// the write position by the length of the returned slice. This allows users -// to write directly to the end of the buffer. -// -package fwd - -import "io" - -const ( - // DefaultReaderSize is the default size of the read buffer - DefaultReaderSize = 2048 - - // minimum read buffer; straight from bufio - minReaderSize = 16 -) - -// NewReader returns a new *Reader that reads from 'r' -func NewReader(r io.Reader) *Reader { - return NewReaderSize(r, DefaultReaderSize) -} - -// NewReaderSize returns a new *Reader that -// reads from 'r' and has a buffer size 'n' -func NewReaderSize(r io.Reader, n int) *Reader { - rd := &Reader{ - r: r, - data: make([]byte, 0, max(minReaderSize, n)), - } - if s, ok := r.(io.Seeker); ok { - rd.rs = s - } - return rd -} - -// Reader is a buffered look-ahead reader -type Reader struct { - r io.Reader // underlying reader - - // data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space - data []byte // data - n int // read offset - state error // last read error - - // if the reader past to NewReader was - // also an io.Seeker, this is non-nil - rs io.Seeker -} - -// Reset resets the underlying reader -// and the read buffer. -func (r *Reader) Reset(rd io.Reader) { - r.r = rd - r.data = r.data[0:0] - r.n = 0 - r.state = nil - if s, ok := rd.(io.Seeker); ok { - r.rs = s - } else { - r.rs = nil - } -} - -// more() does one read on the underlying reader -func (r *Reader) more() { - // move data backwards so that - // the read offset is 0; this way - // we can supply the maximum number of - // bytes to the reader - if r.n != 0 { - if r.n < len(r.data) { - r.data = r.data[:copy(r.data[0:], r.data[r.n:])] - } else { - r.data = r.data[:0] - } - r.n = 0 - } - var a int - a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)]) - if a == 0 && r.state == nil { - r.state = io.ErrNoProgress - return - } else if a > 0 && r.state == io.EOF { - // discard the io.EOF if we read more than 0 bytes. - // the next call to Read should return io.EOF again. - r.state = nil - } - r.data = r.data[:len(r.data)+a] -} - -// pop error -func (r *Reader) err() (e error) { - e, r.state = r.state, nil - return -} - -// pop error; EOF -> io.ErrUnexpectedEOF -func (r *Reader) noEOF() (e error) { - e, r.state = r.state, nil - if e == io.EOF { - e = io.ErrUnexpectedEOF - } - return -} - -// buffered bytes -func (r *Reader) buffered() int { return len(r.data) - r.n } - -// Buffered returns the number of bytes currently in the buffer -func (r *Reader) Buffered() int { return len(r.data) - r.n } - -// BufferSize returns the total size of the buffer -func (r *Reader) BufferSize() int { return cap(r.data) } - -// Peek returns the next 'n' buffered bytes, -// reading from the underlying reader if necessary. -// It will only return a slice shorter than 'n' bytes -// if it also returns an error. Peek does not advance -// the reader. EOF errors are *not* returned as -// io.ErrUnexpectedEOF. 
-func (r *Reader) Peek(n int) ([]byte, error) { - // in the degenerate case, - // we may need to realloc - // (the caller asked for more - // bytes than the size of the buffer) - if cap(r.data) < n { - old := r.data[r.n:] - r.data = make([]byte, n+r.buffered()) - r.data = r.data[:copy(r.data, old)] - r.n = 0 - } - - // keep filling until - // we hit an error or - // read enough bytes - for r.buffered() < n && r.state == nil { - r.more() - } - - // we must have hit an error - if r.buffered() < n { - return r.data[r.n:], r.err() - } - - return r.data[r.n : r.n+n], nil -} - -// Skip moves the reader forward 'n' bytes. -// Returns the number of bytes skipped and any -// errors encountered. It is analogous to Seek(n, 1). -// If the underlying reader implements io.Seeker, then -// that method will be used to skip forward. -// -// If the reader encounters -// an EOF before skipping 'n' bytes, it -// returns io.ErrUnexpectedEOF. If the -// underlying reader implements io.Seeker, then -// those rules apply instead. (Many implementations -// will not return `io.EOF` until the next call -// to Read.) -func (r *Reader) Skip(n int) (int, error) { - - // fast path - if r.buffered() >= n { - r.n += n - return n, nil - } - - // use seeker implementation - // if we can - if r.rs != nil { - return r.skipSeek(n) - } - - // loop on filling - // and then erasing - o := n - for r.buffered() < n && r.state == nil { - r.more() - // we can skip forward - // up to r.buffered() bytes - step := min(r.buffered(), n) - r.n += step - n -= step - } - // at this point, n should be - // 0 if everything went smoothly - return o - n, r.noEOF() -} - -// Next returns the next 'n' bytes in the stream. -// Unlike Peek, Next advances the reader position. -// The returned bytes point to the same -// data as the buffer, so the slice is -// only valid until the next reader method call. -// An EOF is considered an unexpected error. -// If an the returned slice is less than the -// length asked for, an error will be returned, -// and the reader position will not be incremented. -func (r *Reader) Next(n int) ([]byte, error) { - - // in case the buffer is too small - if cap(r.data) < n { - old := r.data[r.n:] - r.data = make([]byte, n+r.buffered()) - r.data = r.data[:copy(r.data, old)] - r.n = 0 - } - - // fill at least 'n' bytes - for r.buffered() < n && r.state == nil { - r.more() - } - - if r.buffered() < n { - return r.data[r.n:], r.noEOF() - } - out := r.data[r.n : r.n+n] - r.n += n - return out, nil -} - -// skipSeek uses the io.Seeker to seek forward. -// only call this function when n > r.buffered() -func (r *Reader) skipSeek(n int) (int, error) { - o := r.buffered() - // first, clear buffer - n -= o - r.n = 0 - r.data = r.data[:0] - - // then seek forward remaning bytes - i, err := r.rs.Seek(int64(n), 1) - return int(i) + o, err -} - -// Read implements `io.Reader` -func (r *Reader) Read(b []byte) (int, error) { - // if we have data in the buffer, just - // return that. - if r.buffered() != 0 { - x := copy(b, r.data[r.n:]) - r.n += x - return x, nil - } - var n int - // we have no buffered data; determine - // whether or not to buffer or call - // the underlying reader directly - if len(b) >= cap(r.data) { - n, r.state = r.r.Read(b) - } else { - r.more() - n = copy(b, r.data) - r.n = n - } - if n == 0 { - return 0, r.err() - } - return n, nil -} - -// ReadFull attempts to read len(b) bytes into -// 'b'. It returns the number of bytes read into -// 'b', and an error if it does not return len(b). 
-// EOF is considered an unexpected error. -func (r *Reader) ReadFull(b []byte) (int, error) { - var n int // read into b - var nn int // scratch - l := len(b) - // either read buffered data, - // or read directly for the underlying - // buffer, or fetch more buffered data. - for n < l && r.state == nil { - if r.buffered() != 0 { - nn = copy(b[n:], r.data[r.n:]) - n += nn - r.n += nn - } else if l-n > cap(r.data) { - nn, r.state = r.r.Read(b[n:]) - n += nn - } else { - r.more() - } - } - if n < l { - return n, r.noEOF() - } - return n, nil -} - -// ReadByte implements `io.ByteReader` -func (r *Reader) ReadByte() (byte, error) { - for r.buffered() < 1 && r.state == nil { - r.more() - } - if r.buffered() < 1 { - return 0, r.err() - } - b := r.data[r.n] - r.n++ - return b, nil -} - -// WriteTo implements `io.WriterTo` -func (r *Reader) WriteTo(w io.Writer) (int64, error) { - var ( - i int64 - ii int - err error - ) - // first, clear buffer - if r.buffered() > 0 { - ii, err = w.Write(r.data[r.n:]) - i += int64(ii) - if err != nil { - return i, err - } - r.data = r.data[0:0] - r.n = 0 - } - for r.state == nil { - // here we just do - // 1:1 reads and writes - r.more() - if r.buffered() > 0 { - ii, err = w.Write(r.data) - i += int64(ii) - if err != nil { - return i, err - } - r.data = r.data[0:0] - r.n = 0 - } - } - if r.state != io.EOF { - return i, r.err() - } - return i, nil -} - -func min(a int, b int) int { - if a < b { - return a - } - return b -} - -func max(a int, b int) int { - if a < b { - return b - } - return a -} diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go deleted file mode 100644 index 2dc392a91..000000000 --- a/vendor/github.com/philhofer/fwd/writer.go +++ /dev/null @@ -1,224 +0,0 @@ -package fwd - -import "io" - -const ( - // DefaultWriterSize is the - // default write buffer size. - DefaultWriterSize = 2048 - - minWriterSize = minReaderSize -) - -// Writer is a buffered writer -type Writer struct { - w io.Writer // writer - buf []byte // 0:len(buf) is bufered data -} - -// NewWriter returns a new writer -// that writes to 'w' and has a buffer -// that is `DefaultWriterSize` bytes. -func NewWriter(w io.Writer) *Writer { - if wr, ok := w.(*Writer); ok { - return wr - } - return &Writer{ - w: w, - buf: make([]byte, 0, DefaultWriterSize), - } -} - -// NewWriterSize returns a new writer -// that writes to 'w' and has a buffer -// that is 'size' bytes. -func NewWriterSize(w io.Writer, size int) *Writer { - if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size { - return wr - } - return &Writer{ - w: w, - buf: make([]byte, 0, max(size, minWriterSize)), - } -} - -// Buffered returns the number of buffered bytes -// in the reader. -func (w *Writer) Buffered() int { return len(w.buf) } - -// BufferSize returns the maximum size of the buffer. -func (w *Writer) BufferSize() int { return cap(w.buf) } - -// Flush flushes any buffered bytes -// to the underlying writer. -func (w *Writer) Flush() error { - l := len(w.buf) - if l > 0 { - n, err := w.w.Write(w.buf) - - // if we didn't write the whole - // thing, copy the unwritten - // bytes to the beginnning of the - // buffer. 
- if n < l && n > 0 { - w.pushback(n) - if err == nil { - err = io.ErrShortWrite - } - } - if err != nil { - return err - } - w.buf = w.buf[:0] - return nil - } - return nil -} - -// Write implements `io.Writer` -func (w *Writer) Write(p []byte) (int, error) { - c, l, ln := cap(w.buf), len(w.buf), len(p) - avail := c - l - - // requires flush - if avail < ln { - if err := w.Flush(); err != nil { - return 0, err - } - l = len(w.buf) - } - // too big to fit in buffer; - // write directly to w.w - if c < ln { - return w.w.Write(p) - } - - // grow buf slice; copy; return - w.buf = w.buf[:l+ln] - return copy(w.buf[l:], p), nil -} - -// WriteString is analogous to Write, but it takes a string. -func (w *Writer) WriteString(s string) (int, error) { - c, l, ln := cap(w.buf), len(w.buf), len(s) - avail := c - l - - // requires flush - if avail < ln { - if err := w.Flush(); err != nil { - return 0, err - } - l = len(w.buf) - } - // too big to fit in buffer; - // write directly to w.w - // - // yes, this is unsafe. *but* - // io.Writer is not allowed - // to mutate its input or - // maintain a reference to it, - // per the spec in package io. - // - // plus, if the string is really - // too big to fit in the buffer, then - // creating a copy to write it is - // expensive (and, strictly speaking, - // unnecessary) - if c < ln { - return w.w.Write(unsafestr(s)) - } - - // grow buf slice; copy; return - w.buf = w.buf[:l+ln] - return copy(w.buf[l:], s), nil -} - -// WriteByte implements `io.ByteWriter` -func (w *Writer) WriteByte(b byte) error { - if len(w.buf) == cap(w.buf) { - if err := w.Flush(); err != nil { - return err - } - } - w.buf = append(w.buf, b) - return nil -} - -// Next returns the next 'n' free bytes -// in the write buffer, flushing the writer -// as necessary. Next will return `io.ErrShortBuffer` -// if 'n' is greater than the size of the write buffer. -// Calls to 'next' increment the write position by -// the size of the returned buffer. -func (w *Writer) Next(n int) ([]byte, error) { - c, l := cap(w.buf), len(w.buf) - if n > c { - return nil, io.ErrShortBuffer - } - avail := c - l - if avail < n { - if err := w.Flush(); err != nil { - return nil, err - } - l = len(w.buf) - } - w.buf = w.buf[:l+n] - return w.buf[l:], nil -} - -// take the bytes from w.buf[n:len(w.buf)] -// and put them at the beginning of w.buf, -// and resize to the length of the copied segment. -func (w *Writer) pushback(n int) { - w.buf = w.buf[:copy(w.buf, w.buf[n:])] -} - -// ReadFrom implements `io.ReaderFrom` -func (w *Writer) ReadFrom(r io.Reader) (int64, error) { - // anticipatory flush - if err := w.Flush(); err != nil { - return 0, err - } - - w.buf = w.buf[0:cap(w.buf)] // expand buffer - - var nn int64 // written - var err error // error - var x int // read - - // 1:1 reads and writes - for err == nil { - x, err = r.Read(w.buf) - if x > 0 { - n, werr := w.w.Write(w.buf[:x]) - nn += int64(n) - - if err != nil { - if n < x && n > 0 { - w.pushback(n - x) - } - return nn, werr - } - if n < x { - w.pushback(n - x) - return nn, io.ErrShortWrite - } - } else if err == nil { - err = io.ErrNoProgress - break - } - } - if err != io.EOF { - return nn, err - } - - // we only clear here - // because we are sure - // the writes have - // succeeded. otherwise, - // we retain the data in case - // future writes succeed. 
- w.buf = w.buf[0:0] - - return nn, nil -} diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go deleted file mode 100644 index e367f3931..000000000 --- a/vendor/github.com/philhofer/fwd/writer_appengine.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build appengine - -package fwd - -func unsafestr(s string) []byte { return []byte(s) } diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go deleted file mode 100644 index a0bf453b3..000000000 --- a/vendor/github.com/philhofer/fwd/writer_unsafe.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !appengine - -package fwd - -import ( - "reflect" - "unsafe" -) - -// unsafe cast string as []byte -func unsafestr(b string) []byte { - l := len(b) - return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Len: l, - Cap: l, - Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data, - })) -} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad612..000000000 --- a/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 003e99fad..000000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,772 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). 
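As with the other vendored packages removed in this change, a short, hedged sketch of the unified-diff entry points documented further down in this file (`SplitLines`, `UnifiedDiff`, `GetUnifiedDiffString`) may be useful for reference; the file names and input lines are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	// SplitLines keeps the trailing newlines, which is the form
	// UnifiedDiff expects for its A and B sequences.
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\n2\nthree\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3, // three lines of context, the documented default
	}

	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}
```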
-package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. -// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. 
-func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - wf := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - ws := func(s string) { - _, err := buf.WriteString(s) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - } - - first, last := g[0], g[len(g)-1] - ws("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - wf("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - wf("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. -func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. -func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/client_golang/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE deleted file mode 100644 index dd878a30e..000000000 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ /dev/null @@ -1,23 +0,0 @@ -Prometheus instrumentation library for Go applications -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - - -The following components are included in this product: - -perks - a fork of https://github.com/bmizerany/perks -https://github.com/beorn7/perks -Copyright 2013-2015 Blake Mizerany, Björn Rabenstein -See https://github.com/beorn7/perks/blob/master/README.md for license details. - -Go support for Protocol Buffers - Google's data interchange format -http://github.com/golang/protobuf/ -Copyright 2010 The Go Authors -See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f0346..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index 44986bff0..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1 +0,0 @@ -See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index c0d70b2fa..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. -// -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. 
collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. - // - // It is valid if one and the same Collector sends duplicate - // descriptors. Those duplicates are simply ignored. However, two - // different Collectors must not send duplicate descriptors. - // - // Sending no descriptor at all marks the Collector as “unchecked”, - // i.e. no checks will be performed at registration time, and the - // Collector may yield any Metric it sees fit in its Collect method. - // - // This method idempotently sends the same descriptors throughout the - // lifetime of the Collector. It may be called concurrently and - // therefore must be implemented in a concurrency safe way. - // - // If a Collector encounters an error while executing this method, it - // must send an invalid descriptor (created with NewInvalidDesc) to - // signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by Describe - // (unless the Collector is unchecked, see above). Returned metrics that - // share the same descriptor must differ in their variable label - // values. - // - // This method may be called concurrently and must therefore be - // implemented in a concurrency safe way. Blocking occurs at the expense - // of total performance of rendering all registered metrics. Ideally, - // Collector implementations support concurrent readers. - Collect(chan<- Metric) -} - -// DescribeByCollect is a helper to implement the Describe method of a custom -// Collector. It collects the metrics from the provided Collector and sends -// their descriptors to the provided channel. -// -// If a Collector collects the same metrics throughout its lifetime, its -// Describe method can simply be implemented as: -// -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } -// -// However, this will not work if the metrics collected change dynamically over -// the lifetime of the Collector in a way that their combined set of descriptors -// changes as well. The shortcut implementation will then violate the contract -// of the Describe method. If a Collector sometimes collects no metrics at all -// (for example vectors like CounterVec, GaugeVec, etc., which only collect -// metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collecter (cf. the Register -// method of the Registerer interface). Hence, only use this shortcut -// implementation of Describe if you are certain to fulfill the contract. -// -// The Collector example demonstrates a use of DescribeByCollect. 
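
The removed collector.go documents the Collector contract and the DescribeByCollect shortcut. As an illustrative sketch only (not part of this change; the metric name and the readDepth callback are hypothetical), a minimal custom Collector built on that helper could look like:

package metrics

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors an externally maintained value into a Prometheus
// metric at collect time, the "constant metric" pattern described above.
type queueCollector struct {
	depthDesc *prometheus.Desc
	readDepth func() float64 // hypothetical source of the current value
}

func newQueueCollector(readDepth func() float64) *queueCollector {
	return &queueCollector{
		depthDesc: prometheus.NewDesc("queue_depth", "Current depth of the work queue.", nil, nil),
		readDepth: readDepth,
	}
}

// Describe can delegate to DescribeByCollect because the set of metrics
// returned by Collect never changes over this Collector's lifetime.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect creates the metric on the fly from the current reading.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.depthDesc, prometheus.GaugeValue, c.readDepth())
}
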
-func DescribeByCollect(c Collector, descs chan<- *Desc) { - metrics := make(chan Metric) - go func() { - c.Collect(metrics) - close(metrics) - }() - for m := range metrics { - descs <- m.Desc() - } -} - -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { - self Metric -} - -// init provides the selfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *selfCollector) init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *selfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. -func (c *selfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index d463e36d3..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "math" - "sync/atomic" - - dto "github.com/prometheus/client_model/go" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// NewCounter creates a new Counter based on the provided CounterOpts. -// -// The returned implementation tracks the counter value in two separate -// variables, a float64 and a uint64. The latter is used to track calls of the -// Inc method and calls of the Add method with a value that can be represented -// as a uint64. This allows atomic increments of the counter with optimal -// performance. (It is common to have an Inc call in very hot execution paths.) -// Both internal tracking values are added up in the Write method. This has to -// be taken into account when it comes to precision and overflow behavior. 
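
For reference, the Counter API documented in the removed counter.go is used roughly as below; the namespace, metric name, and call site are made up for illustration:

package metrics

import "github.com/prometheus/client_golang/prometheus"

// requestsTotal counts processed requests; a Counter only ever goes up.
var requestsTotal = prometheus.NewCounter(prometheus.CounterOpts{
	Namespace: "api",
	Name:      "requests_total",
	Help:      "Total number of requests processed.",
})

func init() {
	// Register with the default registry so the counter is exported.
	prometheus.MustRegister(requestsTotal)
}

func handleRequest() {
	requestsTotal.Inc() // the hot-path increment described in the doc comment above
}
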
-func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. - return result -} - -type counter struct { - // valBits contains the bits of the represented float64 value, while - // valInt stores values that are exact integers. Both have to go first - // in the struct to guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - valInt uint64 - - selfCollector - desc *Desc - - labelPairs []*dto.LabelPair -} - -func (c *counter) Desc() *Desc { - return c.desc -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - ival := uint64(v) - if float64(ival) == v { - atomic.AddUint64(&c.valInt, ival) - return - } - - for { - oldBits := atomic.LoadUint64(&c.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { - return - } - } -} - -func (c *counter) Inc() { - atomic.AddUint64(&c.valInt, 1) -} - -func (c *counter) Write(out *dto.Metric) error { - fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) - ival := atomic.LoadUint64(&c.valInt) - val := fval + float64(ival) - - return populateMetric(CounterValue, val, c.labelPairs, out) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -type CounterVec struct { - *metricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. -func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &CounterVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Counter is created. -// -// It is possible to call this method without using the returned Counter to only -// create the new Counter but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Counter for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Counter from the CounterVec. In that case, -// the Counter will still exist, but it will not be exported anymore, even if a -// Counter with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). 
-// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Counter is created. Implications of -// creating a Counter without using it and keeping the Counter for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *CounterVec) WithLabelValues(lvs ...string) Counter { - c, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return c -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *CounterVec) With(labels Labels) Counter { - c, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return c -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the CounterVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &CounterVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. 
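
A short sketch of the CounterVec and currying behaviour documented above; the metric name and label values are hypothetical:

package metrics

import "github.com/prometheus/client_golang/prometheus"

var httpRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "HTTP requests partitioned by status code and method.",
	},
	[]string{"code", "method"},
)

func init() {
	prometheus.MustRegister(httpRequests)
}

func recordRequests() {
	// WithLabelValues panics on a label-count mismatch, as documented above.
	httpRequests.WithLabelValues("404", "GET").Add(42)

	// Currying pre-sets the "method" label; the curried vector only needs "code".
	getOnly := httpRequests.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("200").Inc()
}
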
-func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. -func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index 1d034f871..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "sort" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. The normal Metric implementations -// included in this package manage their Desc under the hood. Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. -// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each, constLabels and variableLabels, but they must differ in -// the values of the constLabels. -// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. 
- constLabelPairs []*dto.LabelPair - // VariableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. - id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. Each Desc with the same fqName must have the same - // dimHash. - dimHash uint64 - // err is an error that occurred during construction. It is reported on - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Collector example for a usage pattern. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels, - } - if !model.IsValidMetricName(model.LabelValue(fqName)) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). - labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. - for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Validate the const label values. They can't have a wrong cardinality, so - // use in len(labelValues) as expectedNumberOfValues. - if err := validateLabelValues(labelValues, len(labelValues)); err != nil { - d.err = err - return d - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. That prevents matching the label - // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") - return d - } - - vh := hashNew() - for _, val := range labelValues { - vh = hashAdd(vh, val) - vh = hashAddByte(vh, separatorByte) - } - d.id = vh - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. 
- lh := hashNew() - lh = hashAdd(lh, help) - lh = hashAddByte(lh, separatorByte) - for _, labelName := range labelNames { - lh = hashAdd(lh, labelName) - lh = hashAddByte(lh, separatorByte) - } - d.dimHash = lh - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(labelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. -func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - d.variableLabels, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index 5d9525def..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus is the core instrumentation package. It provides metrics -// primitives to instrument code for monitoring. It also offers a registry for -// metrics. Sub-packages allow to expose the registered metrics via HTTP -// (package promhttp) or push them to a Pushgateway (package push). There is -// also a sub-package promauto, which provides metrics constructors with -// automatic registration. -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. 
-// -// A Basic Example -// -// As a starting point, a very basic usage example: -// -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// -// -// This is a complete program that exports two metrics, a Gauge and a Counter, -// the latter with a label attached to turn it into a (one-dimensional) vector. -// -// Metrics -// -// The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example -// above, you only need to understand the different metric types and their -// vector versions for basic usage. Furthermore, if you are not concerned with -// fine-grained control of when and how to register metrics with the registry, -// have a look at the promauto package, which will effectively allow you to -// ignore registration altogether in simple cases. -// -// Above, you have already touched the Counter and the Gauge. There are two more -// advanced metric types: the Summary and Histogram. A more thorough description -// of those four metric types can be found in the Prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ -// -// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the -// Prometheus server not to assume anything about its type. -// -// In addition to the fundamental metric types Gauge, Counter, Summary, -// Histogram, and Untyped, a very important part of the Prometheus data model is -// the partitioning of samples along dimensions called labels, which results in -// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// HistogramVec, and UntypedVec. -// -// While only the fundamental metric types implement the Metric interface, both -// the metrics and their vector versions implement the Collector interface. A -// Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, -// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, -// SummaryVec, HistogramVec, and UntypedVec are not. -// -// To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or -// UntypedOpts. -// -// Custom Collectors and constant Metrics -// -// While you could create your own implementations of Metric, most likely you -// will only ever implement the Collector interface on your own. 
At a first -// glance, a custom Collector seems handy to bundle Metrics for common -// registration (with the prime example of the different metric vectors above, -// which bundle all the metrics of the same name but with different labels). -// -// There is a more involved use case, too: If you already have metrics -// available, created outside of the Prometheus context, you don't need the -// interface of the various Metric types. You essentially want to mirror the -// existing numbers into Prometheus Metrics during collection. An own -// implementation of the Collector interface is perfect for that. You can create -// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). That will happen in -// the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created later. -// NewDesc comes in handy to create those Desc instances. Alternatively, you -// could return no Desc at all, which will marke the Collector “unchecked”. No -// checks are porformed at registration time, but metric consistency will still -// be ensured at scrape time, i.e. any inconsistencies will lead to scrape -// errors. Thus, with unchecked Collectors, the responsibility to not collect -// metrics that lead to inconsistencies in the total scrape result lies with the -// implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situatios where the exact -// metrics to be returned by a Collector cannot be predicted at registration -// time, but the implementer has sufficient knowledge of the whole system to -// guarantee metric consistency. -// -// The Collector example illustrates the use case. You can also look at the -// source code of the processCollector (mirroring process metrics), the -// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar -// metrics) as examples that are used in this package itself. -// -// If you just need to call a function to get a single float value to collect as -// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting -// shortcuts. -// -// Advanced Uses of the Registry -// -// While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. -// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. -// -// An error is returned if the registered Collector is incompatible or -// inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data model. -// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. -// -// So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegisterer variable. With NewRegistry, you -// can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. 
The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. -// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegisterer. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. -// -// Also note that the DefaultRegisterer comes registered with a Collector for Go -// runtime metrics (via NewGoCollector) and a Collector for process metrics (via -// NewProcessCollector). With a custom registry, you are in control and decide -// yourself about the Collectors to register. -// -// HTTP Exposition -// -// The Registry implements the Gatherer interface. The caller of the Gather -// method can then expose the gathered metrics in some way. Usually, the metrics -// are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// (The top-level functions in the prometheus package are deprecated.) -// -// Pushing to the Pushgateway -// -// Function for pushing to the Pushgateway can be found in the push sub-package. -// -// Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// -// Other Means of Exposition -// -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. -package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go deleted file mode 100644 index 18a99d5fa..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "encoding/json" - "expvar" -) - -type expvarCollector struct { - exports map[string]*Desc -} - -// NewExpvarCollector returns a newly allocated expvar Collector that still has -// to be registered with a Prometheus registry. -// -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. 
-// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. -// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. -func NewExpvarCollector(exports map[string]*Desc) Collector { - return &expvarCollector{ - exports: exports, - } -} - -// Describe implements Collector. -func (e *expvarCollector) Describe(ch chan<- *Desc) { - for _, desc := range e.exports { - ch <- desc - } -} - -// Collect implements Collector. -func (e *expvarCollector) Collect(ch chan<- Metric) { - for name, desc := range e.exports { - var m Metric - expVar := expvar.Get(name) - if expVar == nil { - continue - } - var v interface{} - labels := make([]string, len(desc.variableLabels)) - if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { - ch <- NewInvalidMetric(desc, err) - continue - } - var processValue func(v interface{}, i int) - processValue = func(v interface{}, i int) { - if i >= len(labels) { - copiedLabels := append(make([]string, 0, len(labels)), labels...) - switch v := v.(type) { - case float64: - m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) - case bool: - if v { - m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) - } else { - m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) - } - default: - return - } - ch <- m - return - } - vm, ok := v.(map[string]interface{}) - if !ok { - return - } - for lv, val := range vm { - labels[i] = lv - processValue(val, i+1) - } - } - processValue(v, 0) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index 3d383a735..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index 71d406bd9..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. -type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. - Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. - Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. - Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() -} - -// GaugeOpts is an alias for Opts. See there for doc comments. -type GaugeOpts Opts - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -// -// The returned implementation is optimized for a fast Set method. If you have a -// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick -// the former. For example, the Inc method of the returned Gauge is slower than -// the Inc method of a Counter returned by NewCounter. This matches the typical -// scenarios for Gauges and Counters, where the former tends to be Set-heavy and -// the latter Inc-heavy. 
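
The Gauge interface removed in gauge.go is driven through Set (or Inc/Dec/Add/Sub); a minimal, illustrative use with a made-up metric name:

package metrics

import "github.com/prometheus/client_golang/prometheus"

var queuedOps = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "queued_operations",
	Help: "Number of operations currently waiting in the queue.",
})

func init() {
	prometheus.MustRegister(queuedOps)
}

func trackQueue() {
	queuedOps.Set(12) // a gauge can be set to an arbitrary value...
	queuedOps.Inc()   // ...and moved up
	queuedOps.Dec()   // ...or down
	queuedOps.Sub(3)  // ...by arbitrary amounts
}
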
-func NewGauge(opts GaugeOpts) Gauge { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. - return result -} - -type gauge struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - labelPairs []*dto.LabelPair -} - -func (g *gauge) Desc() *Desc { - return g.desc -} - -func (g *gauge) Set(val float64) { - atomic.StoreUint64(&g.valBits, math.Float64bits(val)) -} - -func (g *gauge) SetToCurrentTime() { - g.Set(float64(time.Now().UnixNano()) / 1e9) -} - -func (g *gauge) Inc() { - g.Add(1) -} - -func (g *gauge) Dec() { - g.Add(-1) -} - -func (g *gauge) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&g.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { - return - } - } -} - -func (g *gauge) Sub(val float64) { - g.Add(val * -1) -} - -func (g *gauge) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, out) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). Create instances with NewGaugeVec. -type GaugeVec struct { - *metricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. -func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &GaugeVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Gauge is created. -// -// It is possible to call this method without using the returned Gauge to only -// create the new Gauge but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Gauge for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Gauge from the GaugeVec. In that case, the -// Gauge will still exist, but it will not be exported anymore, even if a -// Gauge with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. 
Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Gauge is created. Implications of -// creating a Gauge without using it and keeping the Gauge for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { - g, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return g -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *GaugeVec) With(labels Labels) Gauge { - g, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return g -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the GaugeVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &GaugeVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. 
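
NewGaugeFunc, defined just below in the removed file, wraps a callback that is invoked at collect time. A hedged sketch, with runtime.NumGoroutine standing in as an arbitrary value source:

package metrics

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

// goroutineCount reports the current goroutine count; the callback runs from
// Write during collection, so it must be safe for concurrent use.
var goroutineCount = prometheus.NewGaugeFunc(
	prometheus.GaugeOpts{
		Name: "app_goroutines",
		Help: "Number of goroutines currently running in this process.",
	},
	func() float64 { return float64(runtime.NumGoroutine()) },
)

func init() {
	prometheus.MustRegister(goroutineCount)
}
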
-type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. Take into account that metric collection may happen -// concurrently. If that results in concurrent calls to Write, like in the case -// where a GaugeFunc is directly registered with Prometheus, the provided -// function must be concurrency-safe. -func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index ba3b9333e..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "runtime" - "runtime/debug" - "time" -) - -type goCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - goInfoDesc *Desc - - // metrics to describe and collect - metrics memStatsMetrics -} - -// NewGoCollector returns a collector which exports metrics about the current Go -// process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This causes a stop-the-world, which is very short with Go1.9+ -// (~25µs). However, with older Go versions, the stop-the-world duration depends -// on the heap size and can be quite significant (~1.7 ms/GiB as per -// https://go-review.googlesource.com/c/go/+/34937). 
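
The go_collector.go dropped here backs prometheus.NewGoCollector. With a custom registry (rather than the default registry that pre-registers it, as noted in the removed doc.go), it would be wired up roughly like this; the listen address and endpoint are the conventional defaults, not something mandated by this change:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry starts empty, so runtime metrics must be added explicitly.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
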
-func NewGoCollector() Collector { - return &goCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created.", - nil, nil), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the GC invocation durations.", - nil, nil), - goInfoDesc: NewDesc( - "go_info", - "Information about the Go environment.", - nil, Labels{"version": runtime.Version()}), - metrics: memStatsMetrics{ - { - desc: NewDesc( - memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("mallocs_total"), - "Total number of mallocs.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("frees_total"), - "Total number of frees.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_objects"), - "Number of allocated objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", - nil, nil, - ), - eval: func(ms 
*runtime.MemStats) float64 { return float64(ms.StackSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("last_gc_time_seconds"), - "Number of seconds since 1970 of last garbage collection.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, - }, - }, - } -} - -func memstatNamespace(s string) string { - return fmt.Sprintf("go_memstats_%s", s) -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc - ch <- c.gcDesc - ch <- c.goInfoDesc - for _, i := range c.metrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. 
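The runtime collector being removed from vendor/ here (go_goroutines, go_threads, go_gc_duration_seconds, go_info and the go_memstats_* gauges) is still provided by the module-managed prometheus/client_golang pulled in via go.mod. A minimal sketch of registering it on a dedicated registry, assuming client_golang v1.11.x; the registry variable and listen address are illustrative, not taken from this repository:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry instead of the global DefaultRegisterer.
	reg := prometheus.NewRegistry()
	// Exports the goroutine, thread, GC and memstats metrics described in the deleted file.
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":2112", nil))
}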
-func (c *goCollector) Collect(ch chan<- Metric) { - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) - - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) - - ms := &runtime.MemStats{} - runtime.ReadMemStats(ms) - for _, i := range c.metrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} - -// memStatsMetrics provide description, value, and value type for memstat metrics. -type memStatsMetrics []struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index f88da707b..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. -// -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. -// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. - Observe(float64) -} - -// bucketLabel is used for the label that defines the upper bound of a -// bucket of a histogram ("le" -> "less or equal"). -const bucketLabel = "le" - -// DefBuckets are the default Histogram buckets. The default buckets are -// tailored to broadly measure the response time (in seconds) of a network -// service. Most likely, however, you will be required to define buckets -// customized to your use case. 
-var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) -) - -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is zero or negative. -func LinearBuckets(start, width float64, count int) []float64 { - if count < 1 { - panic("LinearBuckets needs a positive count") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start += width - } - return buckets -} - -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, -// or if 'factor' is less than or equal 1. -func ExponentialBuckets(start, factor float64, count int) []float64 { - if count < 1 { - panic("ExponentialBuckets needs a positive count") - } - if start <= 0 { - panic("ExponentialBuckets needs a positive start value") - } - if factor <= 1 { - panic("ExponentialBuckets needs a factor greater than 1") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start *= factor - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name to a non-empty string. All other fields are optional -// and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. 
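The bucket helpers documented in the deleted histogram.go are the usual way to fill HistogramOpts.Buckets. A short sketch using only the APIs named above; the metric name and bucket parameters are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Five linear buckets: 0.05, 0.10, 0.15, 0.20, 0.25 (the +Inf bucket is implicit).
	fmt.Println(prometheus.LinearBuckets(0.05, 0.05, 5))
	// Four exponential buckets: 0.001, 0.01, 0.1, 1.
	fmt.Println(prometheus.ExponentialBuckets(0.001, 10, 4))

	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latency in seconds.",
		Buckets: prometheus.DefBuckets, // or one of the slices above
	})
	prometheus.MustRegister(reqDur)
	reqDur.Observe(0.042)
}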
- Buckets []float64 -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: makeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}}, - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make counts - // for both states: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) - - h.init(h) // Init self-collection. - return h -} - -type histogramCounts struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 - buckets []uint64 -} - -type histogram struct { - // countAndHotIdx is a complicated one. For lock-free yet atomic - // observations, we need to save the total count of observations again, - // combined with the index of the currently-hot counts struct, so that - // we can perform the operation on both values atomically. The least - // significant bit defines the hot counts struct. The remaining 63 bits - // represent the total count of observations. This happens under the - // assumption that the 63bit count will never overflow. Rationale: An - // observations takes about 30ns. Let's assume it could happen in - // 10ns. Overflowing the counter will then take at least (2^63)*10ns, - // which is about 3000 years. - // - // This has to be first in the struct for 64bit alignment. See - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. - - upperBounds []float64 - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*histogramCounts - hotIdx int // Index of currently-hot counts. Only used within Write. 
- - labelPairs []*dto.LabelPair -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. - // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - i := sort.SearchFloat64s(h.upperBounds, v) - - // We increment h.countAndHotIdx by 2 so that the counter in the upper - // 63 bits gets incremented by 1. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 2) - hotCounts := h.counts[n%2] - - if i < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[i], 1) - } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) -} - -func (h *histogram) Write(out *dto.Metric) error { - var ( - his = &dto.Histogram{} - buckets = make([]*dto.Bucket, len(h.upperBounds)) - hotCounts, coldCounts *histogramCounts - count uint64 - ) - - // For simplicity, we mutex the rest of this method. It is not in the - // hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() - - // This is a bit arcane, which is why the following spells out this if - // clause in English: - // - // If the currently-hot counts struct is #0, we atomically increment - // h.countAndHotIdx by 1 so that from now on Observe will use the counts - // struct #1. Furthermore, the atomic increment gives us the new value, - // which, in its most significant 63 bits, tells us the count of - // observations done so far up to and including currently ongoing - // observations still using the counts struct just changed from hot to - // cold. To have a normal uint64 for the count, we bitshift by 1 and - // save the result in count. We also set h.hotIdx to 1 for the next - // Write call, and we will refer to counts #1 as hotCounts and to counts - // #0 as coldCounts. - // - // If the currently-hot counts struct is #1, we do the corresponding - // things the other way round. We have to _decrement_ h.countAndHotIdx - // (which is a bit arcane in itself, as we have to express -1 with an - // unsigned int...). - if h.hotIdx == 0 { - count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 - h.hotIdx = 1 - hotCounts = h.counts[1] - coldCounts = h.counts[0] - } else { - count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. - h.hotIdx = 0 - hotCounts = h.counts[0] - coldCounts = h.counts[1] - } - - // Now we have to wait for the now-declared-cold counts to actually cool - // down, i.e. wait for all observations still using it to finish. That's - // the case once the count in the cold counts struct is the same as the - // one atomically retrieved from the upper 63bits of h.countAndHotIdx. - for { - if count == atomic.LoadUint64(&coldCounts.count) { - break - } - runtime.Gosched() // Let observations get work done. 
- } - - his.SampleCount = proto.Uint64(count) - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) - var cumCount uint64 - for i, upperBound := range h.upperBounds { - cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(cumCount), - UpperBound: proto.Float64(upperBound), - } - } - - his.Bucket = buckets - out.Histogram = his - out.Label = h.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) - } - return nil -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - *metricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &HistogramVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Histogram is created. -// -// It is possible to call this method without using the returned Histogram to only -// create the new Histogram but leave it at its starting value, a Histogram without -// any observations. -// -// Keeping the Histogram for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Histogram from the HistogramVec. In that case, the -// Histogram will still exist, but it will not be exported anymore, even if a -// Histogram with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. 
-func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Histogram for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Histogram is created. Implications of -// creating a Histogram without using it and keeping the Histogram for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { - h, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return h -} - -// With works as GetMetricWith but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *HistogramVec) With(labels Labels) Observer { - h, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return h -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the HistogramVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &HistogramVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. 
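Putting the HistogramVec methods described above together, typical call sites look roughly like this; label names and values are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latency, partitioned by method and status code.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method", "code"})
	prometheus.MustRegister(latency)

	// Fast path: panics on a label-count mismatch.
	latency.WithLabelValues("GET", "200").Observe(0.021)

	// Error-returning variant for label sets built at runtime.
	if obs, err := latency.GetMetricWith(prometheus.Labels{"method": "POST", "code": "500"}); err == nil {
		obs.Observe(0.37)
	}

	// Currying pins a label for all subsequent observations.
	getOnly := latency.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("404").Observe(0.004)
}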
-func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. -func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstMetric would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go deleted file mode 100644 index 9f0875bfc..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bufio" - "compress/gzip" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" -) - -// TODO(beorn7): Remove this whole file. It is a partial mirror of -// promhttp/http.go (to avoid circular import chains) where everything HTTP -// related should live. The functions here are just for avoiding -// breakage. Everything is deprecated. - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, -} - -// Handler returns an HTTP handler for the DefaultGatherer. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). -// -// Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead. -func Handler() http.Handler { - return InstrumentHandler("prometheus", UninstrumentedHandler()) -} - -// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. -// -// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}) -// instead. See there for further documentation. -func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - mfs, err := DefaultGatherer.Gather() - if err != nil { - httpError(rsp, err) - return - } - - contentType := expfmt.Negotiate(req.Header) - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - httpError(rsp, err) - return - } - } - }) -} - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code"). -// -// Deprecated: InstrumentHandler has several issues. Use the tooling provided in -// package promhttp instead. The issues are the following: (1) It uses Summaries -// rather than Histograms. Summaries are not useful if aggregation across -// multiple instances is required. (2) It uses microseconds as unit, which is -// deprecated and should be replaced by seconds. (3) The size of the request is -// calculated in a separate goroutine. 
Since this calculator requires access to -// the request header, it creates a race with any writes to the header performed -// during request handling. httputil.ReverseProxy is a prominent example for a -// handler performing such writes. (4) It has additional issues with HTTP/2, cf. -// https://github.com/prometheus/client_golang/issues/272. -func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -// -// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. "Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). -// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -// -// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. -// -// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. Use the tooling provided in package promhttp instead. 
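The deprecation notes above point at package promhttp. A hedged sketch of the histogram-based replacement, assuming the promhttp middleware shipped with the client_golang version pinned in go.mod (handler, metric name and address are illustrative and not part of this diff):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latency in seconds.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	prometheus.MustRegister(reqDur)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Histograms in seconds instead of the deprecated microsecond summaries.
	http.Handle("/", promhttp.InstrumentHandlerDuration(reqDur, hello))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}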
-func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - if err := Register(reqCnt); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqCnt = are.ExistingCollector.(*CounterVec) - } else { - panic(err) - } - } - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - if err := Register(reqDur); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqDur = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - if err := Register(reqSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." - resSz := NewSummary(opts) - if err := Register(resSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - resSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := computeApproximateRequestSize(r) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - reqCnt.WithLabelValues(method, code).Inc() - reqDur.Observe(elapsed) - resSz.Observe(float64(delegate.written)) - reqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request) <-chan int { - // Get URL length in current goroutine for avoiding a race condition. - // HandlerFunc that runs in parallel may modify the URL. - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - out := make(chan int, 1) - - go func() { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
- - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s - close(out) - }() - - return out -} - -type responseWriterDelegator struct { - http.ResponseWriter - - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} - -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerErrer. Error contents is -// supposed to be uncompressed plain text. 
However, same as with a plain -// http.Error, any header settings will be void if the header has already been -// sent. The error message will still be written to the writer, but it will -// probably be of limited use. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go deleted file mode 100644 index 351c26e1a..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "sort" - - dto "github.com/prometheus/client_model/go" -) - -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// NormalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. 
-func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go deleted file mode 100644 index 2744443ac..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "strings" - "unicode/utf8" - - "github.com/prometheus/common/model" -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. 
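As the Labels comment above notes, the map form is mostly used with the With/GetMetricWith family on vector collectors. A small sketch; the counter name is illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "HTTP requests, partitioned by status code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(requests)

	// Map form: order-independent, validated against the variable labels.
	requests.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)

	// Positional form: shorter, but sensitive to argument order.
	requests.WithLabelValues("200", "GET").Inc()
}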
-const reservedLabelPrefix = "__" - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { - return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", - errInconsistentCardinality, fqName, - len(labels), labels, - len(labelValues), labelValues, - ) -} - -func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { - if len(labels) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(labels), labels, - ) - } - - for name, val := range labels { - if !utf8.ValidString(val) { - return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) - } - } - - return nil -} - -func validateLabelValues(vals []string, expectedNumberOfValues int) error { - if len(vals) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(vals), vals, - ) - } - - for _, val := range vals { - if !utf8.ValidString(val) { - return fmt.Errorf("label value %q is not valid UTF-8", val) - } - } - - return nil -} - -func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index 55e6d86d5..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "strings" - "time" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -const separatorByte byte = 255 - -// A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementations of Metric in this package are Gauge, Counter, -// Histogram, Summary, and Untyped. -type Metric interface { - // Desc returns the descriptor for the Metric. This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. - // - // Metric implementations must observe concurrency safety as reads of - // this metric may occur at any time, and any blocking occurs at the - // expense of total performance of rendering all registered - // metrics. Ideally, Metric implementations should support concurrent - // readers. 
- // - // While populating dto.Metric, it is the responsibility of the - // implementation to ensure validity of the Metric protobuf (like valid - // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. Callers of Write should - // still make sure of sorting if they depend on it. - Write(*dto.Metric) error - // TODO(beorn7): The original rationale of passing in a pre-allocated - // dto.Metric protobuf to save allocations has disappeared. The - // signature of this method should be changed to "Write() (*dto.Metric, - // error)". -} - -// Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just be -// an alias of this type (which might change when the requirement arises.) -// -// It is mandatory to set Name to a non-empty string. All other fields are -// optional and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type Opts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Metric (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the metric must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this metric. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels - ConstLabels Labels -} - -// BuildFQName joins the given three name components by "_". Empty name -// components are ignored. If the name parameter itself is empty, an empty -// string is returned, no matter what. Metric implementations included in this -// library use this function internally to generate the fully-qualified metric -// name from the name component in their Opts. Users of the library will only -// need this function if they implement their own Metric or instantiate a Desc -// (with NewDesc) directly. -func BuildFQName(namespace, subsystem, name string) string { - if name == "" { - return "" - } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") - } - return name -} - -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. 
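BuildFQName, described above, matters mainly when constructing a Desc by hand; its joining rules reduce to the following (the name components are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("myapp", "http", "requests_total")) // myapp_http_requests_total
	fmt.Println(prometheus.BuildFQName("myapp", "", "up"))                 // myapp_up
	fmt.Println(prometheus.BuildFQName("", "", "up"))                      // up
	fmt.Println(prometheus.BuildFQName("myapp", "http", ""))               // "" (an empty Name always yields "")
}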
-type labelPairSorter []*dto.LabelPair - -func (s labelPairSorter) Len() int { - return len(s) -} - -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. -func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } - -type timestampedMetric struct { - Metric - t time.Time -} - -func (m timestampedMetric) Write(pb *dto.Metric) error { - e := m.Metric.Write(pb) - pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) - return e -} - -// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a -// way that it has an explicit timestamp set to the provided Time. This is only -// useful in rare cases as the timestamp of a Prometheus metric should usually -// be set by the Prometheus server during scraping. Exceptions include mirroring -// metrics with given timestamps from other metric -// sources. -// -// NewMetricWithTimestamp works best with MustNewConstMetric, -// MustNewConstHistogram, and MustNewConstSummary, see example. -// -// Currently, the exposition formats used by Prometheus are limited to -// millisecond resolution. Thus, the provided time will be rounded down to the -// next full millisecond value. -func NewMetricWithTimestamp(t time.Time, m Metric) Metric { - return timestampedMetric{Metric: m, t: t} -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go deleted file mode 100644 index 5806cd09e..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Observer is the interface that wraps the Observe method, which is used by -// Histogram and Summary to add observations. -type Observer interface { - Observe(float64) -} - -// The ObserverFunc type is an adapter to allow the use of ordinary -// functions as Observers. If f is a function with the appropriate -// signature, ObserverFunc(f) is an Observer that calls f. -// -// This adapter is usually used in connection with the Timer type, and there are -// two general use cases: -// -// The most common one is to use a Gauge as the Observer for a Timer. -// See the "Gauge" Timer example. -// -// The more advanced use case is to create a function that dynamically decides -// which Observer to use for observing the duration. See the "Complex" Timer -// example. 
-type ObserverFunc func(float64) - -// Observe calls f(value). It implements Observer. -func (f ObserverFunc) Observe(value float64) { - f(value) -} - -// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. -type ObserverVec interface { - GetMetricWith(Labels) (Observer, error) - GetMetricWithLabelValues(lvs ...string) (Observer, error) - With(Labels) Observer - WithLabelValues(...string) Observer - CurryWith(Labels) (ObserverVec, error) - MustCurryWith(Labels) ObserverVec - - Collector -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index 55176d58c..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "os" - - "github.com/prometheus/procfs" -) - -type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc -} - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. - ReportErrors bool -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including CPU, memory and file descriptor usage as well as -// the process start time. The detailed behavior is defined by the provided -// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a -// collector for the current process with an empty namespace string and no error -// reporting. -// -// Currently, the collector depends on a Linux-style proc filesystem and -// therefore only exports metrics for Linux. 
-// -// Note: An older version of this function had the following signature: -// -// NewProcessCollector(pid int, namespace string) Collector -// -// Most commonly, it was called as -// -// NewProcessCollector(os.Getpid(), "") -// -// The following call of the current version is equivalent to the above: -// -// NewProcessCollector(ProcessCollectorOpts{}) -func NewProcessCollector(opts ProcessCollectorOpts) Collector { - ns := "" - if len(opts.Namespace) > 0 { - ns = opts.Namespace + "_" - } - - c := &processCollector{ - reportErrors: opts.ReportErrors, - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - maxVsize: NewDesc( - ns+"process_virtual_memory_max_bytes", - "Maximum amount of virtual memory available in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), - } - - if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } - } else { - c.pidFn = opts.PidFn - } - - // Set up process metric collection if supported by the runtime. - if _, err := procfs.NewStat(); err == nil { - c.collectFn = c.processCollect - } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } - } - - return c -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime -} - -// Collect returns the current state of all metrics of the collector. 
-func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.NewStat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.NewLimits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } -} - -func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { - if !c.reportErrors { - return - } - if desc == nil { - desc = NewInvalidDesc(err) - } - ch <- NewInvalidMetric(desc, err) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go deleted file mode 100644 index 67b56d37c..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
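For reference, the process collector removed above is normally wired up with a single registration call. A sketch; the registry and namespace prefix are illustrative, and as the comments note the metrics are only populated on platforms with a proc filesystem. Newer client_golang releases expose the same constructor from the collectors subpackage.

package example

import "github.com/prometheus/client_golang/prometheus"

func newRegistryWithProcessMetrics() *prometheus.Registry {
	reg := prometheus.NewRegistry()
	// The zero value of ProcessCollectorOpts would target the current
	// process, add no prefix, and ignore collection errors; here both a
	// prefix and error reporting are enabled.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		Namespace:    "osdsapiserver", // illustrative prefix
		ReportErrors: true,
	}))
	return reg
}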
- -package promhttp - -import ( - "bufio" - "io" - "net" - "net/http" -) - -const ( - closeNotifier = 1 << iota - flusher - hijacker - readerFrom - pusher -) - -type delegator interface { - http.ResponseWriter - - Status() int - Written() int64 -} - -type responseWriterDelegator struct { - http.ResponseWriter - - handler, method string - status int - written int64 - wroteHeader bool - observeWriteHeader func(int) -} - -func (r *responseWriterDelegator) Status() int { - return r.status -} - -func (r *responseWriterDelegator) Written() int64 { - return r.written -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) - if r.observeWriteHeader != nil { - r.observeWriteHeader(code) - } -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type closeNotifierDelegator struct{ *responseWriterDelegator } -type flusherDelegator struct{ *responseWriterDelegator } -type hijackerDelegator struct{ *responseWriterDelegator } -type readerFromDelegator struct{ *responseWriterDelegator } - -func (d closeNotifierDelegator) CloseNotify() <-chan bool { - return d.ResponseWriter.(http.CloseNotifier).CloseNotify() -} -func (d flusherDelegator) Flush() { - d.ResponseWriter.(http.Flusher).Flush() -} -func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return d.ResponseWriter.(http.Hijacker).Hijack() -} -func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { - if !d.wroteHeader { - d.WriteHeader(http.StatusOK) - } - n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) - d.written += n - return n, err -} - -var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) - -func init() { - // TODO(beorn7): Code generation would help here. 
- pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 - return d - } - pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 - return closeNotifierDelegator{d} - } - pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 - return flusherDelegator{d} - } - pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 - return struct { - *responseWriterDelegator - http.Flusher - http.CloseNotifier - }{d, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 - return hijackerDelegator{d} - } - pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 - return struct { - *responseWriterDelegator - http.Hijacker - http.CloseNotifier - }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - }{d, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 - return readerFromDelegator{d} - } - pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.CloseNotifier - }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - }{d, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - }{d, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go deleted file mode 
100644 index 31a706956..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -type pusherDelegator struct{ *responseWriterDelegator } - -func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { - return d.ResponseWriter.(http.Pusher).Push(target, opts) -} - -func init() { - pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} - } - pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 - return struct { - *responseWriterDelegator - http.Pusher - http.CloseNotifier - }{d, pusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - }{d, pusherDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - }{d, pusherDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - }{d, pusherDelegator{d}, readerFromDelegator{d}} - } - pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 - return struct { - 
*responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - if _, ok := w.(http.Pusher); ok { - id += pusher - } - - return pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go deleted file mode 100644 index 8bb9b8b68..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - - return pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go deleted file mode 100644 index 668eb6b3c..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package promhttp provides tooling around HTTP servers and clients. -// -// First, the package allows the creation of http.Handler instances to expose -// Prometheus metrics via HTTP. promhttp.Handler acts on the -// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a -// custom registry or anything that implements the Gatherer interface. It also -// allows the creation of handlers that act differently on errors or allow to -// log errors. -// -// Second, the package provides tooling to instrument instances of http.Handler -// via middleware. Middleware wrappers follow the naming scheme -// InstrumentHandlerX, where X describes the intended use of the middleware. -// See each function's doc comment for specific details. -// -// Finally, the package allows for an http.RoundTripper to be instrumented via -// middleware. Middleware wrappers follow the naming scheme -// InstrumentRoundTripperX, where X describes the intended use of the -// middleware. See each function's doc comment for specific details. -package promhttp - -import ( - "compress/gzip" - "fmt" - "io" - "net/http" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" - - "github.com/prometheus/client_golang/prometheus" -) - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, -} - -// Handler returns an http.Handler for the prometheus.DefaultGatherer, using -// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has -// no error logging, and it applies compression if requested by the client. -// -// The returned http.Handler is already instrumented using the -// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. 
If you -// create multiple http.Handlers by separate calls of the Handler function, the -// metrics used for instrumentation will be shared between them, providing -// global scrape counts. -// -// This function is meant to cover the bulk of basic use cases. If you are doing -// anything that requires more customization (including using a non-default -// Gatherer, different instrumentation, and non-default HandlerOpts), use the -// HandlerFor function. See there for details. -func Handler() http.Handler { - return InstrumentMetricHandler( - prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), - ) -} - -// HandlerFor returns an uninstrumented http.Handler for the provided -// Gatherer. The behavior of the Handler is defined by the provided -// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom -// Gatherers, with non-default HandlerOpts, and/or with custom (or no) -// instrumentation. Use the InstrumentMetricHandler function to apply the same -// kind of instrumentation as it is used by the Handler function. -func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - var inFlightSem chan struct{} - if opts.MaxRequestsInFlight > 0 { - inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) - } - - h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - if inFlightSem != nil { - select { - case inFlightSem <- struct{}{}: // All good, carry on. - defer func() { <-inFlightSem }() - default: - http.Error(rsp, fmt.Sprintf( - "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, - ), http.StatusServiceUnavailable) - return - } - } - mfs, err := reg.Gather() - if err != nil { - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error gathering metrics:", err) - } - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - if len(mfs) == 0 { - // Still report the error if no metrics have been gathered. - httpError(rsp, err) - return - } - case HTTPErrorOnError: - httpError(rsp, err) - return - } - } - - contentType := expfmt.Negotiate(req.Header) - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding and sending metric family:", err) - } - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - // Handled later. - case HTTPErrorOnError: - httpError(rsp, err) - return - } - } - } - - if lastErr != nil { - httpError(rsp, lastErr) - } - }) - - if opts.Timeout <= 0 { - return h - } - return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( - "Exceeded configured timeout of %v.\n", - opts.Timeout, - )) -} - -// InstrumentMetricHandler is usually used with an http.Handler returned by the -// HandlerFor function. It instruments the provided http.Handler with two -// metrics: A counter vector "promhttp_metric_handler_requests_total" to count -// scrapes partitioned by HTTP status code, and a gauge -// "promhttp_metric_handler_requests_in_flight" to track the number of -// simultaneous scrapes. 
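For reference, the promhttp handlers removed above are typically exposed like this; the paths, port, and custom registry are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Simplest case: default registry, default options.
	http.Handle("/metrics", promhttp.Handler())

	// Custom registry, instrumented the same way Handler() instruments itself.
	reg := prometheus.NewRegistry()
	http.Handle("/custom-metrics", promhttp.InstrumentMetricHandler(
		reg, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
	))

	log.Fatal(http.ListenAndServe(":2112", nil))
}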
This function idempotently registers collectors for -// both metrics with the provided Registerer. It panics if the registration -// fails. The provided metrics are useful to see how many scrapes hit the -// monitored target (which could be from different Prometheus servers or other -// scrapers), and how often they overlap (which would result in more than one -// scrape in flight at the same time). Note that the scrapes-in-flight gauge -// will contain the scrape by which it is exposed, while the scrape counter will -// only get incremented after the scrape is complete (as only then the status -// code is known). For tracking scrape durations, use the -// "scrape_duration_seconds" gauge created by the Prometheus server upon each -// scrape. -func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { - cnt := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_requests_total", - Help: "Total number of scrapes by HTTP status code.", - }, - []string{"code"}, - ) - // Initialize the most likely HTTP status codes. - cnt.WithLabelValues("200") - cnt.WithLabelValues("500") - cnt.WithLabelValues("503") - if err := reg.Register(cnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - cnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) - } - } - - gge := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "promhttp_metric_handler_requests_in_flight", - Help: "Current number of scrapes being served.", - }) - if err := reg.Register(gge); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - gge = are.ExistingCollector.(prometheus.Gauge) - } else { - panic(err) - } - } - - return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) -} - -// HandlerErrorHandling defines how a Handler serving metrics will handle -// errors. -type HandlerErrorHandling int - -// These constants cause handlers serving metrics to behave as described if -// errors are encountered. -const ( - // Serve an HTTP status code 500 upon the first error - // encountered. Report the error message in the body. - HTTPErrorOnError HandlerErrorHandling = iota - // Ignore errors and try to serve as many metrics as possible. However, - // if no metrics can be served, serve an HTTP status code 500 and the - // last error message in the body. Only use this in deliberate "best - // effort" metrics collection scenarios. It is recommended to at least - // log errors (by providing an ErrorLog in HandlerOpts) to not mask - // errors completely. - ContinueOnError - // Panic upon the first error encountered (useful for "crash only" apps). - PanicOnError -) - -// Logger is the minimal interface HandlerOpts needs for logging. Note that -// log.Logger from the standard library implements this interface, and it is -// easy to implement by custom loggers, if they don't do so already anyway. -type Logger interface { - Println(v ...interface{}) -} - -// HandlerOpts specifies options how to serve metrics via an http.Handler. The -// zero value of HandlerOpts is a reasonable default. -type HandlerOpts struct { - // ErrorLog specifies an optional logger for errors collecting and - // serving metrics. If nil, errors are not logged at all. - ErrorLog Logger - // ErrorHandling defines how errors are handled. Note that errors are - // logged regardless of the configured ErrorHandling provided ErrorLog - // is not nil. 
- ErrorHandling HandlerErrorHandling - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. - DisableCompression bool - // The number of concurrent HTTP requests is limited to - // MaxRequestsInFlight. Additional requests are responded to with 503 - // Service Unavailable and a suitable message in the body. If - // MaxRequestsInFlight is 0 or negative, no limit is applied. - MaxRequestsInFlight int - // If handling a request takes longer than Timeout, it is responded to - // with 503 ServiceUnavailable and a suitable Message. No timeout is - // applied if Timeout is 0 or negative. Note that with the current - // implementation, reaching the timeout simply ends the HTTP requests as - // described above (and even that only if sending of the body hasn't - // started yet), while the bulk work of gathering all the metrics keeps - // running in the background (with the eventual result to be thrown - // away). Until the implementation is improved, it is recommended to - // implement a separate timeout in potentially slow Collectors. - Timeout time.Duration -} - -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerErrer. Error contents is -// supposed to be uncompressed plain text. However, same as with a plain -// http.Error, any header settings will be void if the header has already been -// sent. The error message will still be written to the writer, but it will -// probably be of limited use. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go deleted file mode 100644 index 86fd56447..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "net/http" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// The RoundTripperFunc type is an adapter to allow the use of ordinary -// functions as RoundTrippers. If f is a function with the appropriate -// signature, RountTripperFunc(f) is a RoundTripper that calls f. -type RoundTripperFunc func(req *http.Request) (*http.Response, error) - -// RoundTrip implements the RoundTripper interface. 
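For reference, a sketch of HandlerFor with non-default HandlerOpts, exercising the option fields documented above; the logger, concurrency limit, and timeout values are illustrative:

package example

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func metricsHandler(reg *prometheus.Registry) http.Handler {
	return promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// *log.Logger satisfies the minimal Logger interface above.
		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling:       promhttp.ContinueOnError, // serve whatever was gathered
		MaxRequestsInFlight: 3,                        // further scrapes get 503
		Timeout:             10 * time.Second,         // slow gathers get 503
	})
}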
-func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return rt(r) -} - -// InstrumentRoundTripperInFlight is a middleware that wraps the provided -// http.RoundTripper. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.RoundTripper. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - gauge.Inc() - defer gauge.Dec() - return next.RoundTrip(r) - }) -} - -// InstrumentRoundTripperCounter is a middleware that wraps the provided -// http.RoundTripper to observe the request result with the provided CounterVec. -// The CounterVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. Partitioning of the CounterVec happens by HTTP status code -// and/or HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped RoundTripper panics or returns a non-nil error, the Counter -// is not incremented. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(counter) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - resp, err := next.RoundTrip(r) - if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() - } - return resp, err - }) -} - -// InstrumentRoundTripperDuration is a middleware that wraps the provided -// http.RoundTripper to observe the request duration with the provided -// ObserverVec. The ObserverVec must have zero, one, or two non-const -// non-curried labels. For those, the only allowed label names are "code" and -// "method". The function panics otherwise. The Observe method of the Observer -// in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped RoundTripper panics or returns a non-nil error, no values are -// reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. 
-func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(obs) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - resp, err := next.RoundTrip(r) - if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) - } - return resp, err - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go deleted file mode 100644 index a034d1ec0..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "context" - "crypto/tls" - "net/http" - "net/http/httptrace" - "time" -) - -// InstrumentTrace is used to offer flexibility in instrumenting the available -// httptrace.ClientTrace hook functions. Each function is passed a float64 -// representing the time in seconds since the start of the http request. A user -// may choose to use separately buckets Histograms, or implement custom -// instance labels on a per function basis. -type InstrumentTrace struct { - GotConn func(float64) - PutIdleConn func(float64) - GotFirstResponseByte func(float64) - Got100Continue func(float64) - DNSStart func(float64) - DNSDone func(float64) - ConnectStart func(float64) - ConnectDone func(float64) - TLSHandshakeStart func(float64) - TLSHandshakeDone func(float64) - WroteHeaders func(float64) - Wait100Continue func(float64) - WroteRequest func(float64) -} - -// InstrumentRoundTripperTrace is a middleware that wraps the provided -// RoundTripper and reports times to hook functions provided in the -// InstrumentTrace struct. Hook functions that are not present in the provided -// InstrumentTrace struct are ignored. Times reported to the hook functions are -// time since the start of the request. Only with Go1.9+, those times are -// guaranteed to never be negative. (Earlier Go versions are not using a -// monotonic clock.) Note that partitioning of Histograms is expensive and -// should be used judiciously. -// -// For hook functions that receive an error as an argument, no observations are -// made in the event of a non-nil error value. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
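For reference, the client-side middlewares above compose by wrapping an http.RoundTripper from the outside in. A sketch with illustrative metric names:

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	clientInFlight = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "Outgoing requests currently in flight.",
	})
	clientRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_api_requests_total",
		Help: "Outgoing requests by status code and method.",
	}, []string{"code", "method"})
	clientDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "client_request_duration_seconds",
		Help:    "Outgoing request latency.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method"})
)

func init() { prometheus.MustRegister(clientInFlight, clientRequests, clientDuration) }

func newInstrumentedClient() *http.Client {
	// Middlewares nest outermost-first around the base transport.
	rt := promhttp.InstrumentRoundTripperInFlight(clientInFlight,
		promhttp.InstrumentRoundTripperCounter(clientRequests,
			promhttp.InstrumentRoundTripperDuration(clientDuration, http.DefaultTransport),
		),
	)
	return &http.Client{Transport: rt}
}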
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - - trace := &httptrace.ClientTrace{ - GotConn: func(_ httptrace.GotConnInfo) { - if it.GotConn != nil { - it.GotConn(time.Since(start).Seconds()) - } - }, - PutIdleConn: func(err error) { - if err != nil { - return - } - if it.PutIdleConn != nil { - it.PutIdleConn(time.Since(start).Seconds()) - } - }, - DNSStart: func(_ httptrace.DNSStartInfo) { - if it.DNSStart != nil { - it.DNSStart(time.Since(start).Seconds()) - } - }, - DNSDone: func(_ httptrace.DNSDoneInfo) { - if it.DNSDone != nil { - it.DNSDone(time.Since(start).Seconds()) - } - }, - ConnectStart: func(_, _ string) { - if it.ConnectStart != nil { - it.ConnectStart(time.Since(start).Seconds()) - } - }, - ConnectDone: func(_, _ string, err error) { - if err != nil { - return - } - if it.ConnectDone != nil { - it.ConnectDone(time.Since(start).Seconds()) - } - }, - GotFirstResponseByte: func() { - if it.GotFirstResponseByte != nil { - it.GotFirstResponseByte(time.Since(start).Seconds()) - } - }, - Got100Continue: func() { - if it.Got100Continue != nil { - it.Got100Continue(time.Since(start).Seconds()) - } - }, - TLSHandshakeStart: func() { - if it.TLSHandshakeStart != nil { - it.TLSHandshakeStart(time.Since(start).Seconds()) - } - }, - TLSHandshakeDone: func(_ tls.ConnectionState, err error) { - if err != nil { - return - } - if it.TLSHandshakeDone != nil { - it.TLSHandshakeDone(time.Since(start).Seconds()) - } - }, - WroteHeaders: func() { - if it.WroteHeaders != nil { - it.WroteHeaders(time.Since(start).Seconds()) - } - }, - Wait100Continue: func() { - if it.Wait100Continue != nil { - it.Wait100Continue(time.Since(start).Seconds()) - } - }, - WroteRequest: func(_ httptrace.WroteRequestInfo) { - if it.WroteRequest != nil { - it.WroteRequest(time.Since(start).Seconds()) - } - }, - } - r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) - - return next.RoundTrip(r) - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go deleted file mode 100644 index 9db243805..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "errors" - "net/http" - "strconv" - "strings" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus" -) - -// magicString is used for the hacky label test in checkLabels. Remove once fixed. -const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" - -// InstrumentHandlerInFlight is a middleware that wraps the provided -// http.Handler. 
It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.Handler. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - g.Inc() - defer g.Dec() - next.ServeHTTP(w, r) - }) -} - -// InstrumentHandlerDuration is a middleware that wraps the provided -// http.Handler to observe the request duration with the provided ObserverVec. -// The ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request duration in seconds. Partitioning happens by HTTP -// status code and/or HTTP method if the respective instance label names are -// present in the ObserverVec. For unpartitioned observations, use an -// ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) - }) -} - -// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler -// to observe the request result with the provided CounterVec. The CounterVec -// must have zero, one, or two non-const non-curried labels. For those, the only -// allowed label names are "code" and "method". The function panics -// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or -// HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, the Counter is not incremented. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(counter) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status())).Inc() - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0)).Inc() - }) -} - -// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided -// http.Handler to observe with the provided ObserverVec the request duration -// until the response headers are written. 
The ObserverVec must have zero, one, -// or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. The Observe method of -// the Observer in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped Handler panics before calling WriteHeader, no value is -// reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) - }) - next.ServeHTTP(d, r) - }) -} - -// InstrumentHandlerRequestSize is a middleware that wraps the provided -// http.Handler to observe the request size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. Note that partitioning of Histograms is expensive and should be used -// judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) - }) -} - -// InstrumentHandlerResponseSize is a middleware that wraps the provided -// http.Handler to observe the response size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the response size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. 
Note that partitioning of Histograms is expensive and should be used -// judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { - code, method := checkLabels(obs) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) - }) -} - -func checkLabels(c prometheus.Collector) (code bool, method bool) { - // TODO(beorn7): Remove this hacky way to check for instance labels - // once Descriptors can have their dimensionality queried. - var ( - desc *prometheus.Desc - m prometheus.Metric - pm dto.Metric - lvs []string - ) - - // Get the Desc from the Collector. - descc := make(chan *prometheus.Desc, 1) - c.Describe(descc) - - select { - case desc = <-descc: - default: - panic("no description provided by collector") - } - select { - case <-descc: - panic("more than one description provided by collector") - default: - } - - close(descc) - - // Create a ConstMetric with the Desc. Since we don't know how many - // variable labels there are, try for as long as it needs. - for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { - m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) - } - - // Write out the metric into a proto message and look at the labels. - // If the value is not the magicString, it is a constLabel, which doesn't interest us. - // If the label is curried, it doesn't interest us. - // In all other cases, only "code" or "method" is allowed. - if err := m.Write(&pm); err != nil { - panic("error checking metric for labels") - } - for _, label := range pm.Label { - name, value := label.GetName(), label.GetValue() - if value != magicString || isLabelCurried(c, name) { - continue - } - switch name { - case "code": - code = true - case "method": - method = true - default: - panic("metric partitioned with non-supported labels") - } - } - return -} - -func isLabelCurried(c prometheus.Collector, label string) bool { - // This is even hackier than the label test above. - // We essentially try to curry again and see if it works. - // But for that, we need to type-convert to the two - // types we use here, ObserverVec or *CounterVec. - switch v := c.(type) { - case *prometheus.CounterVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - case prometheus.ObserverVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - default: - panic("unsupported metric vec type") - } - return true -} - -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. 
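For reference, the server-side middlewares above chain the same way around an http.Handler. A sketch with illustrative metric names and bucket choices:

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	inFlight = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "api_in_flight_requests",
		Help: "Requests currently being served.",
	})
	requestsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "api_requests_total",
		Help: "Requests served, by status code and method.",
	}, []string{"code", "method"})
	requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "api_request_duration_seconds",
		Help:    "Request latency, by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	responseSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "api_response_size_bytes",
		Help:    "Response sizes in bytes.",
		Buckets: []float64{200, 500, 1000, 5000, 10000},
	}, []string{}) // unpartitioned
)

func init() { prometheus.MustRegister(inFlight, requestsTotal, requestDuration, responseSize) }

// instrument wraps a handler once at startup with the full middleware chain.
func instrument(next http.Handler) http.Handler {
	return promhttp.InstrumentHandlerInFlight(inFlight,
		promhttp.InstrumentHandlerDuration(requestDuration,
			promhttp.InstrumentHandlerCounter(requestsTotal,
				promhttp.InstrumentHandlerResponseSize(responseSize, next),
			),
		),
	)
}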
-var emptyLabels = prometheus.Labels{} - -func labels(code, method bool, reqMethod string, status int) prometheus.Labels { - if !(code || method) { - return emptyLabels - } - labels := prometheus.Labels{} - - if code { - labels["code"] = sanitizeCode(status) - } - if method { - labels["method"] = sanitizeMethod(reqMethod) - } - - return labels -} - -func computeApproximateRequestSize(r *http.Request) int { - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - return s -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -// If the wrapped http.Handler has not set a status code, i.e. the value is -// currently 0, santizeCode will return 200, for consistency with behavior in -// the stdlib. -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200, 0: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go b/vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go deleted file mode 100644 index 3d62b5725..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package push - -// This file contains only deprecated code. Remove after v0.9 is released. - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - - "github.com/prometheus/common/expfmt" - "github.com/prometheus/common/model" - - "github.com/prometheus/client_golang/prometheus" -) - -// FromGatherer triggers a metric collection by the provided Gatherer (which is -// usually implemented by a prometheus.Registry) and pushes all gathered metrics -// to the Pushgateway specified by url, using the provided job name and the -// (optional) further grouping labels (the grouping map may be nil). See the -// Pushgateway documentation for detailed implications of the job and other -// grouping labels. Neither the job name nor any grouping label value may -// contain a "/". The metrics pushed must not contain a job label of their own -// nor any of the grouping labels. -// -// You can use just host:port or ip:port as url, in which case 'http://' is -// added automatically. You can also include the schema in the URL. However, do -// not include the '/metrics/jobs/...' part. -// -// Note that all previously pushed metrics with the same job and other grouping -// labels will be replaced with the metrics pushed by this call. (It uses HTTP -// method 'PUT' to push to the Pushgateway.) -// -// Deprecated: Please use a Pusher created with New instead. -func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { - return push(job, grouping, url, g, "PUT") -} - -// AddFromGatherer works like FromGatherer, but only previously pushed metrics -// with the same name (and the same job and other grouping labels) will be -// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) -// -// Deprecated: Please use a Pusher created with New instead. 
-func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { - return push(job, grouping, url, g, "POST") -} - -func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error { - if !strings.Contains(pushURL, "://") { - pushURL = "http://" + pushURL - } - if strings.HasSuffix(pushURL, "/") { - pushURL = pushURL[:len(pushURL)-1] - } - - if strings.Contains(job, "/") { - return fmt.Errorf("job contains '/': %s", job) - } - urlComponents := []string{url.QueryEscape(job)} - for ln, lv := range grouping { - if !model.LabelName(ln).IsValid() { - return fmt.Errorf("grouping label has invalid name: %s", ln) - } - if strings.Contains(lv, "/") { - return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv) - } - urlComponents = append(urlComponents, ln, lv) - } - pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/")) - - mfs, err := g.Gather() - if err != nil { - return err - } - buf := &bytes.Buffer{} - enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) - // Check for pre-existing grouping labels: - for _, mf := range mfs { - for _, m := range mf.GetMetric() { - for _, l := range m.GetLabel() { - if l.GetName() == "job" { - return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) - } - if _, ok := grouping[l.GetName()]; ok { - return fmt.Errorf( - "pushed metric %s (%s) already contains grouping label %s", - mf.GetName(), m, l.GetName(), - ) - } - } - } - enc.Encode(mf) - } - req, err := http.NewRequest(method, pushURL, buf) - if err != nil { - return err - } - req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != 202 { - body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. - return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) - } - return nil -} - -// Collectors works like FromGatherer, but it does not use a Gatherer. Instead, -// it collects from the provided collectors directly. It is a convenient way to -// push only a few metrics. -// -// Deprecated: Please use a Pusher created with New instead. -func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { - return pushCollectors(job, grouping, url, "PUT", collectors...) -} - -// AddCollectors works like AddFromGatherer, but it does not use a Gatherer. -// Instead, it collects from the provided collectors directly. It is a -// convenient way to push only a few metrics. -// -// Deprecated: Please use a Pusher created with New instead. -func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { - return pushCollectors(job, grouping, url, "POST", collectors...) -} - -func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error { - r := prometheus.NewRegistry() - for _, collector := range collectors { - if err := r.Register(collector); err != nil { - return err - } - } - return push(job, grouping, url, r, method) -} - -// HostnameGroupingKey returns a label map with the only entry -// {instance=""}. This can be conveniently used as the grouping -// parameter if metrics should be pushed with the hostname as label. 
The -// returned map is created upon each call so that the caller is free to add more -// labels to the map. -// -// Deprecated: Usually, metrics pushed to the Pushgateway should not be -// host-centric. (You would use https://github.com/prometheus/node_exporter in -// that case.) If you have the need to add the hostname to the grouping key, you -// are probably doing something wrong. See -// https://prometheus.io/docs/practices/pushing/ for details. -func HostnameGroupingKey() map[string]string { - hostname, err := os.Hostname() - if err != nil { - return map[string]string{"instance": "unknown"} - } - return map[string]string{"instance": hostname} -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go deleted file mode 100644 index 3721ff198..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package push provides functions to push metrics to a Pushgateway. It uses a -// builder approach. Create a Pusher with New and then add the various options -// by using its methods, finally calling Add or Push, like this: -// -// // Easy case: -// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() -// -// // Complex case: -// push.New("http://example.org/metrics", "my_job"). -// Collector(myCollector1). -// Collector(myCollector2). -// Grouping("zone", "xy"). -// Client(&myHTTPClient). -// BasicAuth("top", "secret"). -// Add() -// -// See the examples section for more detailed examples. -// -// See the documentation of the Pushgateway to understand the meaning of -// the grouping key and the differences between Push and Add: -// https://github.com/prometheus/pushgateway -package push - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/prometheus/common/expfmt" - "github.com/prometheus/common/model" - - "github.com/prometheus/client_golang/prometheus" -) - -const contentTypeHeader = "Content-Type" - -// Pusher manages a push to the Pushgateway. Use New to create one, configure it -// with its methods, and finally use the Add or Push method to push. -type Pusher struct { - error error - - url, job string - grouping map[string]string - - gatherers prometheus.Gatherers - registerer prometheus.Registerer - - client *http.Client - useBasicAuth bool - username, password string -} - -// New creates a new Pusher to push to the provided URL with the provided job -// name. You can use just host:port or ip:port as url, in which case “http://” -// is added automatically. Alternatively, include the schema in the -// URL. However, do not include the “/metrics/jobs/…” part. -// -// Note that until https://github.com/prometheus/pushgateway/issues/97 is -// resolved, a “/” character in the job name is prohibited. 
-func New(url, job string) *Pusher { - var ( - reg = prometheus.NewRegistry() - err error - ) - if !strings.Contains(url, "://") { - url = "http://" + url - } - if strings.HasSuffix(url, "/") { - url = url[:len(url)-1] - } - if strings.Contains(job, "/") { - err = fmt.Errorf("job contains '/': %s", job) - } - - return &Pusher{ - error: err, - url: url, - job: job, - grouping: map[string]string{}, - gatherers: prometheus.Gatherers{reg}, - registerer: reg, - client: &http.Client{}, - } -} - -// Push collects/gathers all metrics from all Collectors and Gatherers added to -// this Pusher. Then, it pushes them to the Pushgateway configured while -// creating this Pusher, using the configured job name and any added grouping -// labels as grouping key. All previously pushed metrics with the same job and -// other grouping labels will be replaced with the metrics pushed by this -// call. (It uses HTTP method “PUT” to push to the Pushgateway.) -// -// Push returns the first error encountered by any method call (including this -// one) in the lifetime of the Pusher. -func (p *Pusher) Push() error { - return p.push("PUT") -} - -// Add works like push, but only previously pushed metrics with the same name -// (and the same job and other grouping labels) will be replaced. (It uses HTTP -// method “POST” to push to the Pushgateway.) -func (p *Pusher) Add() error { - return p.push("POST") -} - -// Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered -// to push them to the Pushgateway. The gathered metrics must not contain a job -// label of their own. -// -// For convenience, this method returns a pointer to the Pusher itself. -func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher { - p.gatherers = append(p.gatherers, g) - return p -} - -// Collector adds a Collector to the Pusher, from which metrics will be -// collected to push them to the Pushgateway. The collected metrics must not -// contain a job label of their own. -// -// For convenience, this method returns a pointer to the Pusher itself. -func (p *Pusher) Collector(c prometheus.Collector) *Pusher { - if p.error == nil { - p.error = p.registerer.Register(c) - } - return p -} - -// Grouping adds a label pair to the grouping key of the Pusher, replacing any -// previously added label pair with the same label name. Note that setting any -// labels in the grouping key that are already contained in the metrics to push -// will lead to an error. -// -// For convenience, this method returns a pointer to the Pusher itself. -// -// Note that until https://github.com/prometheus/pushgateway/issues/97 is -// resolved, this method does not allow a “/” character in the label value. -func (p *Pusher) Grouping(name, value string) *Pusher { - if p.error == nil { - if !model.LabelName(name).IsValid() { - p.error = fmt.Errorf("grouping label has invalid name: %s", name) - return p - } - if strings.Contains(value, "/") { - p.error = fmt.Errorf("value of grouping label %s contains '/': %s", name, value) - return p - } - p.grouping[name] = value - } - return p -} - -// Client sets a custom HTTP client for the Pusher. For convenience, this method -// returns a pointer to the Pusher itself. -func (p *Pusher) Client(c *http.Client) *Pusher { - p.client = c - return p -} - -// BasicAuth configures the Pusher to use HTTP Basic Authentication with the -// provided username and password. For convenience, this method returns a -// pointer to the Pusher itself. 
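Taken together, the configuration methods documented above chain into a single builder expression. A minimal sketch, assuming a Pushgateway reachable at pushgateway.example.org:9091 and hypothetical basic-auth credentials; Add() pushes with POST, while Push() would replace the whole group via PUT:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_jobs_in_flight",
		Help: "Jobs currently being processed.",
	})
	inFlight.Set(3)

	// The scheme defaults to http:// when only host:port is given; the grouping
	// label value must not contain a '/', as enforced by Grouping.
	pusher := push.New("pushgateway.example.org:9091", "batch_worker").
		Collector(inFlight).
		Grouping("shard", "7").
		Client(&http.Client{Timeout: 5 * time.Second}).
		BasicAuth("top", "secret")

	// Add() uses POST: only previously pushed metrics with the same names in
	// this group are replaced.
	if err := pusher.Add(); err != nil {
		log.Fatal(err)
	}
}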
-func (p *Pusher) BasicAuth(username, password string) *Pusher { - p.useBasicAuth = true - p.username = username - p.password = password - return p -} - -func (p *Pusher) push(method string) error { - if p.error != nil { - return p.error - } - urlComponents := []string{url.QueryEscape(p.job)} - for ln, lv := range p.grouping { - urlComponents = append(urlComponents, ln, lv) - } - pushURL := fmt.Sprintf("%s/metrics/job/%s", p.url, strings.Join(urlComponents, "/")) - - mfs, err := p.gatherers.Gather() - if err != nil { - return err - } - buf := &bytes.Buffer{} - enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) - // Check for pre-existing grouping labels: - for _, mf := range mfs { - for _, m := range mf.GetMetric() { - for _, l := range m.GetLabel() { - if l.GetName() == "job" { - return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) - } - if _, ok := p.grouping[l.GetName()]; ok { - return fmt.Errorf( - "pushed metric %s (%s) already contains grouping label %s", - mf.GetName(), m, l.GetName(), - ) - } - } - } - enc.Encode(mf) - } - req, err := http.NewRequest(method, pushURL, buf) - if err != nil { - return err - } - if p.useBasicAuth { - req.SetBasicAuth(p.username, p.password) - } - req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) - resp, err := p.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != 202 { - body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. - return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) - } - return nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go deleted file mode 100644 index b5e70b93f..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ /dev/null @@ -1,937 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sort" - "strings" - "sync" - "unicode/utf8" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" -) - -const ( - // Capacity for the channel to collect metrics and descriptors. - capMetricChan = 1000 - capDescChan = 10 -) - -// DefaultRegisterer and DefaultGatherer are the implementations of the -// Registerer and Gatherer interface a number of convenience functions in this -// package act on. Initially, both variables point to the same Registry, which -// has a process collector (currently on Linux only, see NewProcessCollector) -// and a Go collector (see NewGoCollector, in particular the note about -// stop-the-world implication with Go versions older than 1.9) already -// registered. 
This approach to keep default instances as global state mirrors -// the approach of other packages in the Go standard library. Note that there -// are caveats. Change the variables with caution and only if you understand the -// consequences. Users who want to avoid global state altogether should not use -// the convenience functions and act on custom instances instead. -var ( - defaultRegistry = NewRegistry() - DefaultRegisterer Registerer = defaultRegistry - DefaultGatherer Gatherer = defaultRegistry -) - -func init() { - MustRegister(NewProcessCollector(ProcessCollectorOpts{})) - MustRegister(NewGoCollector()) -} - -// NewRegistry creates a new vanilla Registry without any Collectors -// pre-registered. -func NewRegistry() *Registry { - return &Registry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - } -} - -// NewPedanticRegistry returns a registry that checks during collection if each -// collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. Unchecked Collectors (those whose -// Describe methed does not yield any descriptors) are excluded from the check. -// -// Usually, a Registry will be happy as long as the union of all collected -// Metrics is consistent and valid even if some metrics are not consistent with -// their own Desc or a Desc provided by their registered Collector. Well-behaved -// Collectors and Metrics will only provide consistent Descs. This Registry is -// useful to test the implementation of Collectors and Metrics. -func NewPedanticRegistry() *Registry { - r := NewRegistry() - r.pedanticChecksEnabled = true - return r -} - -// Registerer is the interface for the part of a registry in charge of -// registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather than the Registry type -// directly). In that way, they are free to use custom Registerer implementation -// (e.g. for testing purposes). -type Registerer interface { - // Register registers a new Collector to be included in metrics - // collection. It returns an error if the descriptors provided by the - // Collector are invalid or if they — in combination with descriptors of - // already registered Collectors — do not fulfill the consistency and - // uniqueness criteria described in the documentation of metric.Desc. - // - // If the provided Collector is equal to a Collector already registered - // (which includes the case of re-registering the same Collector), the - // returned error is an instance of AlreadyRegisteredError, which - // contains the previously registered Collector. - // - // A Collector whose Describe method does not yield any Desc is treated - // as unchecked. Registration will always succeed. No check for - // re-registering (see previous paragraph) is performed. Thus, the - // caller is responsible for not double-registering the same unchecked - // Collector, and for providing a Collector that will not cause - // inconsistent metrics on collection. (This would lead to scrape - // errors.) - Register(Collector) error - // MustRegister works like Register but registers any number of - // Collectors and panics upon the first registration that causes an - // error. - MustRegister(...Collector) - // Unregister unregisters the Collector that equals the Collector passed - // in as an argument. (Two Collectors are considered equal if their - // Describe method yields the same set of descriptors.) 
The function - // returns whether a Collector was unregistered. Note that an unchecked - // Collector cannot be unregistered (as its Describe method does not - // yield any descriptor). - // - // Note that even after unregistering, it will not be possible to - // register a new Collector that is inconsistent with the unregistered - // Collector, e.g. a Collector collecting metrics with the same name but - // a different help string. The rationale here is that the same registry - // instance must only collect consistent metrics throughout its - // lifetime. - Unregister(Collector) bool -} - -// Gatherer is the interface for the part of a registry in charge of gathering -// the collected metrics into a number of MetricFamilies. The Gatherer interface -// comes with the same general implication as described for the Registerer -// interface. -type Gatherer interface { - // Gather calls the Collect method of the registered Collectors and then - // gathers the collected metrics into a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - Gather() ([]*dto.MetricFamily, error) -} - -// Register registers the provided Collector with the DefaultRegisterer. -// -// Register is a shortcut for DefaultRegisterer.Register(c). See there for more -// details. -func Register(c Collector) error { - return DefaultRegisterer.Register(c) -} - -// MustRegister registers the provided Collectors with the DefaultRegisterer and -// panics if any error occurs. -// -// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See -// there for more details. -func MustRegister(cs ...Collector) { - DefaultRegisterer.MustRegister(cs...) -} - -// Unregister removes the registration of the provided Collector from the -// DefaultRegisterer. -// -// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for -// more details. -func Unregister(c Collector) bool { - return DefaultRegisterer.Unregister(c) -} - -// GathererFunc turns a function into a Gatherer. -type GathererFunc func() ([]*dto.MetricFamily, error) - -// Gather implements Gatherer. -func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { - return gf() -} - -// AlreadyRegisteredError is returned by the Register method if the Collector to -// be registered has already been registered before, or a different Collector -// that collects the same metrics has been registered before. 
Registration fails -// in that case, but you can detect from the kind of error what has -// happened. The error contains fields for the existing Collector and the -// (rejected) new Collector that equals the existing one. This can be used to -// find out if an equal Collector has been registered before and switch over to -// using the old one, as demonstrated in the example. -type AlreadyRegisteredError struct { - ExistingCollector, NewCollector Collector -} - -func (err AlreadyRegisteredError) Error() string { - return "duplicate metrics collector registration attempted" -} - -// MultiError is a slice of errors implementing the error interface. It is used -// by a Gatherer to report multiple errors during MetricFamily gathering. -type MultiError []error - -func (errs MultiError) Error() string { - if len(errs) == 0 { - return "" - } - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) - for _, err := range errs { - fmt.Fprintf(buf, "\n* %s", err) - } - return buf.String() -} - -// Append appends the provided error if it is not nil. -func (errs *MultiError) Append(err error) { - if err != nil { - *errs = append(*errs, err) - } -} - -// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only -// contained error as error if len(errs is 1). In all other cases, it returns -// the MultiError directly. This is helpful for returning a MultiError in a way -// that only uses the MultiError if needed. -func (errs MultiError) MaybeUnwrap() error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return errs - } -} - -// Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. -type Registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - uncheckedCollectors []Collector - pedanticChecksEnabled bool -} - -// Register implements Registerer. -func (r *Registry) Register(c Collector) error { - var ( - descChan = make(chan *Desc, capDescChan) - newDescIDs = map[uint64]struct{}{} - newDimHashesByName = map[string]uint64{} - collectorID uint64 // Just a sum of all desc IDs. - duplicateDescErr error - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - r.mtx.Lock() - defer func() { - // Drain channel in case of premature return to not leak a goroutine. - for range descChan { - } - r.mtx.Unlock() - }() - // Conduct various tests... - for desc := range descChan { - - // Is the descriptor valid at all? - if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) - } - - // Is the descID unique? - // (In other words: Is the fqName + constLabel combination unique?) - if _, exists := r.descIDs[desc.id]; exists { - duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) - } - // If it is not a duplicate desc in this collector, add it to - // the collectorID. (We allow duplicate descs within the same - // collector, but their existence must be a no-op.) - if _, exists := newDescIDs[desc.id]; !exists { - newDescIDs[desc.id] = struct{}{} - collectorID += desc.id - } - - // Are all the label names and the help string consistent with - // previous descriptors of the same name? 
- // First check existing descriptors... - if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) - } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash - } - } - } - // A Collector yielding no Desc at all is considered unchecked. - if len(newDescIDs) == 0 { - r.uncheckedCollectors = append(r.uncheckedCollectors, c) - return nil - } - if existing, exists := r.collectorsByID[collectorID]; exists { - return AlreadyRegisteredError{ - ExistingCollector: existing, - NewCollector: c, - } - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. - if duplicateDescErr != nil { - return duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return nil -} - -// Unregister implements Registerer. -func (r *Registry) Unregister(c Collector) bool { - var ( - descChan = make(chan *Desc, capDescChan) - descIDs = map[uint64]struct{}{} - collectorID uint64 // Just a sum of the desc IDs. - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID += desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. - return true -} - -// MustRegister implements Registerer. -func (r *Registry) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -// Gather implements Gatherer. -func (r *Registry) Gather() ([]*dto.MetricFamily, error) { - var ( - checkedMetricChan = make(chan Metric, capMetricChan) - uncheckedMetricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks - ) - - r.mtx.RLock() - goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) - metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - checkedCollectors := make(chan Collector, len(r.collectorsByID)) - uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) - for _, collector := range r.collectorsByID { - checkedCollectors <- collector - } - for _, collector := range r.uncheckedCollectors { - uncheckedCollectors <- collector - } - // In case pedantic checks are enabled, we have to copy the map before - // giving up the RLock. 
- if r.pedanticChecksEnabled { - registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) - for id := range r.descIDs { - registeredDescIDs[id] = struct{}{} - } - } - r.mtx.RUnlock() - - wg.Add(goroutineBudget) - - collectWorker := func() { - for { - select { - case collector := <-checkedCollectors: - collector.Collect(checkedMetricChan) - case collector := <-uncheckedCollectors: - collector.Collect(uncheckedMetricChan) - default: - return - } - wg.Done() - } - } - - // Start the first worker now to make sure at least one is running. - go collectWorker() - goroutineBudget-- - - // Close checkedMetricChan and uncheckedMetricChan once all collectors - // are collected. - go func() { - wg.Wait() - close(checkedMetricChan) - close(uncheckedMetricChan) - }() - - // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. - defer func() { - if checkedMetricChan != nil { - for range checkedMetricChan { - } - } - if uncheckedMetricChan != nil { - for range uncheckedMetricChan { - } - } - }() - - // Copy the channel references so we can nil them out later to remove - // them from the select statements below. - cmc := checkedMetricChan - umc := uncheckedMetricChan - - for { - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - default: - if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { - // All collectors are already being worked on or - // we have already as many goroutines started as - // there are collectors. Do the same as above, - // just without the default. - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - } - break - } - // Start more workers. - go collectWorker() - goroutineBudget-- - runtime.Gosched() - } - // Once both checkedMetricChan and uncheckdMetricChan are closed - // and drained, the contraption above will nil out cmc and umc, - // and then we can leave the collect loop here. - if cmc == nil && umc == nil { - break - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the -// Prometheus text format, and writes it to a temporary file. Upon success, the -// temporary file is renamed to the provided filename. -// -// This is intended for use with the textfile collector of the node exporter. -// Note that the node exporter expects the filename to be suffixed with ".prom". 
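For the textfile-collector workflow described in the WriteToTextfile comment above, a short usage sketch; the registry, metric name, and output path are illustrative, and the path only needs to end in ".prom" and be readable by the node exporter:

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	lastRun := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "cron_job_last_run_timestamp_seconds",
		Help: "Unix time of the last cron job run.",
	})
	reg.MustRegister(lastRun)
	lastRun.SetToCurrentTime()

	// Gathers from reg, encodes the result in the text format into a temporary
	// file, and renames it into place; the node exporter's textfile collector
	// expects the ".prom" suffix.
	if err := prometheus.WriteToTextfile("/var/lib/node_exporter/cron_job.prom", reg); err != nil {
		log.Fatal(err)
	}
}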
-func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) - if err != nil { - return err - } - defer os.Remove(tmp.Name()) - - mfs, err := g.Gather() - if err != nil { - return err - } - for _, mf := range mfs { - if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { - return err - } - } - if err := tmp.Close(); err != nil { - return err - } - - if err := os.Chmod(tmp.Name(), 0644); err != nil { - return err - } - return os.Rename(tmp.Name(), filename) -} - -// processMetric is an internal helper method only used by the Gather method. -func processMetric( - metric Metric, - metricFamiliesByName map[string]*dto.MetricFamily, - metricHashes map[uint64]struct{}, - registeredDescIDs map[uint64]struct{}, -) error { - desc := metric.Desc() - // Wrapped metrics collected by an unchecked Collector can have an - // invalid Desc. - if desc.err != nil { - return desc.err - } - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %s", desc, err) - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { // Existing name. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - ) - } - // TODO(beorn7): Simplify switch once Desc has type. - switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - return fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - return fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - return fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - return fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - ) - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { // New name. - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { - return err - } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { - return err - } - if registeredDescIDs != nil { - // Is the desc registered at all? 
- if _, exist := registeredDescIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - return err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - return nil -} - -// Gatherers is a slice of Gatherer instances that implements the Gatherer -// interface itself. Its Gather method calls Gather on all Gatherers in the -// slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and -// inconsistent Metrics are skipped (first occurrence in slice order wins) and -// reported in the returned error. -// -// Gatherers can be used to merge the Gather results from multiple -// Registries. It also provides a way to directly inject existing MetricFamily -// protobufs into the gathering by creating a custom Gatherer with a Gather -// method that simply returns the existing MetricFamily protobufs. Note that no -// registration is involved (in contrast to Collector registration), so -// obviously registration-time checks cannot happen. Any inconsistencies between -// the gathered MetricFamilies are reported as errors by the Gather method, and -// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies -// (e.g. syntactically invalid metric or label names) will go undetected. -type Gatherers []Gatherer - -// Gather implements Gatherer. -func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { - var ( - metricFamiliesByName = map[string]*dto.MetricFamily{} - metricHashes = map[uint64]struct{}{} - errs MultiError // The collected errors to return in the end. - ) - - for i, g := range gs { - mfs, err := g.Gather() - if err != nil { - if multiErr, ok := err.(MultiError); ok { - for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } - for _, mf := range mfs { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if exists { - if existingMF.GetHelp() != mf.GetHelp() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has help %q but should have %q", - mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), - )) - continue - } - if existingMF.GetType() != mf.GetType() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has type %s but should have %s", - mf.GetName(), mf.GetType(), existingMF.GetType(), - )) - continue - } - } else { - existingMF = &dto.MetricFamily{} - existingMF.Name = mf.Name - existingMF.Help = mf.Help - existingMF.Type = mf.Type - if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { - errs = append(errs, err) - continue - } - metricFamiliesByName[mf.GetName()] = existingMF - } - for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { - errs = append(errs, err) - continue - } - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// checkSuffixCollisions checks for collisions with the “magic” suffixes the -// Prometheus text format and the internal metric representation of the -// Prometheus server add while flattening Summaries and Histograms. 
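The Gatherers type documented earlier in this hunk merges the output of several Gatherers into one exposition and reports duplicates or inconsistencies through a flattened MultiError. A small sketch with two throwaway registries (names and metrics are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	regA := prometheus.NewRegistry()
	regB := prometheus.NewRegistry()
	regA.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_a_total", Help: "Counter owned by registry A.",
	}))
	regB.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_b_total", Help: "Counter owned by registry B.",
	}))

	// Gatherers gathers from both registries in slice order; duplicate or
	// inconsistent metric families would surface in the returned error.
	merged := prometheus.Gatherers{regA, regB}

	mfs, err := merged.Gather()
	if err != nil {
		log.Println("partial gather:", err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), mf.GetType())
	}
}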
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { - var ( - newName = mf.GetName() - newType = mf.GetType() - newNameWithoutSuffix = "" - ) - switch { - case strings.HasSuffix(newName, "_count"): - newNameWithoutSuffix = newName[:len(newName)-6] - case strings.HasSuffix(newName, "_sum"): - newNameWithoutSuffix = newName[:len(newName)-4] - case strings.HasSuffix(newName, "_bucket"): - newNameWithoutSuffix = newName[:len(newName)-7] - } - if newNameWithoutSuffix != "" { - if existingMF, ok := mfs[newNameWithoutSuffix]; ok { - switch existingMF.GetType() { - case dto.MetricType_SUMMARY: - if !strings.HasSuffix(newName, "_bucket") { - return fmt.Errorf( - "collected metric named %q collides with previously collected summary named %q", - newName, newNameWithoutSuffix, - ) - } - case dto.MetricType_HISTOGRAM: - return fmt.Errorf( - "collected metric named %q collides with previously collected histogram named %q", - newName, newNameWithoutSuffix, - ) - } - } - } - if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_count"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_count", - ) - } - if _, ok := mfs[newName+"_sum"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_sum", - ) - } - } - if newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_bucket"]; ok { - return fmt.Errorf( - "collected histogram named %q collides with previously collected metric named %q", - newName, newName+"_bucket", - ) - } - } - return nil -} - -// checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashes the Metric labels and the MetricFamily -// name. If the resulting hash is already in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. -func checkMetricConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - metricHashes map[uint64]struct{}, -) error { - name := metricFamily.GetName() - - // Type consistency with metric family. 
- if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %q { %s} is not a %s", - name, dtoMetric, metricFamily.GetType(), - ) - } - - previousLabelName := "" - for _, labelPair := range dtoMetric.GetLabel() { - labelName := labelPair.GetName() - if labelName == previousLabelName { - return fmt.Errorf( - "collected metric %q { %s} has two or more labels with the same name: %s", - name, dtoMetric, labelName, - ) - } - if !checkLabelName(labelName) { - return fmt.Errorf( - "collected metric %q { %s} has a label with an invalid name: %s", - name, dtoMetric, labelName, - ) - } - if dtoMetric.Summary != nil && labelName == quantileLabel { - return fmt.Errorf( - "collected metric %q { %s} must not have an explicit %q label", - name, dtoMetric, quantileLabel, - ) - } - if !utf8.ValidString(labelPair.GetValue()) { - return fmt.Errorf( - "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", - name, dtoMetric, labelName, labelPair.GetValue()) - } - previousLabelName = labelName - } - - // Is the metric unique (i.e. no other metric with the same name and the same labels)? - h := hashNew() - h = hashAdd(h, name) - h = hashAddByte(h, separatorByte) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. - if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { - // We cannot sort dtoMetric.Label in place as it is immutable by contract. - copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) - copy(copiedLabels, dtoMetric.Label) - sort.Sort(labelPairSorter(copiedLabels)) - dtoMetric.Label = copiedLabels - } - for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetName()) - h = hashAddByte(h, separatorByte) - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) - } - if _, exists := metricHashes[h]; exists { - return fmt.Errorf( - "collected metric %q { %s} was collected before with the same name and label values", - name, dtoMetric, - ) - } - metricHashes[h] = struct{}{} - return nil -} - -func checkDescConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - desc *Desc, -) error { - // Desc help consistency with metric family help. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? 
- lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) - copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(labelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - return nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index 2980614df..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,626 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "sort" - "sync" - "time" - - "github.com/beorn7/perks/quantile" - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// quantileLabel is used for the label that defines the quantile in a -// summary. -const quantileLabel = "quantile" - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. However, the default behavior will change in the -// upcoming v0.10 of the library. There will be no rank estimations at all by -// default. For a sane transition, it is recommended to set the desired rank -// estimations explicitly. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. - Observe(float64) -} - -// DefObjectives are the default Summary quantile values. -// -// Deprecated: DefObjectives will not be used as the default objectives in -// v0.10 of the library. The default Summary will have no quantiles then. 
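Since the comment above recommends setting Objectives explicitly rather than relying on the deprecated DefObjectives default, here is a sketch of a Summary created that way; the metric name, objective map, and MaxAge are illustrative values:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Objectives are set explicitly (quantile -> allowed absolute error),
	// mirroring the DefObjectives values instead of relying on the deprecated
	// default; MaxAge bounds how long observations stay relevant.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "request_duration_seconds",
		Help:       "Request latency in seconds.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		MaxAge:     5 * time.Minute,
	})

	for i := 0; i < 100; i++ {
		latency.Observe(float64(i) / 1000.0) // fake latencies: 0s .. 0.099s
	}
	fmt.Println("observed 100 samples")
}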
-var ( - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, - ) -) - -// Default values for SummaryOpts. -const ( - // DefMaxAge is the default duration for which observations stay - // relevant. - DefMaxAge time.Duration = 10 * time.Minute - // DefAgeBuckets is the default number of buckets used to calculate the - // age of observations. - DefAgeBuckets = 5 - // DefBufCap is the standard buffer size for collecting Summary observations. - DefBufCap = 500 -) - -// SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name to a non-empty string. While all other fields are -// optional and can safely be left at their zero value, it is recommended to set -// a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v0.10 of the library. -type SummaryOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Summary (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Summary must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Summary. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // Due to the way a Summary is represented in the Prometheus text format - // and how it is handled by the Prometheus server internally, “quantile” - // is an illegal label name. Construction of a Summary or SummaryVec - // will panic if this label name is used in ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels - ConstLabels Labels - - // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is DefObjectives. It is used if Objectives is left at - // its zero value (i.e. nil). To create a Summary without Objectives, - // set it to an empty map (i.e. map[float64]float64{}). - // - // Deprecated: Note that the current value of DefObjectives is - // deprecated. It will be replaced by an empty map in v0.10 of the - // library. Please explicitly set Objectives to the desired value. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Must be positive. The default value is DefMaxAge. - MaxAge time.Duration - - // AgeBuckets is the number of buckets used to exclude observations that - // are older than MaxAge from the summary. A higher number has a - // resource penalty, so only increase it if the higher resolution is - // really required. 
For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). - BufCap uint32 -} - -// Great fuck-up with the sliding-window decay algorithm... The Merge method of -// perk/quantile is actually not working as advertised - and it might be -// unfixable, as the underlying algorithm is apparently not capable of merging -// summaries in the first place. To avoid using Merge, we are currently adding -// observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. - -// NewSummary creates a new Summary based on the provided SummaryOpts. -func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if opts.Objectives == nil { - opts.Objectives = DefObjectives - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = DefBufCap - } - - s := &summary{ - desc: desc, - - objectives: opts.Objectives, - sortedObjectives: make([]float64, 0, len(opts.Objectives)), - - labelPairs: makeLabelPairs(desc, labelValues), - - hotBuf: make([]float64, 0, opts.BufCap), - coldBuf: make([]float64, 0, opts.BufCap), - streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), - } - s.headStreamExpTime = time.Now().Add(s.streamDuration) - s.hotBufExpTime = s.headStreamExpTime - - for i := uint32(0); i < opts.AgeBuckets; i++ { - s.streams = append(s.streams, s.newStream()) - } - s.headStream = s.streams[0] - - for qu := range s.objectives { - s.sortedObjectives = append(s.sortedObjectives, qu) - } - sort.Float64s(s.sortedObjectives) - - s.init(s) // Init self-collection. - return s -} - -type summary struct { - selfCollector - - bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. 
- mtx sync.Mutex // Protects every other moving part. - // Lock bufMtx before mtx if both are needed. - - desc *Desc - - objectives map[float64]float64 - sortedObjectives []float64 - - labelPairs []*dto.LabelPair - - sum float64 - cnt uint64 - - hotBuf, coldBuf []float64 - - streams []*quantile.Stream - streamDuration time.Duration - headStream *quantile.Stream - headStreamIdx int - headStreamExpTime, hotBufExpTime time.Time -} - -func (s *summary) Desc() *Desc { - return s.desc -} - -func (s *summary) Observe(v float64) { - s.bufMtx.Lock() - defer s.bufMtx.Unlock() - - now := time.Now() - if now.After(s.hotBufExpTime) { - s.asyncFlush(now) - } - s.hotBuf = append(s.hotBuf, v) - if len(s.hotBuf) == cap(s.hotBuf) { - s.asyncFlush(now) - } -} - -func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.objectives)) - - s.bufMtx.Lock() - s.mtx.Lock() - // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) - s.bufMtx.Unlock() - - s.flushColdBuf() - sum.SampleCount = proto.Uint64(s.cnt) - sum.SampleSum = proto.Float64(s.sum) - - for _, rank := range s.sortedObjectives { - var q float64 - if s.headStream.Count() == 0 { - q = math.NaN() - } else { - q = s.headStream.Query(rank) - } - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - s.mtx.Unlock() - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - return nil -} - -func (s *summary) newStream() *quantile.Stream { - return quantile.NewTargeted(s.objectives) -} - -// asyncFlush needs bufMtx locked. -func (s *summary) asyncFlush(now time.Time) { - s.mtx.Lock() - s.swapBufs(now) - - // Unblock the original goroutine that was responsible for the mutation - // that triggered the compaction. But hold onto the global non-buffer - // state mutex until the operation finishes. - go func() { - s.flushColdBuf() - s.mtx.Unlock() - }() -} - -// rotateStreams needs mtx AND bufMtx locked. -func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. -func (s *summary) flushColdBuf() { - for _, v := range s.coldBuf { - for _, stream := range s.streams { - stream.Insert(v) - } - s.cnt++ - s.sum += v - } - s.coldBuf = s.coldBuf[0:0] - s.maybeRotateStreams() -} - -// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. -func (s *summary) swapBufs(now time.Time) { - if len(s.coldBuf) != 0 { - panic("coldBuf is not empty") - } - s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf - // hotBuf is now empty and gets new expiration set. - for now.After(s.hotBufExpTime) { - s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) - } -} - -type quantSort []*dto.Quantile - -func (s quantSort) Len() int { - return len(s) -} - -func (s quantSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s quantSort) Less(i, j int) bool { - return s[i].GetQuantile() < s[j].GetQuantile() -} - -// SummaryVec is a Collector that bundles a set of Summaries that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. 
HTTP request latencies, partitioned by status code and method). Create -// instances with NewSummaryVec. -type SummaryVec struct { - *metricVec -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. -// -// Due to the way a Summary is represented in the Prometheus text format and how -// it is handled by the Prometheus server internally, “quantile” is an illegal -// label name. NewSummaryVec will panic if this label name is used. -func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { - if ln == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &SummaryVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Summary for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Summary is created. -// -// It is possible to call this method without using the returned Summary to only -// create the new Summary but leave it at its starting value, a Summary without -// any observations. -// -// Keeping the Summary for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Summary from the SummaryVec. In that case, -// the Summary will still exist, but it will not be exported anymore, even if a -// Summary with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Summary for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Summary is created. Implications of -// creating a Summary without using it and keeping the Summary for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. 
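A brief usage sketch for the SummaryVec accessors described above, using both the error-returning form and the panicking WithLabelValues shortcut documented further down in this file; the metric and label names are placeholders:

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latencies := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name:       "http_request_duration_seconds",
		Help:       "Request latency partitioned by code and method.",
		Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
	}, []string{"code", "method"})
	prometheus.MustRegister(latencies)

	// Error-returning accessor: label values in the same order as the label
	// names declared above.
	obs, err := latencies.GetMetricWithLabelValues("404", "GET")
	if err != nil {
		log.Fatal(err)
	}
	obs.Observe(0.042)

	// Panicking shortcut for the same lookup.
	latencies.WithLabelValues("200", "POST").Observe(0.021)
}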
-func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { - s, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return s -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *SummaryVec) With(labels Labels) Observer { - s, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return s -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the SummaryVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &SummaryVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constSummary struct { - desc *Desc - count uint64 - sum float64 - quantiles map[float64]float64 - labelPairs []*dto.LabelPair -} - -func (s *constSummary) Desc() *Desc { - return s.desc -} - -func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.quantiles)) - - sum.SampleCount = proto.Uint64(s.count) - sum.SampleSum = proto.Float64(s.sum) - - for rank, q := range s.quantiles { - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - - return nil -} - -// NewConstSummary returns a metric representing a Prometheus summary with fixed -// values for the count, sum, and quantiles. As those parameters cannot be -// changed, the returned value does not implement the Summary interface (but -// only the Metric interface). Users of this package will not have much use for -// it in regular operations. However, when implementing custom Collectors, it is -// useful as a throw-away metric that is generated on the fly to send it to -// Prometheus in the Collect method. -// -// quantiles maps ranks to quantile values. 
For example, a median latency of -// 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} -// -// NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. -func NewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstSummary is a version of NewConstSummary that panics where -// NewConstMetric would have returned an error. -func MustNewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) Metric { - m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) - if err != nil { - panic(err) - } - return m -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go deleted file mode 100644 index 8d5f10523..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "time" - -// Timer is a helper type to time functions. Use NewTimer to create new -// instances. -type Timer struct { - begin time.Time - observer Observer -} - -// NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the -// following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } -func NewTimer(o Observer) *Timer { - return &Timer{ - begin: time.Now(), - observer: o, - } -} - -// ObserveDuration records the duration passed since the Timer was created with -// NewTimer. It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. The observed -// duration is also returned. ObserveDuration is usually called with a defer -// statement. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. 
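
The NewConstSummary/MustNewConstSummary constructors documented above are aimed at custom Collectors; a hedged sketch of that pattern follows, with the queue name and numbers invented for illustration:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // queueCollector is a hypothetical custom Collector that reports a
    // summary it obtains pre-aggregated from some external system.
    type queueCollector struct {
        desc *prometheus.Desc
    }

    func newQueueCollector() *queueCollector {
        return &queueCollector{
            desc: prometheus.NewDesc(
                "queue_wait_seconds",
                "Wait time in the queue, pre-aggregated externally.",
                []string{"queue"}, nil,
            ),
        }
    }

    func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

    func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
        // count, sum and quantiles would normally come from the external system.
        ch <- prometheus.MustNewConstSummary(
            c.desc,
            4711,   // sample count
            403.34, // sample sum
            map[float64]float64{0.5: 0.23, 0.99: 0.56}, // rank -> quantile value
            "email", // value for the "queue" label
        )
    }

    func main() {
        prometheus.MustRegister(newQueueCollector())
    }
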
-func (t *Timer) ObserveDuration() time.Duration { - d := time.Since(t.begin) - if t.observer != nil { - t.observer.Observe(d.Seconds()) - } - return d -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go deleted file mode 100644 index 0f9ce63f4..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// UntypedOpts is an alias for Opts. See there for doc comments. -type UntypedOpts Opts - -// UntypedFunc works like GaugeFunc but the collected metric is of type -// "Untyped". UntypedFunc is useful to mirror an external metric of unknown -// type. -// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go deleted file mode 100644 index eb248f108..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// ValueType is an enumeration of metric types that represent a simple value. -type ValueType int - -// Possible values for the ValueType enum. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. 
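
The deleted timer.go and untyped.go are small convenience wrappers; a sketch of both, assuming a hypothetical summary named taskDuration and an externally sourced value of unknown type:

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    var taskDuration = prometheus.NewSummary(prometheus.SummaryOpts{
        Name: "task_duration_seconds",
        Help: "Duration of the background task (hypothetical).",
    })

    // timeTask records how long doWork takes, using the Timer helper removed above.
    func timeTask() {
        timer := prometheus.NewTimer(taskDuration)
        defer timer.ObserveDuration()
        doWork()
    }

    func doWork() { time.Sleep(10 * time.Millisecond) }

    // mirrorExternal exposes a value of unknown metric type via NewUntypedFunc.
    var mirrorExternal = prometheus.NewUntypedFunc(prometheus.UntypedOpts{
        Name: "external_subsystem_value",
        Help: "Value mirrored from an external system of unknown metric type.",
    }, func() float64 {
        return 42 // would normally query the external system
    })

    func main() {
        prometheus.MustRegister(taskDuration, mirrorExternal)
        timeTask()
    }
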
Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - selfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: makeLabelPairs(desc, nil), - } - result.init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, out) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc or if Desc is -// invalid. -func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, out) -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - m *dto.Metric, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v)} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels) == 0 { - // Moderately fast path. 
- return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(labelValues[i]), - }) - } - labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) - return labelPairs -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index 14ed9e856..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" - - "github.com/prometheus/common/model" -) - -// metricVec is a Collector to bundle metrics of the same name that differ in -// their label values. metricVec is not used directly (and therefore -// unexported). It is used as a building block for implementations of vectors of -// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. -// It also handles label currying. It uses basicMetricVec internally. -type metricVec struct { - *metricMap - - curry []curriedLabelValue - - // hashAdd and hashAddByte can be replaced for testing collision handling. - hashAdd func(h uint64, s string) uint64 - hashAddByte func(h uint64, b byte) uint64 -} - -// newMetricVec returns an initialized metricVec. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { - return &metricVec{ - metricMap: &metricMap{ - metrics: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - }, - hashAdd: hashAdd, - hashAddByte: hashAddByte, - } -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *metricVec) DeleteLabelValues(lvs ...string) bool { - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. 
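
NewConstMetric and the ValueType enum above serve the same custom-Collector use case as NewConstSummary; a minimal, hypothetical Collect method showing how a value type is selected:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    var diskDesc = prometheus.NewDesc(
        "disk_used_bytes", "Bytes used per disk (hypothetical).",
        []string{"device"}, nil,
    )

    type diskCollector struct{}

    func (diskCollector) Describe(ch chan<- *prometheus.Desc) { ch <- diskDesc }

    func (diskCollector) Collect(ch chan<- prometheus.Metric) {
        // GaugeValue selects the dto.Gauge branch of populateMetric above;
        // CounterValue and UntypedValue work the same way.
        ch <- prometheus.MustNewConstMetric(diskDesc, prometheus.GaugeValue, 7.3e9, "sda")
        ch <- prometheus.MustNewConstMetric(diskDesc, prometheus.GaugeValue, 1.1e9, "sdb")
    }

    func main() {
        prometheus.MustRegister(diskCollector{})
    }
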
-// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. However, such inconsistent Labels -// can never match an actual metric, so the method will always return false in -// that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *metricVec) Delete(labels Labels) bool { - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) -} - -func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { - var ( - newCurry []curriedLabelValue - oldCurry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { - if ok { - return nil, fmt.Errorf("label name %q is already curried", label) - } - newCurry = append(newCurry, oldCurry[iCurry]) - iCurry++ - } else { - if !ok { - continue // Label stays uncurried. - } - newCurry = append(newCurry, curriedLabelValue{i, val}) - } - } - if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { - return nil, fmt.Errorf("%d unknown label(s) found during currying", l) - } - - return &metricVec{ - metricMap: m.metricMap, - curry: newCurry, - hashAdd: m.hashAdd, - hashAddByte: m.hashAddByte, - }, nil -} - -func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil -} - -func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil -} - -func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iVals, iCurry int - ) - for i := 0; i < len(m.desc.variableLabels); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - h = m.hashAdd(h, vals[iVals]) - iVals++ - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *metricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(curry) && curry[iCurry].index == i { - if ok { - return 0, fmt.Errorf("label name %q is already curried", label) - } - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric -} - -// curriedLabelValue sets the curried value for a label at the given index. -type curriedLabelValue struct { - index int - value string -} - -// metricMap is a helper for metricVec and shared between differently curried -// metricVecs. 
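
The metricVec plumbing above backs the public *Vec types; a sketch of the deletion and currying behaviour it implements, shown with a CounterVec whose names are purely illustrative:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        requests := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "Requests by handler and method (hypothetical).",
        }, []string{"handler", "method"})

        requests.WithLabelValues("/login", "POST").Inc()

        // Delete by label values (order must match the label names above) ...
        fmt.Println(requests.DeleteLabelValues("/login", "POST")) // true

        // ... or by a Labels map, which is order-independent.
        requests.WithLabelValues("/login", "GET").Inc()
        fmt.Println(requests.Delete(prometheus.Labels{"handler": "/login", "method": "GET"})) // true

        // Currying pre-sets one label; the curried vec shares metrics with the original.
        loginOnly := requests.MustCurryWith(prometheus.Labels{"handler": "/login"})
        loginOnly.WithLabelValues("GET").Inc()
    }
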
-type metricMap struct { - mtx sync.RWMutex // Protects metrics. - metrics map[uint64][]metricWithLabelValues - desc *Desc - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. It will send exactly one Desc to the provided -// channel. -func (m *metricMap) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. -func (m *metricMap) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metrics := range m.metrics { - for _, metric := range metrics { - ch <- metric.metric - } - } -} - -// Reset deletes all metrics in this vector. -func (m *metricMap) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.metrics { - delete(m.metrics, h) - } -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *metricMap) deleteByHashWithLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - - i := findMetricWithLabelValues(metrics, lvs, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.metrics, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. If there -// are multiple matches in the bucket, use lvs to select a metric and remove -// only that metric. -func (m *metricMap) deleteByHashWithLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - i := findMetricWithLabels(m.desc, metrics, labels, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.metrics, h) - } - return true -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabelValues( - hash uint64, lvs []string, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) - if !ok { - inlinedLVs := inlineLabelValues(lvs, curry) - metric = m.newMetric(inlinedLVs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) - } - return metric -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabels( - hash uint64, labels Labels, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) - if !ok { - lvs := extractLabelValues(m.desc, labels, curry) - metric = m.newMetric(lvs...) 
- m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithHashAndLabelValues gets a metric while handling possible -// collisions in the hash space. Must be called while holding the read mutex. -func (m *metricMap) getMetricWithHashAndLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithHashAndLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *metricMap) getMetricWithHashAndLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. -func findMetricWithLabelValues( - metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabelValues(metric.values, lvs, curry) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. -func findMetricWithLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabels(desc, metric.values, labels, curry) { - return i - } - } - return len(metrics) -} - -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { - if len(values) != len(lvs)+len(curry) { - return false - } - var iLVs, iCurry int - for i, v := range values { - if iCurry < len(curry) && curry[iCurry].index == i { - if v != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if v != lvs[iLVs] { - return false - } - iLVs++ - } - return true -} - -func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - if len(values) != len(labels)+len(curry) { - return false - } - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - if values[i] != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if values[i] != labels[k] { - return false - } - } - return true -} - -func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { - labelValues := make([]string, len(labels)+len(curry)) - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = labels[k] - } - return labelValues -} - -func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { - labelValues := make([]string, len(lvs)+len(curry)) - var iCurry, iLVs int - for i := range labelValues { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = lvs[iLVs] - iLVs++ - } - return labelValues -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go deleted file mode 100644 index 49159bf3e..000000000 --- 
a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// WrapRegistererWith returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided Labels to all Metrics it collects (as -// ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. -// -// WrapRegistererWith provides a way to add fixed labels to a subset of -// Collectors. It should not be used to add fixed labels to all metrics exposed. -// -// The Collector example demonstrates a use of WrapRegistererWith. -func WrapRegistererWith(labels Labels, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - labels: labels, - } -} - -// WrapRegistererWithPrefix returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided prefix to the name of all Metrics it collects. -// -// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of -// a sub-system. To make this work, register metrics of the sub-system with the -// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful -// to use the same prefix for all metrics exposed. In particular, do not prefix -// metric names that are standardized across applications, as that would break -// horizontal monitoring, for example the metrics provided by the Go collector -// (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, -// respectively.) 
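
WrapRegistererWith and WrapRegistererWithPrefix, whose documentation appears above, are usually combined to namespace a sub-system's metrics; a short sketch in which the registry, prefix, and label are assumptions for illustration:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        reg := prometheus.NewRegistry()

        // Everything registered through subReg gets the "apiserver_" prefix and a
        // fixed component label; the names here are made up for illustration.
        subReg := prometheus.WrapRegistererWithPrefix("apiserver_",
            prometheus.WrapRegistererWith(prometheus.Labels{"component": "volume"}, reg),
        )

        inflight := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "inflight_requests",
            Help: "Requests currently being served.",
        })
        subReg.MustRegister(inflight) // exposed as apiserver_inflight_requests{component="volume"}
        inflight.Inc()
    }
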
-func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - prefix: prefix, - } -} - -type wrappingRegisterer struct { - wrappedRegisterer Registerer - prefix string - labels Labels -} - -func (r *wrappingRegisterer) Register(c Collector) error { - return r.wrappedRegisterer.Register(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -func (r *wrappingRegisterer) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -func (r *wrappingRegisterer) Unregister(c Collector) bool { - return r.wrappedRegisterer.Unregister(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -type wrappingCollector struct { - wrappedCollector Collector - prefix string - labels Labels -} - -func (c *wrappingCollector) Collect(ch chan<- Metric) { - wrappedCh := make(chan Metric) - go func() { - c.wrappedCollector.Collect(wrappedCh) - close(wrappedCh) - }() - for m := range wrappedCh { - ch <- &wrappingMetric{ - wrappedMetric: m, - prefix: c.prefix, - labels: c.labels, - } - } -} - -func (c *wrappingCollector) Describe(ch chan<- *Desc) { - wrappedCh := make(chan *Desc) - go func() { - c.wrappedCollector.Describe(wrappedCh) - close(wrappedCh) - }() - for desc := range wrappedCh { - ch <- wrapDesc(desc, c.prefix, c.labels) - } -} - -type wrappingMetric struct { - wrappedMetric Metric - prefix string - labels Labels -} - -func (m *wrappingMetric) Desc() *Desc { - return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) -} - -func (m *wrappingMetric) Write(out *dto.Metric) error { - if err := m.wrappedMetric.Write(out); err != nil { - return err - } - if len(m.labels) == 0 { - // No wrapping labels. - return nil - } - for ln, lv := range m.labels { - out.Label = append(out.Label, &dto.LabelPair{ - Name: proto.String(ln), - Value: proto.String(lv), - }) - } - sort.Sort(labelPairSorter(out.Label)) - return nil -} - -func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { - constLabels := Labels{} - for _, lp := range desc.constLabelPairs { - constLabels[*lp.Name] = *lp.Value - } - for ln, lv := range labels { - if _, alreadyUsed := constLabels[ln]; alreadyUsed { - return &Desc{ - fqName: desc.fqName, - help: desc.help, - variableLabels: desc.variableLabels, - constLabelPairs: desc.constLabelPairs, - err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), - } - } - constLabels[ln] = lv - } - // NewDesc will do remaining validations. - newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) - // Propagate errors if there was any. This will override any errer - // created by NewDesc above, i.e. earlier errors get precedence. - if desc.err != nil { - newDesc.err = desc.err - } - return newDesc -} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/client_model/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE deleted file mode 100644 index 20110e410..000000000 --- a/vendor/github.com/prometheus/client_model/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Data model artifacts for Prometheus. -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index 9805432c2..000000000 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,629 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto - -package io_prometheus_client // import "github.com/prometheus/client_model/go" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type MetricType int32 - -const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 - MetricType_HISTOGRAM MetricType = 4 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} -func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} -} - -type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} -} -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (dst *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(dst, src) -} -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo - 
-func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} -} -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (dst *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(dst, src) -} -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} -} -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (dst *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(dst, src) -} -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) -} -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) -} - -var xxx_messageInfo_Counter proto.InternalMessageInfo - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} -} -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (dst *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(dst, src) -} -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) -} -func (m *Quantile) 
XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) -} - -var xxx_messageInfo_Quantile proto.InternalMessageInfo - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile - } - return 0 -} - -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} -} -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (dst *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(dst, src) -} -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) -} - -var xxx_messageInfo_Summary proto.InternalMessageInfo - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} -} -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (dst *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(dst, src) -} -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) -} -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) -} - -var xxx_messageInfo_Untyped proto.InternalMessageInfo - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - 
XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} -} -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (dst *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(dst, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket - } - return nil -} - -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} -} -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (dst *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(dst, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) -} -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_Bucket proto.InternalMessageInfo - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount - } - return 0 -} - -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound - } - return 0 -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } 
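
The generated client_model types being deleted here are the wire structs that a metric's Write method fills in; a sketch of inspecting one, using a hypothetical summary and the generated getters defined above:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        s := prometheus.NewSummary(prometheus.SummaryOpts{
            Name:       "demo_latency_seconds",
            Help:       "Hypothetical latency summary.",
            Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
        })
        s.Observe(0.23)
        s.Observe(0.56)

        // Write populates the generated dto.Metric / dto.Summary structs.
        var m dto.Metric
        if err := s.Write(&m); err == nil {
            fmt.Println(m.GetSummary().GetSampleCount(), m.GetSummary().GetSampleSum())
            for _, q := range m.GetSummary().GetQuantile() {
                fmt.Println(q.GetQuantile(), q.GetValue())
            }
        }
    }
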
-func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (dst *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(dst, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary - } - return nil -} - -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram - } - return nil -} - -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs - } - return 0 -} - -type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} -} -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (dst *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(dst, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) -} -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -func init() { - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - 
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) -} - -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } - -var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ - // 591 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, - 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, - 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, - 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, - 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, - 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, - 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, - 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, - 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, - 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, - 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, - 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, - 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, - 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, - 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, - 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, - 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, - 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, - 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, - 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, - 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, - 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, - 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, - 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, - 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, - 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, - 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, - 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 
0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, - 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, - 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, - 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, - 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, - 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, - 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, - 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, - 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, - 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, -} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/common/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE deleted file mode 100644 index 636a2c1a5..000000000 --- a/vendor/github.com/prometheus/common/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Common libraries shared by Prometheus Go components. -Copyright 2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go deleted file mode 100644 index c092723e8..000000000 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "math" - "mime" - "net/http" - - dto "github.com/prometheus/client_model/go" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/model" -) - -// Decoder types decode an input stream into metric families. -type Decoder interface { - Decode(*dto.MetricFamily) error -} - -// DecodeOptions contains options used by the Decoder and in sample extraction. -type DecodeOptions struct { - // Timestamp is added to each value from the stream that has no explicit timestamp set. - Timestamp model.Time -} - -// ResponseFormat extracts the correct format from a HTTP response header. -// If no matching format can be found FormatUnknown is returned. -func ResponseFormat(h http.Header) Format { - ct := h.Get(hdrContentType) - - mediatype, params, err := mime.ParseMediaType(ct) - if err != nil { - return FmtUnknown - } - - const textType = "text/plain" - - switch mediatype { - case ProtoType: - if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown - } - return FmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown - } - return FmtText - } - - return FmtUnknown -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} - } - return &textDecoder{r: r} -} - -// protoDecoder implements the Decoder interface for protocol buffers. -type protoDecoder struct { - r io.Reader -} - -// Decode implements the Decoder interface. -func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { - return err - } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { - return fmt.Errorf("invalid metric name %q", v.GetName()) - } - for _, m := range v.GetMetric() { - if m == nil { - continue - } - for _, l := range m.GetLabel() { - if l == nil { - continue - } - if !model.LabelValue(l.GetValue()).IsValid() { - return fmt.Errorf("invalid label value %q", l.GetValue()) - } - if !model.LabelName(l.GetName()).IsValid() { - return fmt.Errorf("invalid label name %q", l.GetName()) - } - } - } - return nil -} - -// textDecoder implements the Decoder interface for the text protocol. -type textDecoder struct { - r io.Reader - p TextParser - fams []*dto.MetricFamily -} - -// Decode implements the Decoder interface. -func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. 
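// A small usage sketch for the ResponseFormat and NewDecoder functions above:
// inspect the Content-Type of a scrape response and build a matching decoder.
// The target URL is hypothetical and error handling is kept minimal.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	resp, err := http.Get("http://localhost:9100/metrics") // hypothetical scrape target
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	format := expfmt.ResponseFormat(resp.Header)
	if format == expfmt.FmtUnknown {
		log.Fatal("cannot determine exposition format from Content-Type")
	}

	dec := expfmt.NewDecoder(resp.Body, format)
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if err == io.EOF {
				break // end of the metric stream
			}
			log.Fatal(err)
		}
		fmt.Println(mf.GetName(), mf.GetType())
	}
}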
- fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) - } - } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil -} - -// SampleDecoder wraps a Decoder to extract samples from the metric families -// decoded by the wrapped Decoder. -type SampleDecoder struct { - Dec Decoder - Opts *DecodeOptions - - f dto.MetricFamily -} - -// Decode calls the Decode method of the wrapped Decoder and then extracts the -// samples from the decoded MetricFamily into the provided model.Vector. -func (sd *SampleDecoder) Decode(s *model.Vector) error { - err := sd.Dec.Decode(&sd.f) - if err != nil { - return err - } - *s, err = extractSamples(&sd.f, sd.Opts) - return err -} - -// ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurrs during sample extraction, it continues to -// extract from the remaining metric families. The returned error is the last -// error that has occurred. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { - var ( - all model.Vector - lastErr error - ) - for _, f := range fams { - some, err := extractSamples(f, o) - if err != nil { - lastErr = err - continue - } - all = append(all, some...) - } - return all, lastErr -} - -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { - switch f.GetType() { - case dto.MetricType_COUNTER: - return extractCounter(o, f), nil - case dto.MetricType_GAUGE: - return extractGauge(o, f), nil - case dto.MetricType_SUMMARY: - return extractSummary(o, f), nil - case dto.MetricType_UNTYPED: - return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f), nil - } - return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) -} - -func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Counter.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Gauge.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range 
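// A sketch of the SampleDecoder defined above: wrap a text-format Decoder and pull
// model.Vector samples out of it, stamping samples that carry no explicit timestamp
// with DecodeOptions.Timestamp. The input payload and names are illustrative only.
package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	in := strings.NewReader(`# HELP http_requests_total Total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1027
http_requests_total{code="500"} 3
`)

	sd := expfmt.SampleDecoder{
		Dec:  expfmt.NewDecoder(in, expfmt.FmtText),
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	for {
		var v model.Vector
		if err := sd.Decode(&v); err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(err)
		}
		for _, s := range v {
			// Each sample carries the metric name label plus the original labels.
			fmt.Printf("%s => %v @ %v\n", s.Metric, s.Value, s.Timestamp)
		}
	}
}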
f.Metric { - if m.Untyped == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Untyped.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range m.Summary.Quantile { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". - lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - }) - } - - return samples -} - -func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: 
timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - count := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - if !infSeen { - // Append an infinity bucket sample. - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: count.Value, - Timestamp: timestamp, - }) - } - } - - return samples -} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index 11839ed65..000000000 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" - - dto "github.com/prometheus/client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -type encoder func(*dto.MetricFamily) error - -func (e encoder) Encode(v *dto.MetricFamily) error { - return e(v) -} - -// Negotiate returns the Content-Type based on the given Accept header. -// If no appropriate accepted type is found, FmtText is returned. -func Negotiate(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - // Check for protocol buffer - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - // Check for text format. - ver := ac.Params["version"] - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - } - return FmtText -} - -// NewEncoder returns a new encoder based on content type negotiation. 
-func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: - return encoder(func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }) - case FmtProtoCompact: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }) - case FmtProtoText: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }) - case FmtText: - return encoder(func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }) - } - panic("expfmt.NewEncoder: unknown format") -} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index c71bcb981..000000000 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package expfmt contains tools for reading and writing Prometheus metrics. -package expfmt - -// Format specifies the HTTP content type of the different wire protocols. -type Format string - -// Constants to assemble the Content-Type values for the different wire protocols. -const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go deleted file mode 100644 index dc2eedeef..000000000 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
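// A sketch of how Negotiate and NewEncoder above fit together in an HTTP handler:
// pick a Format from the client's Accept header, advertise it as the Content-Type,
// and stream each MetricFamily through the matching encoder. gatherFamilies is a
// hypothetical helper standing in for whatever produces the []*dto.MetricFamily.
package main

import (
	"log"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func gatherFamilies() []*dto.MetricFamily {
	// Hypothetical: return whatever metric families the application has collected.
	return nil
}

func metricsHandler(w http.ResponseWriter, r *http.Request) {
	format := expfmt.Negotiate(r.Header) // falls back to FmtText
	// The Format constants are complete Content-Type values, so they can be set directly.
	w.Header().Set("Content-Type", string(format))

	enc := expfmt.NewEncoder(w, format)
	for _, mf := range gatherFamilies() {
		if err := enc.Encode(mf); err != nil {
			log.Println("encode:", err)
			return
		}
	}
}

func main() {
	http.HandleFunc("/metrics", metricsHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}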
- -// Build only when actually fuzzing -// +build gofuzz - -package expfmt - -import "bytes" - -// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: -// -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz -// -// Further input samples should go in the folder fuzz/corpus. -func Fuzz(in []byte) int { - parser := TextParser{} - _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index 8e473d0fe..000000000 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - "sync" - - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer -// implements it. -type enhancedWriter interface { - io.Writer - WriteRune(r rune) (n int, err error) - WriteString(s string) (n int, err error) - WriteByte(c byte) error -} - -const ( - initialBufSize = 512 - initialNumBufSize = 24 -) - -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, initialBufSize)) - }, - } - numBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0, initialNumBufSize) - return &b - }, - } -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. The output will have the same order as the input, -// no further sorting is performed. Furthermore, this function assumes the input -// is already sanitized and does not perform any sanity checks. If the input -// contains duplicate metrics or invalid metric or label names, the conversion -// will result in invalid text format output. -// -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { - // Fail-fast checks. - if len(in.Metric) == 0 { - return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bytes.Buffer from the sync.Pool and write out its content to out in a - // single go in the end. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bytes.Buffer) - b.Reset() - w = b - defer func() { - bWritten, bErr := out.Write(b.Bytes()) - written = bWritten - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var n int - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, false) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - metricType := in.GetType() - switch metricType { - case dto.MetricType_COUNTER: - n, err = w.WriteString(" counter\n") - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Summary.GetSampleCount()), - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - 
written += n - if err != nil { - return - } - } - return -} - -// writeSample writes a single sample in text format to w, given the metric -// name, the metric proto message itself, optionally an additional label name -// with a float64 value (use empty string as label name if not required), and -// the value. The function returns the number of bytes written and any error -// encountered. -func writeSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - value float64, -) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeFloat(w, value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeInt(w, *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( - w enhancedWriter, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var ( - written int - separator byte = '{' - ) - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. 
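// A minimal sketch of MetricFamilyToText as defined above: render a single
// MetricFamily into the text exposition format. The family built here is
// illustrative; in practice it would come from a registry or a decoder.
package main

import (
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("queue_depth"),
		Help: proto.String("Current depth of the work queue."),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("queue"),
				Value: proto.String("default"),
			}},
			Gauge: &dto.Gauge{Value: proto.Float64(7)},
		}},
	}

	// Writes the HELP and TYPE comments followed by one sample line:
	//   # HELP queue_depth Current depth of the work queue.
	//   # TYPE queue_depth gauge
	//   queue_depth{queue="default"} 7
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		log.Fatal(err)
	}
}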
-var ( - escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) - quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) -) - -func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { - if includeDoubleQuote { - return quotedEscaper.WriteString(w, v) - } else { - return escaper.WriteString(w, v) - } -} - -// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes -// a few common cases for increased efficiency. For non-hardcoded cases, it uses -// strconv.AppendFloat to avoid allocations, similar to writeInt. -func writeFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return 1, w.WriteByte('1') - case f == 0: - return 1, w.WriteByte('0') - case f == -1: - return w.WriteString("-1") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeInt is equivalent to fmt.Fprint with an int64 argument but uses -// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid -// allocations. -func writeInt(w enhancedWriter, i int64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendInt((*bp)[:0], i, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index ec3d86ba7..000000000 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,757 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. -type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. 
- lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. - currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. -// -// This method must not be called concurrently. If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - // If p.err is io.EOF now, we have run into a premature end of the input - // stream. Turn this error into something nicer and more - // meaningful. (io.EOF is often used as a signal for the legitimate end - // of an input stream.) 
- if p.err == io.EOF { - p.parseError("unexpected end of input stream") - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. - for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. 
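// A sketch of the TextParser above: parse the text exposition format into
// MetricFamily messages keyed by metric name. The summary input is illustrative;
// the _sum and _count lines are folded into the declared summary family.
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	in := `# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.5"} 0.05
rpc_duration_seconds_sum 12.3
rpc_duration_seconds_count 100
`
	var parser expfmt.TextParser // the zero value is ready to use
	families, err := parser.TextToMetricFamilies(strings.NewReader(in))
	if err != nil {
		log.Fatal(err)
	}

	for name, mf := range families {
		fmt.Println(name, mf.GetType(), len(mf.GetMetric()), "metric(s)")
	}
}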
- if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - if !model.LabelValue(p.currentToken.String()).IsValid() { - p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). -func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) - if err != nil { - // Create a more helpful error message. 
- p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if p.currentMetric.Histogram == nil { - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *TextParser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) - return nil - } - p.currentMetric.TimestampMs = proto.Int64(timestamp) - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() > 0 { - p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) - return nil - } - return p.startOfLine -} - -// readingHelp represents the state where the last byte read (now in -// p.currentByte) is the first byte of the docstring after 'HELP'. -func (p *TextParser) readingHelp() stateFn { - if p.currentMF.Help != nil { - p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) - return nil - } - // Rest of line is the docstring. - if p.readTokenUntilNewline(true); p.err != nil { - return nil // Unexpected end of input. - } - p.currentMF.Help = proto.String(p.currentToken.String()) - return p.startOfLine -} - -// readingType represents the state where the last byte read (now in -// p.currentByte) is the first byte of the type hint after 'HELP'. 
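For reference (not part of the patch itself): the state functions above belong to the vendored expfmt text parser that this change drops from vendor/. A minimal sketch of how the upstream parser is normally driven, assuming the github.com/prometheus/common/expfmt module is still resolvable; the metric names and values are made up.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// A tiny exposition with a counter and a summary; the parser groups the
	// summary's quantile/_sum/_count samples into a single MetricFamily.
	const exposition = `# HELP http_requests_total Total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1027
# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.99"} 0.042
rpc_duration_seconds_sum 12.7
rpc_duration_seconds_count 330
`

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(exposition))
	if err != nil {
		log.Fatal(err)
	}
	for name, mf := range families {
		fmt.Printf("%s type=%s metrics=%d\n", name, mf.GetType(), len(mf.GetMetric()))
	}
}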
-func (p *TextParser) readingType() stateFn { - if p.currentMF.Type != nil { - p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) - return nil - } - // Rest of line is the type. - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] - if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil - } - p.currentMF.Type = dto.MetricType(metricType).Enum() - return p.startOfLine -} - -// parseError sets p.err to a ParseError at the current line with the given -// message. -func (p *TextParser) parseError(msg string) { - p.err = ParseError{ - Line: p.lineCount, - Msg: msg, - } -} - -// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte -// that is neither ' ' nor '\t'. That byte is left in p.currentByte. -func (p *TextParser) skipBlankTab() { - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { - return - } - } -} - -// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do -// anything if p.currentByte is neither ' ' nor '\t'. -func (p *TextParser) skipBlankTabIfCurrentBlankTab() { - if isBlankOrTab(p.currentByte) { - p.skipBlankTab() - } -} - -// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The -// first byte considered is the byte already read (now in p.currentByte). The -// first whitespace byte encountered is still copied into p.currentByte, but not -// into p.currentToken. -func (p *TextParser) readTokenUntilWhitespace() { - p.currentToken.Reset() - for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first -// byte considered is the byte already read (now in p.currentByte). The first -// newline byte encountered is still copied into p.currentByte, but not into -// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' translates into '\', and '\n' into a line-feed character. -// All other escape sequences are invalid and cause an error. -func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { - p.currentToken.Reset() - escaped := false - for p.err == nil { - if recognizeEscapeSequence && escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '\n': - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a metric name is still copied into p.currentByte, -// but not into p.currentToken. 
-func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - if !isValidMetricNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - if !isValidLabelNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. -func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. 
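The token readers above accept the same character sets that the exported helpers IsValidMetricName and LabelName.IsValid (deleted further down in this patch) enforce. A tiny sketch of those checks, assuming the upstream github.com/prometheus/common/model module is available:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("2xx_responses"))       // false: must not start with a digit
	fmt.Println(model.LabelName("instance").IsValid())          // true
	fmt.Println(model.LabelName("le-bucket").IsValid())         // false: '-' is not allowed
}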
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' -} - -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 26e92288c..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go deleted file mode 100644 index 35e739c7a..000000000 --- a/vendor/github.com/prometheus/common/model/alert.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model - -import ( - "fmt" - "time" -) - -type AlertStatus string - -const ( - AlertFiring AlertStatus = "firing" - AlertResolved AlertStatus = "resolved" -) - -// Alert is a generic representation of an alert in the Prometheus eco-system. -type Alert struct { - // Label value pairs for purpose of aggregation, matching, and disposition - // dispatching. This must minimally include an "alertname" label. - Labels LabelSet `json:"labels"` - - // Extra key/value information which does not define alert identity. - Annotations LabelSet `json:"annotations"` - - // The known time range for this alert. Both ends are optional. - StartsAt time.Time `json:"startsAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` - GeneratorURL string `json:"generatorURL"` -} - -// Name returns the name of the alert. It is equivalent to the "alertname" label. -func (a *Alert) Name() string { - return string(a.Labels[AlertNameLabel]) -} - -// Fingerprint returns a unique hash for the alert. It is equivalent to -// the fingerprint of the alert's label set. -func (a *Alert) Fingerprint() Fingerprint { - return a.Labels.Fingerprint() -} - -func (a *Alert) String() string { - s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) - if a.Resolved() { - return s + "[resolved]" - } - return s + "[active]" -} - -// Resolved returns true iff the activity interval ended in the past. -func (a *Alert) Resolved() bool { - return a.ResolvedAt(time.Now()) -} - -// ResolvedAt returns true off the activity interval ended before -// the given timestamp. -func (a *Alert) ResolvedAt(ts time.Time) bool { - if a.EndsAt.IsZero() { - return false - } - return !a.EndsAt.After(ts) -} - -// Status returns the status of the alert. -func (a *Alert) Status() AlertStatus { - if a.Resolved() { - return AlertResolved - } - return AlertFiring -} - -// Validate checks whether the alert data is inconsistent. -func (a *Alert) Validate() error { - if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) - } - if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") - } - if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) - } - return nil -} - -// Alert is a list of alerts that can be sorted in chronological order. -type Alerts []*Alert - -func (as Alerts) Len() int { return len(as) } -func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } - -func (as Alerts) Less(i, j int) bool { - if as[i].StartsAt.Before(as[j].StartsAt) { - return true - } - if as[i].EndsAt.Before(as[j].EndsAt) { - return true - } - return as[i].Fingerprint() < as[j].Fingerprint() -} - -// HasFiring returns true iff one of the alerts is not resolved. -func (as Alerts) HasFiring() bool { - for _, a := range as { - if !a.Resolved() { - return true - } - } - return false -} - -// Status returns StatusFiring iff at least one of the alerts is firing. 
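For reference, a short sketch of the Alert type removed in this hunk: it builds an alert with made-up labels, validates it, and reads its name, status, and resolution state. It assumes the upstream github.com/prometheus/common/model module and is not part of the patch.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	alert := &model.Alert{
		Labels: model.LabelSet{
			model.AlertNameLabel: "HighErrorRate",
			"severity":           "page",
		},
		Annotations: model.LabelSet{"summary": "error rate above 5%"},
		StartsAt:    time.Now().Add(-10 * time.Minute),
		// EndsAt left zero: the alert is still active.
	}

	if err := alert.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}
	fmt.Println(alert.Name(), alert.Status(), alert.Resolved()) // HighErrorRate firing false
}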
-func (as Alerts) Status() AlertStatus { - if as.HasFiring() { - return AlertFiring - } - return AlertResolved -} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de4106..000000000 --- a/vendor/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. -func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. -func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. -func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go deleted file mode 100644 index 038fc1c90..000000000 --- a/vendor/github.com/prometheus/common/model/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go deleted file mode 100644 index 41051a01a..000000000 --- a/vendor/github.com/prometheus/common/model/labels.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - "unicode/utf8" -) - -const ( - // AlertNameLabel is the name of the label containing the an alert's name. - AlertNameLabel = "alertname" - - // ExportedLabelPrefix is the prefix to prepend to the label names present in - // exported metrics if a label of the same name is added by the server. - ExportedLabelPrefix = "exported_" - - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel = "__name__" - - // SchemeLabel is the name of the label that holds the scheme on which to - // scrape a target. - SchemeLabel = "__scheme__" - - // AddressLabel is the name of the label that holds the address of - // a scrape target. - AddressLabel = "__address__" - - // MetricsPathLabel is the name of the label that holds the path on which to - // scrape a target. - MetricsPathLabel = "__metrics_path__" - - // ReservedLabelPrefix is a prefix which is not legal in user-supplied - // label names. - ReservedLabelPrefix = "__" - - // MetaLabelPrefix is a prefix for labels that provide meta information. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. - MetaLabelPrefix = "__meta_" - - // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. This is reserved for use in - // Prometheus configuration files by users. 
- TmpLabelPrefix = "__tmp_" - - // ParamLabelPrefix is a prefix for labels that provide URL parameters - // used to scrape a target. - ParamLabelPrefix = "__param_" - - // JobLabel is the label name indicating the job from which a timeseries - // was scraped. - JobLabel = "job" - - // InstanceLabel is the label name used for the instance label. - InstanceLabel = "instance" - - // BucketLabel is used for the label that defines the upper bound of a - // bucket of a histogram ("le" -> "less or equal"). - BucketLabel = "le" - - // QuantileLabel is used for the label that defines the quantile in a - // summary. - QuantileLabel = "quantile" -) - -// LabelNameRE is a regular expression matching valid label names. Note that the -// IsValid method of LabelName performs the same check but faster than a match -// with this regular expression. -var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") - -// A LabelName is a key for a LabelSet or Metric. It has a value associated -// therewith. -type LabelName string - -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. -func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// LabelNames is a sortable LabelName slice. In implements sort.Interface. -type LabelNames []LabelName - -func (l LabelNames) Len() int { - return len(l) -} - -func (l LabelNames) Less(i, j int) bool { - return l[i] < l[j] -} - -func (l LabelNames) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -func (l LabelNames) String() string { - labelStrings := make([]string, 0, len(l)) - for _, label := range l { - labelStrings = append(labelStrings, string(label)) - } - return strings.Join(labelStrings, ", ") -} - -// A LabelValue is an associated value for a LabelName. -type LabelValue string - -// IsValid returns true iff the string is a valid UTF8. -func (lv LabelValue) IsValid() bool { - return utf8.ValidString(string(lv)) -} - -// LabelValues is a sortable LabelValue slice. It implements sort.Interface. -type LabelValues []LabelValue - -func (l LabelValues) Len() int { - return len(l) -} - -func (l LabelValues) Less(i, j int) bool { - return string(l[i]) < string(l[j]) -} - -func (l LabelValues) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// LabelPair pairs a name with a value. -type LabelPair struct { - Name LabelName - Value LabelValue -} - -// LabelPairs is a sortable slice of LabelPair pointers. It implements -// sort.Interface. 
-type LabelPairs []*LabelPair - -func (l LabelPairs) Len() int { - return len(l) -} - -func (l LabelPairs) Less(i, j int) bool { - switch { - case l[i].Name > l[j].Name: - return false - case l[i].Name < l[j].Name: - return true - case l[i].Value > l[j].Value: - return false - case l[i].Value < l[j].Value: - return true - default: - return false - } -} - -func (l LabelPairs) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go deleted file mode 100644 index 6eda08a73..000000000 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate() error { - for ln, lv := range ls { - if !ln.IsValid() { - return fmt.Errorf("invalid name %q", ln) - } - if !lv.IsValid() { - return fmt.Errorf("invalid value %q", lv) - } - } - return nil -} - -// Equal returns true iff both label sets have exactly the same key/value pairs. -func (ls LabelSet) Equal(o LabelSet) bool { - if len(ls) != len(o) { - return false - } - for ln, lv := range ls { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. -func (ls LabelSet) Before(o LabelSet) bool { - if len(ls) < len(o) { - return true - } - if len(ls) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(ls)+len(o)) - for ln := range ls { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. - sort.Sort(lns) - for _, ln := range lns { - mlv, ok := ls[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -// Clone returns a copy of the label set. 
-func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go deleted file mode 100644 index 00804b7fe..000000000 --- a/vendor/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "regexp" - "sort" - "strings" -) - -var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) -) - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. -func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. 
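A brief sketch of the LabelSet and Metric helpers removed above (Validate, Merge, Clone, String, and the two fingerprint variants), using made-up label values and assuming the upstream github.com/prometheus/common/model module:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api", "instance": "10.0.0.1:9100"}
	extra := model.LabelSet{"region": "eu-west-1"}

	if err := base.Validate(); err != nil {
		panic(err)
	}
	merged := base.Merge(extra) // non-destructive: base is left unchanged

	m := model.Metric(merged.Clone())
	m[model.MetricNameLabel] = "up"

	fmt.Println(m) // up{instance="10.0.0.1:9100", job="api", region="eu-west-1"}
	fmt.Println(m.Fingerprint())     // stable hash of the full label set
	fmt.Println(m.FastFingerprint()) // faster, more collision-prone variant
}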
-func (m Metric) Clone() Metric { - clone := make(Metric, len(m)) - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. -// This function, however, does not use MetricNameRE for the check but a much -// faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go deleted file mode 100644 index a7b969170..000000000 --- a/vendor/github.com/prometheus/common/model/model.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go deleted file mode 100644 index 8762b13c6..000000000 --- a/vendor/github.com/prometheus/common/model/signature.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model - -import ( - "sort" -) - -// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is -// used to separate label names, label values, and other strings from each other -// when calculating their combined hash value (aka signature aka fingerprint). -const SeparatorByte byte = 255 - -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) - -// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a -// given label set. (Collisions are possible but unlikely if the number of label -// sets the function is applied to is small.) -func LabelsToSignature(labels map[string]string) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - labelNames := make([]string, 0, len(labels)) - for labelName := range labels { - labelNames = append(labelNames, labelName) - } - sort.Strings(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, labelName) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, labels[labelName]) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as -// parameter (rather than a label map) and returns a Fingerprint. -func labelSetToFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - labelNames := make(LabelNames, 0, len(ls)) - for labelName := range ls { - labelNames = append(labelNames, labelName) - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(ls[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return Fingerprint(sum) -} - -// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a -// faster and less allocation-heavy hash function, which is more susceptible to -// create hash collisions. Therefore, collision detection should be applied. -func labelSetToFastFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - var result uint64 - for labelName, labelValue := range ls { - sum := hashNew() - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(labelValue)) - result ^= sum - } - return Fingerprint(result) -} - -// SignatureForLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and only includes the labels with the -// specified LabelNames into the signature calculation. The labels passed in -// will be sorted by this function. -func SignatureForLabels(m Metric, labels ...LabelName) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - sort.Sort(LabelNames(labels)) - - sum := hashNew() - for _, label := range labels { - sum = hashAdd(sum, string(label)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[label])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and excludes the labels with any of the -// specified LabelNames from the signature calculation. 
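The signature helpers above are independent of label ordering. A small sketch with made-up labels, again assuming the upstream github.com/prometheus/common/model module:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Order-independent: both calls yield the same signature.
	fmt.Println(model.LabelsToSignature(map[string]string{"job": "api", "instance": "10.0.0.1:9100"}))
	fmt.Println(model.LabelsToSignature(map[string]string{"instance": "10.0.0.1:9100", "job": "api"}))

	// Restrict the signature to a subset of label names.
	m := model.Metric{"job": "api", "instance": "10.0.0.1:9100", "code": "200"}
	fmt.Println(model.SignatureForLabels(m, "job", "instance"))
}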
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { - if len(m) == 0 { - return emptyLabelSignature - } - - labelNames := make(LabelNames, 0, len(m)) - for labelName := range m { - if _, exclude := labels[labelName]; !exclude { - labelNames = append(labelNames, labelName) - } - } - if len(labelNames) == 0 { - return emptyLabelSignature - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go deleted file mode 100644 index bb99889d2..000000000 --- a/vendor/github.com/prometheus/common/model/silence.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "time" -) - -// Matcher describes a matches the value of a given label. -type Matcher struct { - Name LabelName `json:"name"` - Value string `json:"value"` - IsRegex bool `json:"isRegex"` -} - -func (m *Matcher) UnmarshalJSON(b []byte) error { - type plain Matcher - if err := json.Unmarshal(b, (*plain)(m)); err != nil { - return err - } - - if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return err - } - } - return nil -} - -// Validate returns true iff all fields of the matcher have valid values. -func (m *Matcher) Validate() error { - if !m.Name.IsValid() { - return fmt.Errorf("invalid name %q", m.Name) - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return fmt.Errorf("invalid regular expression %q", m.Value) - } - } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { - return fmt.Errorf("invalid value %q", m.Value) - } - return nil -} - -// Silence defines the representation of a silence definition in the Prometheus -// eco-system. -type Silence struct { - ID uint64 `json:"id,omitempty"` - - Matchers []*Matcher `json:"matchers"` - - StartsAt time.Time `json:"startsAt"` - EndsAt time.Time `json:"endsAt"` - - CreatedAt time.Time `json:"createdAt,omitempty"` - CreatedBy string `json:"createdBy"` - Comment string `json:"comment,omitempty"` -} - -// Validate returns true iff all fields of the silence have valid values. 
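For reference, a sketch of the Silence type removed here, with placeholder matcher values; the upstream github.com/prometheus/common/model module is assumed:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	s := &model.Silence{
		Matchers: []*model.Matcher{
			{Name: model.AlertNameLabel, Value: "HighErrorRate", IsRegex: false},
			{Name: "instance", Value: "10\\.0\\.0\\..*", IsRegex: true},
		},
		StartsAt:  now,
		EndsAt:    now.Add(2 * time.Hour),
		CreatedAt: now,
		CreatedBy: "oncall@example.com",
		Comment:   "deploy in progress",
	}
	fmt.Println(s.Validate()) // <nil> when all fields are consistent
}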
-func (s *Silence) Validate() error { - if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") - } - for _, m := range s.Matchers { - if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) - } - } - if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") - } - if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") - } - if s.Comment == "" { - return fmt.Errorf("comment missing") - } - if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") - } - return nil -} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go deleted file mode 100644 index 46259b1f1..000000000 --- a/vendor/github.com/prometheus/common/model/time.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -const ( - // MinimumTick is the minimum supported time resolution. This has to be - // at least time.Second in order for the code below to work. - minimumTick = time.Millisecond - // second is the Time duration equivalent to one second. - second = int64(time.Second / minimumTick) - // The number of nanoseconds per minimum tick. - nanosPerTick = int64(minimumTick / time.Nanosecond) - - // Earliest is the earliest Time representable. Handy for - // initializing a high watermark. - Earliest = Time(math.MinInt64) - // Latest is the latest Time representable. Handy for initializing - // a low watermark. - Latest = Time(math.MaxInt64) -) - -// Time is the number of milliseconds since the epoch -// (1970-01-01 00:00 UTC) excluding leap seconds. -type Time int64 - -// Interval describes an interval between two timestamps. -type Interval struct { - Start, End Time -} - -// Now returns the current time as a Time. -func Now() Time { - return TimeFromUnixNano(time.Now().UnixNano()) -} - -// TimeFromUnix returns the Time equivalent to the Unix Time t -// provided in seconds. -func TimeFromUnix(t int64) Time { - return Time(t * second) -} - -// TimeFromUnixNano returns the Time equivalent to the Unix Time -// t provided in nanoseconds. -func TimeFromUnixNano(t int64) Time { - return Time(t / nanosPerTick) -} - -// Equal reports whether two Times represent the same instant. -func (t Time) Equal(o Time) bool { - return t == o -} - -// Before reports whether the Time t is before o. -func (t Time) Before(o Time) bool { - return t < o -} - -// After reports whether the Time t is after o. -func (t Time) After(o Time) bool { - return t > o -} - -// Add returns the Time t + d. -func (t Time) Add(d time.Duration) Time { - return t + Time(d/minimumTick) -} - -// Sub returns the Duration t - o. 
-func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. -func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. -func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - *t = Time(v + va) - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} - -// Duration wraps time.Duration. It is used to parse the custom duration format -// from YAML. -// This type should not propagate beyond the scope of input/output processing. -type Duration time.Duration - -// Set implements pflag/flag.Value -func (d *Duration) Set(s string) error { - var err error - *d, err = ParseDuration(s) - return err -} - -// Type implements pflag.Value -func (d *Duration) Type() string { - return "duration" -} - -var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") - -// ParseDuration parses a string into a time.Duration, assuming that a year -// always has 365d, a week always has 7d, and a day always has 24h. 
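A brief sketch of the Time and Duration helpers removed in this hunk: millisecond-resolution timestamps plus the y/w/d/h/m/s/ms duration units used in Prometheus configs. Values are arbitrary and the upstream github.com/prometheus/common/model module is assumed.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// model.Time counts milliseconds since the Unix epoch.
	t := model.TimeFromUnix(1700000000)
	later := t.Add(90 * time.Second)
	fmt.Println(later.Sub(t))   // 1m30s
	fmt.Println(t.Time().UTC()) // the same instant as a plain time.Time

	// model.Duration parses the single-unit config format.
	d, err := model.ParseDuration("90m")
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(d), d) // 1h30m0s 90m
}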
-func ParseDuration(durationStr string) (Duration, error) { - matches := durationRE.FindStringSubmatch(durationStr) - if len(matches) != 3 { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var ( - n, _ = strconv.Atoi(matches[1]) - dur = time.Duration(n) * time.Millisecond - ) - switch unit := matches[2]; unit { - case "y": - dur *= 1000 * 60 * 60 * 24 * 365 - case "w": - dur *= 1000 * 60 * 60 * 24 * 7 - case "d": - dur *= 1000 * 60 * 60 * 24 - case "h": - dur *= 1000 * 60 * 60 - case "m": - dur *= 1000 * 60 - case "s": - dur *= 1000 - case "ms": - // Value already correct - default: - return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) - } - return Duration(dur), nil -} - -func (d Duration) String() string { - var ( - ms = int64(time.Duration(d) / time.Millisecond) - unit = "ms" - ) - if ms == 0 { - return "0s" - } - factors := map[string]int64{ - "y": 1000 * 60 * 60 * 24 * 365, - "w": 1000 * 60 * 60 * 24 * 7, - "d": 1000 * 60 * 60 * 24, - "h": 1000 * 60 * 60, - "m": 1000 * 60, - "s": 1000, - "ms": 1, - } - - switch int64(0) { - case ms % factors["y"]: - unit = "y" - case ms % factors["w"]: - unit = "w" - case ms % factors["d"]: - unit = "d" - case ms % factors["h"]: - unit = "h" - case ms % factors["m"]: - unit = "m" - case ms % factors["s"]: - unit = "s" - } - return fmt.Sprintf("%v%v", ms/factors[unit], unit) -} - -// MarshalYAML implements the yaml.Marshaler interface. -func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go deleted file mode 100644 index c9d8fb1a2..000000000 --- a/vendor/github.com/prometheus/common/model/value.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "math" - "sort" - "strconv" - "strings" -) - -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. 
- ZeroSample = Sample{Timestamp: Earliest} -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. -type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -// Equal compares first the metrics, then the timestamp, then the value. The -// semantics of value equality is defined by SampleValue.Equal. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - - return s.Value.Equal(o.Value) -} - -func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. -func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. -type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` -} - -func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) - for i, v := range ss.Values { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. -func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} - -// Scalar is a scalar value evaluated at the set timestamp. -type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. -func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. -func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. -type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore deleted file mode 100644 index 25e3659ab..000000000 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml deleted file mode 100644 index 438ca92ec..000000000 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ /dev/null @@ -1,6 +0,0 @@ -# Run only staticcheck for now. Additional linters will be enabled one-by-one. -linters: - enable: - - staticcheck - - govet - disable-all: true diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 40503edbf..000000000 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. 
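The value.go hunk deleted above defines the query-result data model (SampleValue, SamplePair, Sample, Vector, Matrix, Scalar, String) together with its JSON wire format, in which a sample pair is encoded as [timestamp, "value"] with the float quoted. An illustrative sketch of that behaviour, again assuming the upstream github.com/prometheus/common/model module stands in for the removed vendored copy:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.Sample{
		Metric:    model.Metric{"__name__": "up", "job": "api"},
		Value:     1,
		Timestamp: model.TimeFromUnix(1418183276),
	}
	b, _ := json.Marshal(s)
	fmt.Println(string(b)) // e.g. {"metric":{"__name__":"up","job":"api"},"value":[1418183276.000,"1"]}

	// Query results are handled generically through the Value interface.
	var v model.Value = model.Vector{&s}
	switch r := v.(type) {
	case model.Vector:
		fmt.Println("vector with", len(r), "sample(s)")
	case model.Matrix, *model.Scalar, *model.String:
		// The other result types declared in the removed file.
	}
}
```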
- -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) the maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/procfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md deleted file mode 100644 index f1d3b9937..000000000 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ /dev/null @@ -1,2 +0,0 @@ -* Tobias Schmidt @grobie -* Johannes 'fish' Ziemke @discordianfish diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index 314d1ba56..000000000 --- a/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include Makefile.common - -%/.unpacked: %.ttar - ./ttar -C $(dir $*) -x -f $*.ttar - touch $@ - -update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ - -.PHONY: build -build: - -.PHONY: test -test: fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common deleted file mode 100644 index 73052b3c0..000000000 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! 
Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. -# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = ./... - -ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif - -PROMU_VERSION ?= 0.3.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -GOLANGCI_LINT := -GOLANGCI_LINT_VERSION ?= v1.16.0 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. -# windows isn't included here because of the path separator being different. -ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - endif -endif - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKER_REPO ?= prom - -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. -%: common-% ; - -.PHONY: common-all -common-all: precheck style check_license lint unused build test - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . 
-path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif - -.PHONY: common-test-short -common-test-short: - @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: - @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) - -.PHONY: common-format -common-format: - @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-lint -common-lint: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint" -ifdef GO111MODULE -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) -endif -endif - -# For backward-compatibility. -.PHONY: common-staticcheck -common-staticcheck: lint - -.PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE - @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) - @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - . 
- -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" - -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - -.PHONY: common-docker-manifest -common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) - -.PHONY: proto -proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -ifdef GOLANGCI_LINT -$(GOLANGCI_LINT): - mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) -endif - -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ - exit 1; \ - fi -endef diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE deleted file mode 100644 index 53c5e9aa1..000000000 --- a/vendor/github.com/prometheus/procfs/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 209549471..000000000 --- a/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# procfs - -This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. 
- -[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go deleted file mode 100644 index d3a826807..000000000 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// A BuddyInfo is the details parsed from /proc/buddyinfo. -// The data is comprised of an array of free fragments of each size. -// The sizes are 2^n*PAGE_SIZE, where n is the array index. -type BuddyInfo struct { - Node string - Zone string - Sizes []float64 -} - -// NewBuddyInfo reads the buddyinfo statistics. -func NewBuddyInfo() ([]BuddyInfo, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewBuddyInfo() -} - -// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. -func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.Path("buddyinfo")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseBuddyInfo(file) -} - -func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { - var ( - buddyInfo = []BuddyInfo{} - scanner = bufio.NewScanner(r) - bucketCount = -1 - ) - - for scanner.Scan() { - var err error - line := scanner.Text() - parts := strings.Fields(line) - - if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") - } - - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") - arraySize := len(parts[4:]) - - if bucketCount == -1 { - bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) - } - } - - sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { - sizes[i], err = strconv.ParseFloat(parts[i+4], 64) - if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) - } - } - - buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) - } - - return buddyInfo, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go deleted file mode 100644 index e2acd6d40..000000000 --- a/vendor/github.com/prometheus/procfs/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2014 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
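The buddyinfo.go hunk above parses /proc/buddyinfo into per-node, per-zone counts of free blocks of each order. A hedged usage sketch, assuming a github.com/prometheus/procfs release matching the removed vendored copy (one that still exposes NewFS and FS.NewBuddyInfo; later releases reshuffled this API):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // point at "fixtures/proc" to use the test fixtures shipped below
	if err != nil {
		log.Fatal(err)
	}
	infos, err := fs.NewBuddyInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, bi := range infos {
		// Sizes[n] is the number of free blocks of 2^n pages in this node/zone.
		fmt.Printf("node %s, zone %-8s: %.0f order-0 blocks free\n", bi.Node, bi.Zone, bi.Sizes[0])
	}
}
```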
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package procfs provides functions to retrieve system, kernel and process -// metrics from the pseudo-filesystem proc. -// -// Example: -// -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.NewStat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// -package procfs diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index f7f84ef36..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,1714 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cwd -SymlinkTo: /usr/bin -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/exe -SymlinkTo: /usr/bin/vim -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/10 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/limits -Lines: 17 
-Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 65536 65536 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/mountstats -Lines: 19 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cmdline -Lines: 0 -Mode: 644 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/4 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/cmdline -Lines: 1 -com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 
1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 49 - 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 - 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/mdstat -Lines: 26 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0] sdb3[1] - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2] sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/pressure -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/cpu -Lines: 1 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/io -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/memory -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/self -SymlinkTo: 26231 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/stat -Lines: 16 -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 
217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ctxt 38014093 -btime 1418183276 -processes 26442 -procs_running 2 -procs_blocked 1 -softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/symlinktargets -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/README -Lines: 2 -This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
-They are otherwise ignored by the tests -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/abc -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/def -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/ghi -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/uvw -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/xyz -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/dm-0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/dm-0/stat -Lines: 1 -6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/stat -Lines: 1 -9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net/eth0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_assign_type -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_len -Lines: 1 -6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/address -Lines: 1 -01:01:01:01:01:01 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/broadcast -Lines: 1 -ff:ff:ff:ff:ff:ff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_changes -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_down_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_up_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dev_id -Lines: 1 -0x20 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/net/eth0/dormant -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/duplex -Lines: 1 -full -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/flags -Lines: 1 -0x1303 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifalias -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifindex -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/iflink -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/link_mode -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/mtu -Lines: 1 -1500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/name_assign_type -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/netdev_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/operstate -Lines: 1 -up -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_name -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_switch_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/speed -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/tx_queue_len -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/type -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply/AC -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/online -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/type -Lines: 1 -Mains -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply/BAT0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/alarm -Lines: 1 -2503000 -Mode: 644 -# ttar - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/capacity -Lines: 1 -98 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/capacity_level -Lines: 1 -Normal -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/charge_start_threshold -Lines: 1 -95 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/charge_stop_threshold -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/cycle_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_full -Lines: 1 -50060000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_full_design -Lines: 1 -47520000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_now -Lines: 1 -49450000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/manufacturer -Lines: 1 -LGC -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/model_name -Lines: 1 -LNV-45N1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/power_now -Lines: 1 -4830000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/present -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/serial_number -Lines: 1 -38109 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/status -Lines: 1 -Discharging -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/technology -Lines: 1 -Li-ion -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/type -Lines: 1 -Battery -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=12229000 -POWER_SUPPLY_POWER_NOW=4830000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=50060000 -POWER_SUPPLY_ENERGY_NOW=49450000 -POWER_SUPPLY_CAPACITY=98 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/voltage_min_design -Lines: 1 -10800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/power_supply/BAT0/voltage_now -Lines: 1 -12229000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/temp -Lines: 1 -49925 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/type -Lines: 1 -bcm2835_thermal -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/mode -Lines: 1 -enabled -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/passive -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/temp -Lines: 1 -44000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/type -Lines: 1 -acpitz -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits 
-Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq -SymlinkTo: ../cpufreq/policy0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq -Lines: 1 -1200195 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency -Lines: 1 -4294967295 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus -Lines: 1 -1 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors -Lines: 1 -performance powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver -Lines: 1 -intel_pstate -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor -Lines: 1 -powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed -Lines: 1 - -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq -Lines: 1 -2400000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq -Lines: 1 -800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors -Lines: 1 -performance powersave -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq -Lines: 1 -1219917 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver -Lines: 1 -intel_pstate -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor -Lines: 1 -powersave -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq -Lines: 1 -2400000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq -Lines: 1 -800000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed -Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size -Lines: 1 -0 -Mode: 644 
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us -Lines: 1 -1305 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sda1/stats/stats -Lines: 1 -extent_alloc 1 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1 -Mode: 755 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sdb1/stats/stats -Lines: 1 -extent_alloc 2 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go deleted file mode 100644 index f7a151cc7..000000000 --- a/vendor/github.com/prometheus/procfs/fs.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "path" -) - -// FS represents the pseudo-filesystem proc, which provides an interface to -// kernel data structures. -type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) -} diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod deleted file mode 100644 index 8a1b839fd..000000000 --- a/vendor/github.com/prometheus/procfs/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/prometheus/procfs - -require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum deleted file mode 100644 index 7827dd3d5..000000000 --- a/vendor/github.com/prometheus/procfs/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go deleted file mode 100644 index e36d4a3bd..000000000 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" -) - -// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. -type IPVSStats struct { - // Total count of connections. - Connections uint64 - // Total incoming packages processed. - IncomingPackets uint64 - // Total outgoing packages processed. - OutgoingPackets uint64 - // Total incoming traffic. - IncomingBytes uint64 - // Total outgoing traffic. - OutgoingBytes uint64 -} - -// IPVSBackendStatus holds current metrics of one virtual / real address pair. -type IPVSBackendStatus struct { - // The local (virtual) IP address. - LocalAddress net.IP - // The remote (real) IP address. - RemoteAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The remote (real) port. - RemotePort uint16 - // The local firewall mark - LocalMark string - // The transport protocol (TCP, UDP). - Proto string - // The current number of active connections for this virtual/real address pair. - ActiveConn uint64 - // The current number of inactive connections for this virtual/real address pair. - InactConn uint64 - // The current weight of this virtual/real address pair. - Weight uint64 -} - -// NewIPVSStats reads the IPVS statistics. -func NewIPVSStats() (IPVSStats, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return IPVSStats{}, err - } - - return fs.NewIPVSStats() -} - -// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - defer file.Close() - - return parseIPVSStats(file) -} - -// parseIPVSStats performs the actual parsing of `ip_vs_stats`. -func parseIPVSStats(file io.Reader) (IPVSStats, error) { - var ( - statContent []byte - statLines []string - statFields []string - stats IPVSStats - ) - - statContent, err := ioutil.ReadAll(file) - if err != nil { - return IPVSStats{}, err - } - - statLines = strings.SplitN(string(statContent), "\n", 4) - if len(statLines) != 4 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") - } - - statFields = strings.Fields(statLines[2]) - if len(statFields) != 5 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") - } - - stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) - if err != nil { - return IPVSStats{}, err - } - - return stats, nil -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. 
-func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return []IPVSBackendStatus{}, err - } - - return fs.NewIPVSBackendStatus() -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.Path("net/ip_vs")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseIPVSBackendStatus(file) -} - -func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { - var ( - status []IPVSBackendStatus - scanner = bufio.NewScanner(file) - proto string - localMark string - localAddress net.IP - localPort uint16 - err error - ) - - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - if len(fields) == 0 { - continue - } - switch { - case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": - continue - case fields[0] == "TCP" || fields[0] == "UDP": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = "" - localAddress, localPort, err = parseIPPort(fields[1]) - if err != nil { - return nil, err - } - case fields[0] == "FWM": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = fields[1] - localAddress = nil - localPort = 0 - case fields[0] == "->": - if len(fields) < 6 { - continue - } - remoteAddress, remotePort, err := parseIPPort(fields[1]) - if err != nil { - return nil, err - } - weight, err := strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - activeConn, err := strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - inactConn, err := strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - status = append(status, IPVSBackendStatus{ - LocalAddress: localAddress, - LocalPort: localPort, - LocalMark: localMark, - RemoteAddress: remoteAddress, - RemotePort: remotePort, - Proto: proto, - Weight: weight, - ActiveConn: activeConn, - InactConn: inactConn, - }) - } - } - return status, nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - var ( - ip net.IP - err error - ) - - switch len(s) { - case 13: - ip, err = hex.DecodeString(s[0:8]) - if err != nil { - return nil, 0, err - } - case 46: - ip = net.ParseIP(s[1:40]) - if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) - } - default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) - } - - portString := s[len(s)-4:] - if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) - } - port, err := strconv.ParseUint(portString, 16, 16) - if err != nil { - return nil, 0, err - } - - return ip, uint16(port), nil -} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go deleted file mode 100644 index 9dc19583d..000000000 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "io/ioutil" - "regexp" - "strconv" - "strings" -) - -var ( - statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) -) - -// MDStat holds info parsed from /proc/mdstat. -type MDStat struct { - // Name of the device. - Name string - // activity-state of the device. - ActivityState string - // Number of active disks. - DisksActive int64 - // Total number of disks the device consists of. - DisksTotal int64 - // Number of blocks the device holds. - BlocksTotal int64 - // Number of blocks on the device that are in sync. - BlocksSynced int64 -} - -// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. -func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { - mdStatusFilePath := fs.Path("mdstat") - content, err := ioutil.ReadFile(mdStatusFilePath) - if err != nil { - return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - - mdStates := []MDStat{} - lines := strings.Split(string(content), "\n") - for i, l := range lines { - if l == "" { - continue - } - if l[0] == ' ' { - continue - } - if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { - continue - } - - mainLine := strings.Split(l, " ") - if len(mainLine) < 3 { - return mdStates, fmt.Errorf("error parsing mdline: %s", l) - } - mdName := mainLine[0] - activityState := mainLine[2] - - if len(lines) <= i+3 { - return mdStates, fmt.Errorf( - "error parsing %s: too few lines for md device %s", - mdStatusFilePath, - mdName, - ) - } - - active, total, size, err := evalStatusline(lines[i+1]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - - // j is the line number of the syncing-line. - j := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - j = i + 3 - } - - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. 
- syncedBlocks := size - if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { - syncedBlocks, err = evalBuildline(lines[j]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - } - - mdStates = append(mdStates, MDStat{ - Name: mdName, - ActivityState: activityState, - DisksActive: active, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: syncedBlocks, - }) - } - - return mdStates, nil -} - -func evalStatusline(statusline string) (active, total, size int64, err error) { - matches := statuslineRE.FindStringSubmatch(statusline) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) - } - - size, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - return active, total, size, nil -} - -func evalBuildline(buildline string) (syncedBlocks int64, err error) { - matches := buildlineRE.FindStringSubmatch(buildline) - if len(matches) != 2 { - return 0, fmt.Errorf("unexpected buildline: %s", buildline) - } - - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, fmt.Errorf("%s in buildline: %s", err, buildline) - } - - return syncedBlocks, nil -} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go deleted file mode 100644 index fc385afcf..000000000 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ /dev/null @@ -1,616 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// While implementing parsing of /proc/[pid]/mountstats, this blog was used -// heavily as a reference: -// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex -// -// Special thanks to Chris Siebenmann for all of his posts explaining the -// various statistics available for NFS. - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// Constants shared between multiple functions. -const ( - deviceEntryLen = 8 - - fieldBytesLen = 8 - fieldEventsLen = 27 - - statVersion10 = "1.0" - statVersion11 = "1.1" - - fieldTransport10TCPLen = 10 - fieldTransport10UDPLen = 7 - - fieldTransport11TCPLen = 13 - fieldTransport11UDPLen = 10 -) - -// A Mount is a device mount parsed from /proc/[pid]/mountstats. -type Mount struct { - // Name of the device. - Device string - // The mount point of the device. - Mount string - // The filesystem type used by the device. - Type string - // If available additional statistics related to this Mount. - // Use a type assertion to determine if additional statistics are available. 
- Stats MountStats -} - -// A MountStats is a type which contains detailed statistics for a specific -// type of Mount. -type MountStats interface { - mountStats() -} - -// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. -type MountStatsNFS struct { - // The version of statistics provided. - StatVersion string - // The optional mountaddr of the NFS mount. - MountAddress string - // The age of the NFS mount. - Age time.Duration - // Statistics related to byte counters for various operations. - Bytes NFSBytesStats - // Statistics related to various NFS event occurrences. - Events NFSEventsStats - // Statistics broken down by filesystem operation. - Operations []NFSOperationStats - // Statistics about the NFS RPC transport. - Transport NFSTransportStats -} - -// mountStats implements MountStats. -func (m MountStatsNFS) mountStats() {} - -// A NFSBytesStats contains statistics about the number of bytes read and written -// by an NFS client to and from an NFS server. -type NFSBytesStats struct { - // Number of bytes read using the read() syscall. - Read uint64 - // Number of bytes written using the write() syscall. - Write uint64 - // Number of bytes read using the read() syscall in O_DIRECT mode. - DirectRead uint64 - // Number of bytes written using the write() syscall in O_DIRECT mode. - DirectWrite uint64 - // Number of bytes read from the NFS server, in total. - ReadTotal uint64 - // Number of bytes written to the NFS server, in total. - WriteTotal uint64 - // Number of pages read directly via mmap()'d files. - ReadPages uint64 - // Number of pages written directly via mmap()'d files. - WritePages uint64 -} - -// A NFSEventsStats contains statistics about NFS event occurrences. -type NFSEventsStats struct { - // Number of times cached inode attributes are re-validated from the server. - InodeRevalidate uint64 - // Number of times cached dentry nodes are re-validated from the server. - DnodeRevalidate uint64 - // Number of times an inode cache is cleared. - DataInvalidate uint64 - // Number of times cached inode attributes are invalidated. - AttributeInvalidate uint64 - // Number of times files or directories have been open()'d. - VFSOpen uint64 - // Number of times a directory lookup has occurred. - VFSLookup uint64 - // Number of times permissions have been checked. - VFSAccess uint64 - // Number of updates (and potential writes) to pages. - VFSUpdatePage uint64 - // Number of pages read directly via mmap()'d files. - VFSReadPage uint64 - // Number of times a group of pages have been read. - VFSReadPages uint64 - // Number of pages written directly via mmap()'d files. - VFSWritePage uint64 - // Number of times a group of pages have been written. - VFSWritePages uint64 - // Number of times directory entries have been read with getdents(). - VFSGetdents uint64 - // Number of times attributes have been set on inodes. - VFSSetattr uint64 - // Number of pending writes that have been forcefully flushed to the server. - VFSFlush uint64 - // Number of times fsync() has been called on directories and files. - VFSFsync uint64 - // Number of times locking has been attempted on a file. - VFSLock uint64 - // Number of times files have been closed and released. - VFSFileRelease uint64 - // Unknown. Possibly unused. - CongestionWait uint64 - // Number of times files have been truncated. - Truncation uint64 - // Number of times a file has been grown due to writes beyond its existing end. 
- WriteExtension uint64 - // Number of times a file was removed while still open by another process. - SillyRename uint64 - // Number of times the NFS server gave less data than expected while reading. - ShortRead uint64 - // Number of times the NFS server wrote less data than expected while writing. - ShortWrite uint64 - // Number of times the NFS server indicated EJUKEBOX; retrieving data from - // offline storage. - JukeboxDelay uint64 - // Number of NFS v4.1+ pNFS reads. - PNFSRead uint64 - // Number of NFS v4.1+ pNFS writes. - PNFSWrite uint64 -} - -// A NFSOperationStats contains statistics for a single operation. -type NFSOperationStats struct { - // The name of the operation. - Operation string - // Number of requests performed for this operation. - Requests uint64 - // Number of times an actual RPC request has been transmitted for this operation. - Transmissions uint64 - // Number of times a request has had a major timeout. - MajorTimeouts uint64 - // Number of bytes sent for this operation, including RPC headers and payload. - BytesSent uint64 - // Number of bytes received for this operation, including RPC headers and payload. - BytesReceived uint64 - // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueTime time.Duration - // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseTime time.Duration - // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestTime time.Duration -} - -// A NFSTransportStats contains statistics for the NFS mount RPC requests and -// responses. -type NFSTransportStats struct { - // The transport protocol used for the NFS mount. - Protocol string - // The local port used for the NFS mount. - Port uint64 - // Number of times the client has had to establish a connection from scratch - // to the NFS server. - Bind uint64 - // Number of times the client has made a TCP connection to the NFS server. - Connect uint64 - // Duration (in jiffies, a kernel internal unit of time) the NFS mount has - // spent waiting for connections to the server to be established. - ConnectIdleTime uint64 - // Duration since the NFS mount last saw any RPC traffic. - IdleTime time.Duration - // Number of RPC requests for this mount sent to the NFS server. - Sends uint64 - // Number of RPC responses for this mount received from the NFS server. - Receives uint64 - // Number of times the NFS server sent a response with a transaction ID - // unknown to this client. - BadTransactionIDs uint64 - // A running counter, incremented on each request as the current difference - // ebetween sends and receives. - CumulativeActiveRequests uint64 - // A running counter, incremented on each request by the current backlog - // queue size. - CumulativeBacklog uint64 - - // Stats below only available with stat version 1.1. - - // Maximum number of simultaneously active RPC requests ever used. - MaximumRPCSlotsUsed uint64 - // A running counter, incremented on each request as the current size of the - // sending queue. - CumulativeSendingQueue uint64 - // A running counter, incremented on each request as the current size of the - // pending queue. - CumulativePendingQueue uint64 -} - -// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice -// of Mount structures containing detailed information about each mount. -// If available, statistics for each mount are parsed as well. 
-func parseMountStats(r io.Reader) ([]*Mount, error) { - const ( - device = "device" - statVersionPrefix = "statvers=" - - nfs3Type = "nfs" - nfs4Type = "nfs4" - ) - - var mounts []*Mount - - s := bufio.NewScanner(r) - for s.Scan() { - // Only look for device entries in this function - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 || ss[0] != device { - continue - } - - m, err := parseMount(ss) - if err != nil { - return nil, err - } - - // Does this mount also possess statistics information? - if len(ss) > deviceEntryLen { - // Only NFSv3 and v4 are supported for parsing statistics - if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) - } - - statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) - - stats, err := parseMountStatsNFS(s, statVersion) - if err != nil { - return nil, err - } - - m.Stats = stats - } - - mounts = append(mounts, m) - } - - return mounts, s.Err() -} - -// parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] -func parseMount(ss []string) (*Mount, error) { - if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - - // Check for specific words appearing at specific indices to ensure - // the format is consistent with what we expect - format := []struct { - i int - s string - }{ - {i: 0, s: "device"}, - {i: 2, s: "mounted"}, - {i: 3, s: "on"}, - {i: 5, s: "with"}, - {i: 6, s: "fstype"}, - } - - for _, f := range format { - if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - } - - return &Mount{ - Device: ss[1], - Mount: ss[4], - Type: ss[7], - }, nil -} - -// parseMountStatsNFS parses a MountStatsNFS by scanning additional information -// related to NFS statistics. 
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { - // Field indicators for parsing specific types of data - const ( - fieldOpts = "opts:" - fieldAge = "age:" - fieldBytes = "bytes:" - fieldEvents = "events:" - fieldPerOpStats = "per-op" - fieldTransport = "xprt:" - ) - - stats := &MountStatsNFS{ - StatVersion: statVersion, - } - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - break - } - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - - switch ss[0] { - case fieldOpts: - for _, opt := range strings.Split(ss[1], ",") { - split := strings.Split(opt, "=") - if len(split) == 2 && split[0] == "mountaddr" { - stats.MountAddress = split[1] - } - } - case fieldAge: - // Age integer is in seconds - d, err := time.ParseDuration(ss[1] + "s") - if err != nil { - return nil, err - } - - stats.Age = d - case fieldBytes: - bstats, err := parseNFSBytesStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Bytes = *bstats - case fieldEvents: - estats, err := parseNFSEventsStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Events = *estats - case fieldTransport: - if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) - } - - tstats, err := parseNFSTransportStats(ss[1:], statVersion) - if err != nil { - return nil, err - } - - stats.Transport = *tstats - } - - // When encountering "per-operation statistics", we must break this - // loop and parse them separately to ensure we can terminate parsing - // before reaching another device entry; hence why this 'if' statement - // is not just another switch case - if ss[0] == fieldPerOpStats { - break - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - // NFS per-operation stats appear last before the next device entry - perOpStats, err := parseNFSOperationStats(s) - if err != nil { - return nil, err - } - - stats.Operations = perOpStats - - return stats, nil -} - -// parseNFSBytesStats parses a NFSBytesStats line using an input set of -// integer fields. -func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { - if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) - } - - ns := make([]uint64, 0, fieldBytesLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSBytesStats{ - Read: ns[0], - Write: ns[1], - DirectRead: ns[2], - DirectWrite: ns[3], - ReadTotal: ns[4], - WriteTotal: ns[5], - ReadPages: ns[6], - WritePages: ns[7], - }, nil -} - -// parseNFSEventsStats parses a NFSEventsStats line using an input set of -// integer fields. 
-func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { - if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) - } - - ns := make([]uint64, 0, fieldEventsLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSEventsStats{ - InodeRevalidate: ns[0], - DnodeRevalidate: ns[1], - DataInvalidate: ns[2], - AttributeInvalidate: ns[3], - VFSOpen: ns[4], - VFSLookup: ns[5], - VFSAccess: ns[6], - VFSUpdatePage: ns[7], - VFSReadPage: ns[8], - VFSReadPages: ns[9], - VFSWritePage: ns[10], - VFSWritePages: ns[11], - VFSGetdents: ns[12], - VFSSetattr: ns[13], - VFSFlush: ns[14], - VFSFsync: ns[15], - VFSLock: ns[16], - VFSFileRelease: ns[17], - CongestionWait: ns[18], - Truncation: ns[19], - WriteExtension: ns[20], - SillyRename: ns[21], - ShortRead: ns[22], - ShortWrite: ns[23], - JukeboxDelay: ns[24], - PNFSRead: ns[25], - PNFSWrite: ns[26], - }, nil -} - -// parseNFSOperationStats parses a slice of NFSOperationStats by scanning -// additional information about per-operation statistics until an empty -// line is reached. -func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { - const ( - // Number of expected fields in each per-operation statistics set - numFields = 9 - ) - - var ops []NFSOperationStats - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - // Must break when reading a blank line after per-operation stats to - // enable top-level function to parse the next device entry - break - } - - if len(ss) != numFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) - } - - // Skip string operation name for integers - ns := make([]uint64, 0, numFields-1) - for _, st := range ss[1:] { - n, err := strconv.ParseUint(st, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, - CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, - CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, - }) - } - - return ops, s.Err() -} - -// parseNFSTransportStats parses a NFSTransportStats line using an input set of -// integer fields matched to a specific stats version. -func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { - // Extract the protocol field. 
It is the only string value in the line - protocol := ss[0] - ss = ss[1:] - - switch statVersion { - case statVersion10: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport10UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) - } - case statVersion11: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport11UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) - } - default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) - } - - // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. Since the stat length is bigger for TCP stats, we use - // the TCP length here. - // - // Note: slice length must be set to length of v1.1 stats to avoid a panic when - // only v1.0 stats are present. - // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) - for i, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns[i] = n - } - - // The fields differ depending on the transport protocol (TCP or UDP) - // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt - // - // For the udp RPC transport there is no connection count, connect idle time, - // or idle time (fields #3, #4, and #5); all other fields are the same. So - // we set them to 0 here. - if protocol == "udp" { - ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } - - return &NFSTransportStats{ - Protocol: protocol, - Port: ns[0], - Bind: ns[1], - Connect: ns[2], - ConnectIdleTime: ns[3], - IdleTime: time.Duration(ns[4]) * time.Second, - Sends: ns[5], - Receives: ns[6], - BadTransactionIDs: ns[7], - CumulativeActiveRequests: ns[8], - CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go deleted file mode 100644 index 3f2523371..000000000 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "sort" - "strconv" - "strings" -) - -// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. -type NetDevLine struct { - Name string `json:"name"` // The name of the interface. 
- RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. - RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. - RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. - RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. - RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. - RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. - RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. - RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. - TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. - TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. - TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. - TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. - TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. - TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. - TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. - TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. -} - -// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys -// are interface names. -type NetDev map[string]NetDevLine - -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func NewNetDev() (NetDev, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetDev() -} - -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NewNetDev() (NetDev, error) { - return newNetDev(fs.Path("net/dev")) -} - -// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NewNetDev() (NetDev, error) { - return newNetDev(p.path("net/dev")) -} - -// newNetDev creates a new NetDev from the contents of the given file. -func newNetDev(file string) (NetDev, error) { - f, err := os.Open(file) - if err != nil { - return NetDev{}, err - } - defer f.Close() - - nd := NetDev{} - s := bufio.NewScanner(f) - for n := 0; s.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line, err := nd.parseLine(s.Text()) - if err != nil { - return nd, err - } - - nd[line.Name] = *line - } - - return nd, s.Err() -} - -// parseLine parses a single line from the /proc/net/dev file. Header lines -// must be filtered prior to calling this method. 
-func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { - parts := strings.SplitN(rawLine, ":", 2) - if len(parts) != 2 { - return nil, errors.New("invalid net/dev line, missing colon") - } - fields := strings.Fields(strings.TrimSpace(parts[1])) - - var err error - line := &NetDevLine{} - - // Interface Name - line.Name = strings.TrimSpace(parts[0]) - if line.Name == "" { - return nil, errors.New("invalid net/dev line, empty interface name") - } - - // RX - line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return nil, err - } - line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) - if err != nil { - return nil, err - } - line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) - if err != nil { - return nil, err - } - - // TX - line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) - if err != nil { - return nil, err - } - line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) - if err != nil { - return nil, err - } - line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) - if err != nil { - return nil, err - } - line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) - if err != nil { - return nil, err - } - line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) - if err != nil { - return nil, err - } - line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) - if err != nil { - return nil, err - } - line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) - if err != nil { - return nil, err - } - line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) - if err != nil { - return nil, err - } - - return line, nil -} - -// Total aggregates the values across interfaces and returns a new NetDevLine. -// The Name field will be a sorted comma separated list of interface names. -func (nd NetDev) Total() NetDevLine { - total := NetDevLine{} - - names := make([]string, 0, len(nd)) - for _, ifc := range nd { - names = append(names, ifc.Name) - total.RxBytes += ifc.RxBytes - total.RxPackets += ifc.RxPackets - total.RxPackets += ifc.RxPackets - total.RxErrors += ifc.RxErrors - total.RxDropped += ifc.RxDropped - total.RxFIFO += ifc.RxFIFO - total.RxFrame += ifc.RxFrame - total.RxCompressed += ifc.RxCompressed - total.RxMulticast += ifc.RxMulticast - total.TxBytes += ifc.TxBytes - total.TxPackets += ifc.TxPackets - total.TxErrors += ifc.TxErrors - total.TxDropped += ifc.TxDropped - total.TxFIFO += ifc.TxFIFO - total.TxCollisions += ifc.TxCollisions - total.TxCarrier += ifc.TxCarrier - total.TxCompressed += ifc.TxCompressed - } - sort.Strings(names) - total.Name = strings.Join(names, ", ") - - return total -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go deleted file mode 100644 index 06bed0ef4..000000000 --- a/vendor/github.com/prometheus/procfs/proc.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -// Proc provides information about a running process. -type Proc struct { - // The process ID. - PID int - - fs FS -} - -// Procs represents a list of Proc structs. -type Procs []Proc - -func (p Procs) Len() int { return len(p) } -func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } - -// Self returns a process for the current process read via /proc/self. -func Self() (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Self() -} - -// NewProc returns a process for the given pid under /proc. -func NewProc(pid int) (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.NewProc(pid) -} - -// AllProcs returns a list of all currently available processes under /proc. -func AllProcs() (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllProcs() -} - -// Self returns a process for the current process. -func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.Path("self")) - if err != nil { - return Proc{}, err - } - pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) - if err != nil { - return Proc{}, err - } - return fs.NewProc(pid) -} - -// NewProc returns a process for the given pid. -func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { - return Proc{}, err - } - return Proc{PID: pid, fs: fs}, nil -} - -// AllProcs returns a list of all currently available processes. -func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.Path()) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - p := Procs{} - for _, n := range names { - pid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - p = append(p, Proc{PID: int(pid), fs: fs}) - } - - return p, nil -} - -// CmdLine returns the command line of a process. -func (p Proc) CmdLine() ([]string, error) { - f, err := os.Open(p.path("cmdline")) - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - if len(data) < 1 { - return []string{}, nil - } - - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil -} - -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - f, err := os.Open(p.path("comm")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(data)), nil -} - -// Executable returns the absolute path of the executable command of a process. 
-func (p Proc) Executable() (string, error) { - exe, err := os.Readlink(p.path("exe")) - if os.IsNotExist(err) { - return "", nil - } - - return exe, err -} - -// Cwd returns the absolute path to the current working directory of the process. -func (p Proc) Cwd() (string, error) { - wd, err := os.Readlink(p.path("cwd")) - if os.IsNotExist(err) { - return "", nil - } - - return wd, err -} - -// RootDir returns the absolute path to the process's root directory (as set by chroot) -func (p Proc) RootDir() (string, error) { - rdir, err := os.Readlink(p.path("root")) - if os.IsNotExist(err) { - return "", nil - } - - return rdir, err -} - -// FileDescriptors returns the currently open file descriptors of a process. -func (p Proc) FileDescriptors() ([]uintptr, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - fds := make([]uintptr, len(names)) - for i, n := range names { - fd, err := strconv.ParseInt(n, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse fd %s: %s", n, err) - } - fds[i] = uintptr(fd) - } - - return fds, nil -} - -// FileDescriptorTargets returns the targets of all file descriptors of a process. -// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. -func (p Proc) FileDescriptorTargets() ([]string, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - targets := make([]string, len(names)) - - for i, name := range names { - target, err := os.Readlink(p.path("fd", name)) - if err == nil { - targets[i] = target - } - } - - return targets, nil -} - -// FileDescriptorsLen returns the number of currently open file descriptors of -// a process. -func (p Proc) FileDescriptorsLen() (int, error) { - fds, err := p.fileDescriptors() - if err != nil { - return 0, err - } - - return len(fds), nil -} - -// MountStats retrieves statistics and configuration for mount points in a -// process's namespace. -func (p Proc) MountStats() ([]*Mount, error) { - f, err := os.Open(p.path("mountstats")) - if err != nil { - return nil, err - } - defer f.Close() - - return parseMountStats(f) -} - -func (p Proc) fileDescriptors() ([]string, error) { - d, err := os.Open(p.path("fd")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - return names, nil -} - -func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) -} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go deleted file mode 100644 index 0251c83bf..000000000 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "io/ioutil" - "os" -) - -// ProcIO models the content of /proc//io. 
-type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. - SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation. See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// NewIO creates a new ProcIO instance from a given Proc instance. -func (p Proc) NewIO() (ProcIO, error) { - pio := ProcIO{} - - f, err := os.Open(p.path("io")) - if err != nil { - return pio, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - - return pio, err -} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go deleted file mode 100644 index f04ba6fda..000000000 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "regexp" - "strconv" -) - -// ProcLimits represents the soft limits for each of the process's resource -// limits. For more information see getrlimit(2): -// http://man7.org/linux/man-pages/man2/getrlimit.2.html. -type ProcLimits struct { - // CPU time limit in seconds. - CPUTime int64 - // Maximum size of files that the process may create. - FileSize int64 - // Maximum size of the process's data segment (initialized data, - // uninitialized data, and heap). - DataSize int64 - // Maximum size of the process stack in bytes. - StackSize int64 - // Maximum size of a core file. - CoreFileSize int64 - // Limit of the process's resident set in pages. - ResidentSet int64 - // Maximum number of processes that can be created for the real user ID of - // the calling process. - Processes int64 - // Value one greater than the maximum file descriptor number that can be - // opened by this process. - OpenFiles int64 - // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int64 - // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int64 - // Limit on the combined number of flock(2) locks and fcntl(2) leases that - // this process may establish. - FileLocks int64 - // Limit of signals that may be queued for the real user ID of the calling - // process. - PendingSignals int64 - // Limit on the number of bytes that can be allocated for POSIX message - // queues for the real user ID of the calling process. - MsqqueueSize int64 - // Limit of the nice priority set using setpriority(2) or nice(2). 
- NicePriority int64 - // Limit of the real-time priority set using sched_setscheduler(2) or - // sched_setparam(2). - RealtimePriority int64 - // Limit (in microseconds) on the amount of CPU time that a process - // scheduled under a real-time scheduling policy may consume without making - // a blocking system call. - RealtimeTimeout int64 -} - -const ( - limitsFields = 3 - limitsUnlimited = "unlimited" -) - -var ( - limitsDelimiter = regexp.MustCompile(" +") -) - -// NewLimits returns the current soft limits of the process. -func (p Proc) NewLimits() (ProcLimits, error) { - f, err := os.Open(p.path("limits")) - if err != nil { - return ProcLimits{}, err - } - defer f.Close() - - var ( - l = ProcLimits{} - s = bufio.NewScanner(f) - ) - for s.Scan() { - fields := limitsDelimiter.Split(s.Text(), limitsFields) - if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf( - "couldn't parse %s line %s", f.Name(), s.Text()) - } - - switch fields[0] { - case "Max cpu time": - l.CPUTime, err = parseInt(fields[1]) - case "Max file size": - l.FileSize, err = parseInt(fields[1]) - case "Max data size": - l.DataSize, err = parseInt(fields[1]) - case "Max stack size": - l.StackSize, err = parseInt(fields[1]) - case "Max core file size": - l.CoreFileSize, err = parseInt(fields[1]) - case "Max resident set": - l.ResidentSet, err = parseInt(fields[1]) - case "Max processes": - l.Processes, err = parseInt(fields[1]) - case "Max open files": - l.OpenFiles, err = parseInt(fields[1]) - case "Max locked memory": - l.LockedMemory, err = parseInt(fields[1]) - case "Max address space": - l.AddressSpace, err = parseInt(fields[1]) - case "Max file locks": - l.FileLocks, err = parseInt(fields[1]) - case "Max pending signals": - l.PendingSignals, err = parseInt(fields[1]) - case "Max msgqueue size": - l.MsqqueueSize, err = parseInt(fields[1]) - case "Max nice priority": - l.NicePriority, err = parseInt(fields[1]) - case "Max realtime priority": - l.RealtimePriority, err = parseInt(fields[1]) - case "Max realtime timeout": - l.RealtimeTimeout, err = parseInt(fields[1]) - } - if err != nil { - return ProcLimits{}, err - } - } - - return l, s.Err() -} - -func parseInt(s string) (int64, error) { - if s == limitsUnlimited { - return -1, nil - } - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) - } - return i, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go deleted file mode 100644 index d06c26eba..000000000 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - "strings" -) - -// Namespace represents a single namespace of a process. -type Namespace struct { - Type string // Namespace type. - Inode uint32 // Inode number of the namespace. 
If two processes are in the same namespace their inodes will match. -} - -// Namespaces contains all of the namespaces that the process is contained in. -type Namespaces map[string]Namespace - -// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the -// process is a member. -func (p Proc) NewNamespaces() (Namespaces, error) { - d, err := os.Open(p.path("ns")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) - } - - ns := make(Namespaces, len(names)) - for _, name := range names { - target, err := os.Readlink(p.path("ns", name)) - if err != nil { - return nil, err - } - - fields := strings.SplitN(target, ":", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) - } - - typ := fields[0] - inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) - if err != nil { - return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) - } - - ns[name] = Namespace{typ, uint32(inode)} - } - - return ns, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go deleted file mode 100644 index 4f11cdbdb..000000000 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// The PSI / pressure interface is described at -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt -// Each resource (cpu, io, memory, ...) is exposed as a single file. -// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. -// Each line contains several averages (over n seconds) and a total in µs. -// -// Example io pressure file: -// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 -// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "strings" -) - -const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" - -// PSILine is a single line of values as returned by /proc/pressure/* -// The Avg entries are averages over n seconds, as a percentage -// The Total line is in microseconds -type PSILine struct { - Avg10 float64 - Avg60 float64 - Avg300 float64 - Total uint64 -} - -// PSIStats represent pressure stall information from /proc/pressure/* -// Some indicates the share of time in which at least some tasks are stalled -// Full indicates the share of time in which all non-idle tasks are stalled simultaneously -type PSIStats struct { - Some *PSILine - Full *PSILine -} - -// NewPSIStatsForResource reads pressure stall information for the specified -// resource. At time of writing this can be either "cpu", "memory" or "io". 
-func NewPSIStatsForResource(resource string) (PSIStats, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return PSIStats{}, err - } - - return fs.NewPSIStatsForResource(resource) -} - -// NewPSIStatsForResource reads pressure stall information from /proc/pressure/ -func (fs FS) NewPSIStatsForResource(resource string) (PSIStats, error) { - file, err := os.Open(fs.Path(fmt.Sprintf("%s/%s", "pressure", resource))) - if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) - } - - defer file.Close() - return parsePSIStats(resource, file) -} - -// parsePSIStats parses the specified file for pressure stall information -func parsePSIStats(resource string, file io.Reader) (PSIStats, error) { - psiStats := PSIStats{} - stats, err := ioutil.ReadAll(file) - if err != nil { - return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource) - } - - for _, l := range strings.Split(string(stats), "\n") { - prefix := strings.Split(l, " ")[0] - switch prefix { - case "some": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Some = &psi - case "full": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Full = &psi - default: - // If we encounter a line with an unknown prefix, ignore it and move on - // Should new measurement types be added in the future we'll simply ignore them instead - // of erroring on retrieval - continue - } - } - - return psiStats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go deleted file mode 100644 index e7c626a8e..000000000 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" -) - -// Originally, this USER_HZ value was dynamically retrieved via a sysconf call -// which required cgo. However, that caused a lot of problems regarding -// cross-compilation. Alternatives such as running a binary to determine the -// value, or trying to derive it in some other way were all problematic. After -// much research it was determined that USER_HZ is actually hardcoded to 100 on -// all Go-supported platforms as of the time of this writing. This is why we -// decided to hardcode it here as well. It is not impossible that there could -// be systems with exceptions, but they should be very exotic edge cases, and -// in that case, the worst outcome will be two misreported metrics. 
-// -// See also the following discussions: -// -// - https://github.com/prometheus/node_exporter/issues/52 -// - https://github.com/prometheus/procfs/pull/2 -// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue -const userHZ = 100 - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStat struct { - // The process ID. - PID int - // The filename of the executable. - Comm string - // The process state. - State string - // The PID of the parent of this process. - PPID int - // The process group ID of the process. - PGRP int - // The session ID of the process. - Session int - // The controlling terminal of the process. - TTY int - // The ID of the foreground process group of the controlling terminal of - // the process. - TPGID int - // The kernel flags word of the process. - Flags uint - // The number of minor faults the process has made which have not required - // loading a memory page from disk. - MinFlt uint - // The number of minor faults that the process's waited-for children have - // made. - CMinFlt uint - // The number of major faults the process has made which have required - // loading a memory page from disk. - MajFlt uint - // The number of major faults that the process's waited-for children have - // made. - CMajFlt uint - // Amount of time that this process has been scheduled in user mode, - // measured in clock ticks. - UTime uint - // Amount of time that this process has been scheduled in kernel mode, - // measured in clock ticks. - STime uint - // Amount of time that this process's waited-for children have been - // scheduled in user mode, measured in clock ticks. - CUTime uint - // Amount of time that this process's waited-for children have been - // scheduled in kernel mode, measured in clock ticks. - CSTime uint - // For processes running a real-time scheduling policy, this is the negated - // scheduling priority, minus one. - Priority int - // The nice value, a value in the range 19 (low priority) to -20 (high - // priority). - Nice int - // Number of threads in this process. - NumThreads int - // The time the process started after system boot, the value is expressed - // in clock ticks. - Starttime uint64 - // Virtual memory size in bytes. - VSize uint - // Resident set size in pages. - RSS int - - fs FS -} - -// NewStat returns the current status information of the process. -func (p Proc) NewStat() (ProcStat, error) { - f, err := os.Open(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return ProcStat{}, err - } - - var ( - ignore int - - s = ProcStat{PID: p.PID, fs: p.fs} - l = bytes.Index(data, []byte("(")) - r = bytes.LastIndex(data, []byte(")")) - ) - - if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf( - "unexpected format, couldn't extract comm: %s", - data, - ) - } - - s.Comm = string(data[l+1 : r]) - _, err = fmt.Fscan( - bytes.NewBuffer(data[r+2:]), - &s.State, - &s.PPID, - &s.PGRP, - &s.Session, - &s.TTY, - &s.TPGID, - &s.Flags, - &s.MinFlt, - &s.CMinFlt, - &s.MajFlt, - &s.CMajFlt, - &s.UTime, - &s.STime, - &s.CUTime, - &s.CSTime, - &s.Priority, - &s.Nice, - &s.NumThreads, - &ignore, - &s.Starttime, - &s.VSize, - &s.RSS, - ) - if err != nil { - return ProcStat{}, err - } - - return s, nil -} - -// VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() uint { - return s.VSize -} - -// ResidentMemory returns the resident memory size in bytes. 
-func (s ProcStat) ResidentMemory() int { - return s.RSS * os.Getpagesize() -} - -// StartTime returns the unix timestamp of the process in seconds. -func (s ProcStat) StartTime() (float64, error) { - stat, err := s.fs.NewStat() - if err != nil { - return 0, err - } - return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil -} - -// CPUTime returns the total CPU user and system time in seconds. -func (s ProcStat) CPUTime() float64 { - return float64(s.UTime+s.STime) / userHZ -} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go deleted file mode 100644 index 61eb6b0e3..000000000 --- a/vendor/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// CPUStat shows how much time the cpu spend in various stages. -type CPUStat struct { - User float64 - Nice float64 - System float64 - Idle float64 - Iowait float64 - IRQ float64 - SoftIRQ float64 - Steal float64 - Guest float64 - GuestNice float64 -} - -// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. -// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs -type SoftIRQStat struct { - Hi uint64 - Timer uint64 - NetTx uint64 - NetRx uint64 - Block uint64 - BlockIoPoll uint64 - Tasklet uint64 - Sched uint64 - Hrtimer uint64 - Rcu uint64 -} - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime uint64 - // Summed up cpu statistics. - CPUTotal CPUStat - // Per-CPU statistics. - CPU []CPUStat - // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. - IRQTotal uint64 - // Number of times a numbered IRQ was triggered. - IRQ []uint64 - // Number of times a context switch happened. - ContextSwitches uint64 - // Number of times a process was created. - ProcessCreated uint64 - // Number of processes currently running. - ProcessesRunning uint64 - // Number of processes currently blocked (waiting for IO). - ProcessesBlocked uint64 - // Number of times a softirq was scheduled. - SoftIRQTotal uint64 - // Detailed softirq statistics. - SoftIRQ SoftIRQStat -} - -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - -// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). 
-func parseCPUStat(line string) (CPUStat, int64, error) { - cpuStat := CPUStat{} - var cpu string - - count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", - &cpu, - &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, - &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, - &cpuStat.Guest, &cpuStat.GuestNice) - - if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) - } - if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) - } - - cpuStat.User /= userHZ - cpuStat.Nice /= userHZ - cpuStat.System /= userHZ - cpuStat.Idle /= userHZ - cpuStat.Iowait /= userHZ - cpuStat.IRQ /= userHZ - cpuStat.SoftIRQ /= userHZ - cpuStat.Steal /= userHZ - cpuStat.Guest /= userHZ - cpuStat.GuestNice /= userHZ - - if cpu == "cpu" { - return cpuStat, -1, nil - } - - cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) - if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) - } - - return cpuStat, cpuID, nil -} - -// Parse a softirq line. -func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { - softIRQStat := SoftIRQStat{} - var total uint64 - var prefix string - - _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", - &prefix, &total, - &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, - &softIRQStat.Block, &softIRQStat.BlockIoPoll, - &softIRQStat.Tasklet, &softIRQStat.Sched, - &softIRQStat.Hrtimer, &softIRQStat.Rcu) - - if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) - } - - return softIRQStat, total, nil -} - -// NewStat returns an information about current kernel/system statistics. -func (fs FS) NewStat() (Stat, error) { - // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt - - f, err := os.Open(fs.Path("stat")) - if err != nil { - return Stat{}, err - } - defer f.Close() - - stat := Stat{} - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - continue - } - switch { - case parts[0] == "btime": - if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) - } - case parts[0] == "intr": - if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) - } - numberedIRQs := parts[2:] - stat.IRQ = make([]uint64, len(numberedIRQs)) - for i, count := range numberedIRQs { - if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) - } - } - case parts[0] == "ctxt": - if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) - } - case parts[0] == "processes": - if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) - } - case parts[0] == "procs_running": - if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) - } - case parts[0] == "procs_blocked": - if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - 
return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) - } - case parts[0] == "softirq": - softIRQStats, total, err := parseSoftIRQStat(line) - if err != nil { - return Stat{}, err - } - stat.SoftIRQTotal = total - stat.SoftIRQ = softIRQStats - case strings.HasPrefix(parts[0], "cpu"): - cpuStat, cpuID, err := parseCPUStat(line) - if err != nil { - return Stat{}, err - } - if cpuID == -1 { - stat.CPUTotal = cpuStat - } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } - stat.CPU[cpuID] = cpuStat - } - } - } - - if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) - } - - return stat, nil -} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar deleted file mode 100644 index b0171a12b..000000000 --- a/vendor/github.com/prometheus/procfs/ttar +++ /dev/null @@ -1,389 +0,0 @@ -#!/usr/bin/env bash - -# Purpose: plain text tar format -# Limitations: - only suitable for text files, directories, and symlinks -# - stores only filename, content, and mode -# - not designed for untrusted input -# -# Note: must work with bash version 3.2 (macOS) - -# Copyright 2017 Roger Luethi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -o nounset - -# Sanitize environment (for instance, standard sorting of glob matches) -export LC_ALL=C - -path="" -CMD="" -ARG_STRING="$*" - -#------------------------------------------------------------------------------ -# Not all sed implementations can work on null bytes. In order to make ttar -# work out of the box on macOS, use Python as a stream editor. - -USE_PYTHON=0 - -PYTHON_CREATE_FILTER=$(cat << 'PCF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'EOF', r'\EOF', line) - line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) - line = re.sub('\x00', r'NULLBYTE', line) - sys.stdout.write(line) -PCF -) - -PYTHON_EXTRACT_FILTER=$(cat << 'PEF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'(?/dev/null; then - echo "ERROR Python not found. Aborting." - exit 2 - fi - USE_PYTHON=1 - fi -} - -#------------------------------------------------------------------------------ - -function usage { - bname=$(basename "$0") - cat << USAGE -Usage: $bname [-C

] -c -f (create archive) - $bname -t -f (list archive contents) - $bname [-C ] -x -f (extract archive) - -Options: - -C (change directory) - -v (verbose) - -Example: Change to sysfs directory, create ttar file from fixtures directory - $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ -USAGE -exit "$1" -} - -function vecho { - if [ "${VERBOSE:-}" == "yes" ]; then - echo >&7 "$@" - fi -} - -function set_cmd { - if [ -n "$CMD" ]; then - echo "ERROR: more than one command given" - echo - usage 2 - fi - CMD=$1 -} - -unset VERBOSE - -while getopts :cf:htxvC: opt; do - case $opt in - c) - set_cmd "create" - ;; - f) - ARCHIVE=$OPTARG - ;; - h) - usage 0 - ;; - t) - set_cmd "list" - ;; - x) - set_cmd "extract" - ;; - v) - VERBOSE=yes - exec 7>&1 - ;; - C) - CDIR=$OPTARG - ;; - *) - echo >&2 "ERROR: invalid option -$OPTARG" - echo - usage 1 - ;; - esac -done - -# Remove processed options from arguments -shift $(( OPTIND - 1 )); - -if [ "${CMD:-}" == "" ]; then - echo >&2 "ERROR: no command given" - echo - usage 1 -elif [ "${ARCHIVE:-}" == "" ]; then - echo >&2 "ERROR: no archive name given" - echo - usage 1 -fi - -function list { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while read -r line; do - line_no=$(( line_no + 1 )) - if [ $size -gt 0 ]; then - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - echo "$path" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - echo "$path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - echo "$path -> ${BASH_REMATCH[1]}" - fi - done < "$ttar_file" -} - -function extract { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while IFS= read -r line; do - line_no=$(( line_no + 1 )) - local eof_without_newline - if [ "$size" -gt 0 ]; then - if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceeded by a backslash indicates that the line - # does not end with a newline - eof_without_newline=1 - else - eof_without_newline=0 - fi - # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceeded by backslash - # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceeded by backslash - # Remove one backslash in front of EOF - if [ $USE_PYTHON -eq 1 ]; then - echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" - else - # The repeated pattern makes up for sed's lack of negative - # lookbehind assertions (for consecutive null bytes). - echo -n "$line" | \ - sed -e 's/^NULLBYTE/\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\\NULLBYTE/NULLBYTE/g; - s/\([^\\]\)EOF/\1/g; - s/\\EOF/EOF/g; - ' >> "$path" - fi - if [[ "$eof_without_newline" -eq 0 ]]; then - echo >> "$path" - fi - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - if [ -e "$path" ] || [ -L "$path" ]; then - rm "$path" - fi - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - # Create file even if it is zero-length. 
- touch "$path" - vecho " $path" - elif [[ $line =~ ^Mode:\ (.*)$ ]]; then - mode=${BASH_REMATCH[1]} - chmod "$mode" "$path" - vecho "$mode" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - mkdir -p "$path" - vecho " $path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - ln -s "${BASH_REMATCH[1]}" "$path" - vecho " $path -> ${BASH_REMATCH[1]}" - elif [[ $line =~ ^# ]]; then - # Ignore comments between files - continue - else - echo >&2 "ERROR: Unknown keyword on line $line_no: $line" - exit 1 - fi - done < "$ttar_file" -} - -function div { - echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ - "- - - - - -" -} - -function get_mode { - local mfile=$1 - if [ -z "${STAT_OPTION:-}" ]; then - if stat -c '%a' "$mfile" >/dev/null 2>&1; then - # GNU stat - STAT_OPTION='-c' - STAT_FORMAT='%a' - else - # BSD stat - STAT_OPTION='-f' - # Octal output, user/group/other (omit file type, sticky bit) - STAT_FORMAT='%OLp' - fi - fi - stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" -} - -function _create { - shopt -s nullglob - local mode - local eof_without_newline - while (( "$#" )); do - file=$1 - if [ -L "$file" ]; then - echo "Path: $file" - symlinkTo=$(readlink "$file") - echo "SymlinkTo: $symlinkTo" - vecho " $file -> $symlinkTo" - div - elif [ -d "$file" ]; then - # Strip trailing slash (if there is one) - file=${file%/} - echo "Directory: $file" - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file/" - div - # Find all files and dirs, including hidden/dot files - for x in "$file/"{*,.[^.]*}; do - _create "$x" - done - elif [ -f "$file" ]; then - echo "Path: $file" - lines=$(wc -l "$file"|awk '{print $1}') - eof_without_newline=0 - if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ - [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then - eof_without_newline=1 - lines=$((lines+1)) - fi - echo "Lines: $lines" - # Add backslash in front of EOF - # Add backslash in front of NULLBYTE - # Replace null byte with NULLBYTE - if [ $USE_PYTHON -eq 1 ]; then - < "$file" python -c "$PYTHON_CREATE_FILTER" - else - < "$file" \ - sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; - ' - fi - if [[ "$eof_without_newline" -eq 1 ]]; then - # Finish line with EOF to indicate that the original line did - # not end with a linefeed - echo "EOF" - fi - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file" - div - else - echo >&2 "ERROR: file not found ($file in $(pwd))" - exit 2 - fi - shift - done -} - -function create { - ttar_file=$1 - shift - if [ -z "${1:-}" ]; then - echo >&2 "ERROR: missing arguments." - echo - usage 1 - fi - if [ -e "$ttar_file" ]; then - rm "$ttar_file" - fi - exec > "$ttar_file" - echo "# Archive created by ttar $ARG_STRING" - _create "$@" -} - -test_environment - -if [ -n "${CDIR:-}" ]; then - if [[ "$ARCHIVE" != /* ]]; then - # Relative path: preserve the archive's location before changing - # directory - ARCHIVE="$(pwd)/$ARCHIVE" - fi - cd "$CDIR" -fi - -"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go deleted file mode 100644 index 8f1508f0f..000000000 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2017 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// XfrmStat models the contents of /proc/net/xfrm_stat. -type XfrmStat struct { - // All errors which are not matched by other - XfrmInError int - // No buffer is left - XfrmInBufferError int - // Header Error - XfrmInHdrError int - // No state found - // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong - XfrmInNoStates int - // Transformation protocol specific error - // e.g. SA Key is wrong - XfrmInStateProtoError int - // Transformation mode specific error - XfrmInStateModeError int - // Sequence error - // e.g. sequence number is out of window - XfrmInStateSeqError int - // State is expired - XfrmInStateExpired int - // State has mismatch option - // e.g. UDP encapsulation type is mismatched - XfrmInStateMismatch int - // State is invalid - XfrmInStateInvalid int - // No matching template for states - // e.g. Inbound SAs are correct but SP rule is wrong - XfrmInTmplMismatch int - // No policy is found for states - // e.g. Inbound SAs are correct but no SP is found - XfrmInNoPols int - // Policy discards - XfrmInPolBlock int - // Policy error - XfrmInPolError int - // All errors which are not matched by others - XfrmOutError int - // Bundle generation error - XfrmOutBundleGenError int - // Bundle check error - XfrmOutBundleCheckError int - // No state was found - XfrmOutNoStates int - // Transformation protocol specific error - XfrmOutStateProtoError int - // Transportation mode specific error - XfrmOutStateModeError int - // Sequence error - // i.e sequence number overflow - XfrmOutStateSeqError int - // State is expired - XfrmOutStateExpired int - // Policy discads - XfrmOutPolBlock int - // Policy is dead - XfrmOutPolDead int - // Policy Error - XfrmOutPolError int - XfrmFwdHdrError int - XfrmOutStateInvalid int - XfrmAcquireError int -} - -// NewXfrmStat reads the xfrm_stat statistics. -func NewXfrmStat() (XfrmStat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return XfrmStat{}, err - } - - return fs.NewXfrmStat() -} - -// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
-func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.Path("net/xfrm_stat")) - if err != nil { - return XfrmStat{}, err - } - defer file.Close() - - var ( - x = XfrmStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf( - "couldn't parse %s line %s", file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return XfrmStat{}, err - } - - switch name { - case "XfrmInError": - x.XfrmInError = value - case "XfrmInBufferError": - x.XfrmInBufferError = value - case "XfrmInHdrError": - x.XfrmInHdrError = value - case "XfrmInNoStates": - x.XfrmInNoStates = value - case "XfrmInStateProtoError": - x.XfrmInStateProtoError = value - case "XfrmInStateModeError": - x.XfrmInStateModeError = value - case "XfrmInStateSeqError": - x.XfrmInStateSeqError = value - case "XfrmInStateExpired": - x.XfrmInStateExpired = value - case "XfrmInStateInvalid": - x.XfrmInStateInvalid = value - case "XfrmInTmplMismatch": - x.XfrmInTmplMismatch = value - case "XfrmInNoPols": - x.XfrmInNoPols = value - case "XfrmInPolBlock": - x.XfrmInPolBlock = value - case "XfrmInPolError": - x.XfrmInPolError = value - case "XfrmOutError": - x.XfrmOutError = value - case "XfrmInStateMismatch": - x.XfrmInStateMismatch = value - case "XfrmOutBundleGenError": - x.XfrmOutBundleGenError = value - case "XfrmOutBundleCheckError": - x.XfrmOutBundleCheckError = value - case "XfrmOutNoStates": - x.XfrmOutNoStates = value - case "XfrmOutStateProtoError": - x.XfrmOutStateProtoError = value - case "XfrmOutStateModeError": - x.XfrmOutStateModeError = value - case "XfrmOutStateSeqError": - x.XfrmOutStateSeqError = value - case "XfrmOutStateExpired": - x.XfrmOutStateExpired = value - case "XfrmOutPolBlock": - x.XfrmOutPolBlock = value - case "XfrmOutPolDead": - x.XfrmOutPolDead = value - case "XfrmOutPolError": - x.XfrmOutPolError = value - case "XfrmFwdHdrError": - x.XfrmFwdHdrError = value - case "XfrmOutStateInvalid": - x.XfrmOutStateInvalid = value - case "XfrmAcquireError": - x.XfrmAcquireError = value - } - - } - - return x, s.Err() -} diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml deleted file mode 100644 index 20dd53b8d..000000000 --- a/vendor/github.com/satori/go.uuid/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go -sudo: false -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci -notifications: - email: false diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE deleted file mode 100644 index 926d54987..000000000 --- a/vendor/github.com/satori/go.uuid/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013-2018 by Maxim Bublis - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be 
-included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md deleted file mode 100644 index 7b1a722df..000000000 --- a/vendor/github.com/satori/go.uuid/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# UUID package for Go language - -[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) -[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid) -[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) - -This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. - -With 100% test coverage and benchmarks out of box. - -Supported versions: -* Version 1, based on timestamp and MAC address (RFC 4122) -* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) -* Version 3, based on MD5 hashing (RFC 4122) -* Version 4, based on random numbers (RFC 4122) -* Version 5, based on SHA-1 hashing (RFC 4122) - -## Installation - -Use the `go` command: - - $ go get github.com/satori/go.uuid - -## Requirements - -UUID package requires Go >= 1.2. - -## Example - -```go -package main - -import ( - "fmt" - "github.com/satori/go.uuid" -) - -func main() { - // Creating UUID Version 4 - u1 := uuid.NewV4() - fmt.Printf("UUIDv4: %s\n", u1) - - // Parsing UUID from string input - u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - if err != nil { - fmt.Printf("Something gone wrong: %s", err) - } - fmt.Printf("Successfully parsed: %s", u2) -} -``` - -## Documentation - -[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. - -## Links -* [RFC 4122](http://tools.ietf.org/html/rfc4122) -* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) - -## Copyright - -Copyright (C) 2013-2018 by Maxim Bublis . - -UUID package released under MIT License. -See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. 
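The README removed above documents the satori/go.uuid API that the deleted codec.go and generator.go below implement (NewV4 returning a UUID value directly, FromString, Must). A minimal sketch of that usage, assuming the package is still resolved as a module dependency rather than from vendor/; the namespace UUID and name passed to NewV5 are illustrative only:

```go
// Illustrative only: exercises the satori/go.uuid API shown in the removed
// files (NewV4, FromString, Must, NewV5); not part of this change.
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	// NewV4 in the copy removed here returns the UUID directly (no error).
	u1 := uuid.NewV4()
	fmt.Printf("UUIDv4: %s\n", u1)

	// FromString accepts canonical, hash-like, braced, and URN forms.
	u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		fmt.Printf("parse failed: %s\n", err)
		return
	}
	fmt.Printf("parsed: %s\n", u2)

	// Must panics on error; the package itself uses it for its namespace UUIDs.
	ns := uuid.Must(uuid.FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
	fmt.Printf("UUIDv5: %s\n", uuid.NewV5(ns, "example.com"))
}
```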
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go deleted file mode 100644 index 656892c53..000000000 --- a/vendor/github.com/satori/go.uuid/codec.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "bytes" - "encoding/hex" - "fmt" -) - -// FromBytes returns UUID converted from raw byte slice input. -// It will return error if the slice isn't 16 bytes long. -func FromBytes(input []byte) (u UUID, err error) { - err = u.UnmarshalBinary(input) - return -} - -// FromBytesOrNil returns UUID converted from raw byte slice input. -// Same behavior as FromBytes, but returns a Nil UUID on error. -func FromBytesOrNil(input []byte) UUID { - uuid, err := FromBytes(input) - if err != nil { - return Nil - } - return uuid -} - -// FromString returns UUID parsed from string input. -// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (u UUID, err error) { - err = u.UnmarshalText([]byte(input)) - return -} - -// FromStringOrNil returns UUID parsed from string input. -// Same behavior as FromString, but returns a Nil UUID on error. -func FromStringOrNil(input string) UUID { - uuid, err := FromString(input) - if err != nil { - return Nil - } - return uuid -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by String. -func (u UUID) MarshalText() (text []byte, err error) { - text = []byte(u.String()) - return -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-// Following formats are supported: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "6ba7b8109dad11d180b400c04fd430c8" -// ABNF for supported UUID text representation follows: -// uuid := canonical | hashlike | braced | urn -// plain := canonical | hashlike -// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct -// hashlike := 12hexoct -// braced := '{' plain '}' -// urn := URN ':' UUID-NID ':' plain -// URN := 'urn' -// UUID-NID := 'uuid' -// 12hexoct := 6hexoct 6hexoct -// 6hexoct := 4hexoct 2hexoct -// 4hexoct := 2hexoct 2hexoct -// 2hexoct := hexoct hexoct -// hexoct := hexdig hexdig -// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | -// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | -// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' -func (u *UUID) UnmarshalText(text []byte) (err error) { - switch len(text) { - case 32: - return u.decodeHashLike(text) - case 36: - return u.decodeCanonical(text) - case 38: - return u.decodeBraced(text) - case 41: - fallthrough - case 45: - return u.decodeURN(text) - default: - return fmt.Errorf("uuid: incorrect UUID length: %s", text) - } -} - -// decodeCanonical decodes UUID string in format -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". -func (u *UUID) decodeCanonical(t []byte) (err error) { - if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { - return fmt.Errorf("uuid: incorrect UUID format %s", t) - } - - src := t[:] - dst := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 { - src = src[1:] // skip dash - } - _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup]) - if err != nil { - return - } - src = src[byteGroup:] - dst = dst[byteGroup/2:] - } - - return -} - -// decodeHashLike decodes UUID string in format -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeHashLike(t []byte) (err error) { - src := t[:] - dst := u[:] - - if _, err = hex.Decode(dst, src); err != nil { - return err - } - return -} - -// decodeBraced decodes UUID string in format -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format -// "{6ba7b8109dad11d180b400c04fd430c8}". -func (u *UUID) decodeBraced(t []byte) (err error) { - l := len(t) - - if t[0] != '{' || t[l-1] != '}' { - return fmt.Errorf("uuid: incorrect UUID format %s", t) - } - - return u.decodePlain(t[1 : l-1]) -} - -// decodeURN decodes UUID string in format -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeURN(t []byte) (err error) { - total := len(t) - - urn_uuid_prefix := t[:9] - - if !bytes.Equal(urn_uuid_prefix, urnPrefix) { - return fmt.Errorf("uuid: incorrect UUID format: %s", t) - } - - return u.decodePlain(t[9:total]) -} - -// decodePlain decodes UUID string in canonical format -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodePlain(t []byte) (err error) { - switch len(t) { - case 32: - return u.decodeHashLike(t) - case 36: - return u.decodeCanonical(t) - default: - return fmt.Errorf("uuid: incorrrect UUID length: %s", t) - } -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (u UUID) MarshalBinary() (data []byte, err error) { - data = u.Bytes() - return -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return error if the slice isn't 16 bytes long. 
-func (u *UUID) UnmarshalBinary(data []byte) (err error) { - if len(data) != Size { - err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - return - } - copy(u[:], data) - - return -} diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go deleted file mode 100644 index 3f2f1da2d..000000000 --- a/vendor/github.com/satori/go.uuid/generator.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "encoding/binary" - "hash" - "net" - "os" - "sync" - "time" -) - -// Difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). -const epochStart = 122192928000000000 - -var ( - global = newDefaultGenerator() - - epochFunc = unixTimeFunc - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - -// NewV1 returns UUID based on current timestamp and MAC address. -func NewV1() UUID { - return global.NewV1() -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func NewV2(domain byte) UUID { - return global.NewV2(domain) -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - return global.NewV3(ns, name) -} - -// NewV4 returns random generated UUID. -func NewV4() UUID { - return global.NewV4() -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func NewV5(ns UUID, name string) UUID { - return global.NewV5(ns, name) -} - -// Generator provides interface for generating UUIDs. -type Generator interface { - NewV1() UUID - NewV2(domain byte) UUID - NewV3(ns UUID, name string) UUID - NewV4() UUID - NewV5(ns UUID, name string) UUID -} - -// Default generator implementation. -type generator struct { - storageOnce sync.Once - storageMutex sync.Mutex - - lastTime uint64 - clockSequence uint16 - hardwareAddr [6]byte -} - -func newDefaultGenerator() Generator { - return &generator{} -} - -// NewV1 returns UUID based on current timestamp and MAC address. 
-func (g *generator) NewV1() UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := g.getStorage() - - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - copy(u[10:], hardwareAddr) - - u.SetVersion(V1) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func (g *generator) NewV2(domain byte) UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := g.getStorage() - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[0:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[0:], posixGID) - } - - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - u[9] = domain - - copy(u[10:], hardwareAddr) - - u.SetVersion(V2) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func (g *generator) NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(V3) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV4 returns random generated UUID. -func (g *generator) NewV4() UUID { - u := UUID{} - g.safeRandom(u[:]) - u.SetVersion(V4) - u.SetVariant(VariantRFC4122) - - return u -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func (g *generator) NewV5(ns UUID, name string) UUID { - u := newFromHash(sha1.New(), ns, name) - u.SetVersion(V5) - u.SetVariant(VariantRFC4122) - - return u -} - -func (g *generator) initStorage() { - g.initClockSequence() - g.initHardwareAddr() -} - -func (g *generator) initClockSequence() { - buf := make([]byte, 2) - g.safeRandom(buf) - g.clockSequence = binary.BigEndian.Uint16(buf) -} - -func (g *generator) initHardwareAddr() { - interfaces, err := net.Interfaces() - if err == nil { - for _, iface := range interfaces { - if len(iface.HardwareAddr) >= 6 { - copy(g.hardwareAddr[:], iface.HardwareAddr) - return - } - } - } - - // Initialize hardwareAddr randomly in case - // of real network interfaces absence - g.safeRandom(g.hardwareAddr[:]) - - // Set multicast bit as recommended in RFC 4122 - g.hardwareAddr[0] |= 0x01 -} - -func (g *generator) safeRandom(dest []byte) { - if _, err := rand.Read(dest); err != nil { - panic(err) - } -} - -// Returns UUID v1/v2 storage state. -// Returns epoch timestamp, clock sequence, and hardware address. -func (g *generator) getStorage() (uint64, uint16, []byte) { - g.storageOnce.Do(g.initStorage) - - g.storageMutex.Lock() - defer g.storageMutex.Unlock() - - timeNow := epochFunc() - // Clock changed backwards since last UUID generation. - // Should increase clock sequence. - if timeNow <= g.lastTime { - g.clockSequence++ - } - g.lastTime = timeNow - - return timeNow, g.clockSequence, g.hardwareAddr[:] -} - -// Returns difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and current time. -// This is default epoch calculation function. -func unixTimeFunc() uint64 { - return epochStart + uint64(time.Now().UnixNano()/100) -} - -// Returns UUID based on hashing of namespace UUID and name. 
-func newFromHash(h hash.Hash, ns UUID, name string) UUID { - u := UUID{} - h.Write(ns[:]) - h.Write([]byte(name)) - copy(u[:], h.Sum(nil)) - - return u -} diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go deleted file mode 100644 index 56759d390..000000000 --- a/vendor/github.com/satori/go.uuid/sql.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Value implements the driver.Valuer interface. -func (u UUID) Value() (driver.Value, error) { - return u.String(), nil -} - -// Scan implements the sql.Scanner interface. -// A 16-byte slice is handled by UnmarshalBinary, while -// a longer byte slice or a string is handled by UnmarshalText. -func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - if len(src) == Size { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// NullUUID can be used with the standard sql package to represent a -// UUID value that can be NULL in the database -type NullUUID struct { - UUID UUID - Valid bool -} - -// Value implements the driver.Valuer interface. -func (u NullUUID) Value() (driver.Value, error) { - if !u.Valid { - return nil, nil - } - // Delegate to UUID Value function - return u.UUID.Value() -} - -// Scan implements the sql.Scanner interface. 
-func (u *NullUUID) Scan(src interface{}) error { - if src == nil { - u.UUID, u.Valid = Nil, false - return nil - } - - // Delegate to UUID Scan function - u.Valid = true - return u.UUID.Scan(src) -} diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go deleted file mode 100644 index a2b8e2ca2..000000000 --- a/vendor/github.com/satori/go.uuid/uuid.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) 2013-2018 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package uuid provides implementation of Universally Unique Identifier (UUID). -// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and -// version 2 (as specified in DCE 1.1). -package uuid - -import ( - "bytes" - "encoding/hex" -) - -// Size of a UUID in bytes. -const Size = 16 - -// UUID representation compliant with specification -// described in RFC 4122. -type UUID [Size]byte - -// UUID versions -const ( - _ byte = iota - V1 - V2 - V3 - V4 - V5 -) - -// UUID layout variants. -const ( - VariantNCS byte = iota - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// UUID DCE domains. -const ( - DomainPerson = iota - DomainGroup - DomainOrg -) - -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) - -// Nil is special form of UUID that is specified to have all -// 128 bits set to zero. -var Nil = UUID{} - -// Predefined namespace UUIDs. -var ( - NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) -) - -// Equal returns true if u1 and u2 equals, otherwise returns false. -func Equal(u1 UUID, u2 UUID) bool { - return bytes.Equal(u1[:], u2[:]) -} - -// Version returns algorithm version used to generate UUID. -func (u UUID) Version() byte { - return u[6] >> 4 -} - -// Variant returns UUID layout variant. -func (u UUID) Variant() byte { - switch { - case (u[8] >> 7) == 0x00: - return VariantNCS - case (u[8] >> 6) == 0x02: - return VariantRFC4122 - case (u[8] >> 5) == 0x06: - return VariantMicrosoft - case (u[8] >> 5) == 0x07: - fallthrough - default: - return VariantFuture - } -} - -// Bytes returns bytes slice representation of UUID. 
-func (u UUID) Bytes() []byte { - return u[:] -} - -// Returns canonical string representation of UUID: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = '-' - hex.Encode(buf[9:13], u[4:6]) - buf[13] = '-' - hex.Encode(buf[14:18], u[6:8]) - buf[18] = '-' - hex.Encode(buf[19:23], u[8:10]) - buf[23] = '-' - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} - -// SetVersion sets version bits. -func (u *UUID) SetVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -// SetVariant sets variant bits. -func (u *UUID) SetVariant(v byte) { - switch v { - case VariantNCS: - u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) - case VariantRFC4122: - u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) - case VariantMicrosoft: - u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) - case VariantFuture: - fallthrough - default: - u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) - } -} - -// Must is a helper that wraps a call to a function returning (UUID, error) -// and panics if the error is non-nil. It is intended for use in variable -// initializations such as -// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")); -func Must(u UUID, err error) UUID { - if err != nil { - panic(err) - } - return u -} diff --git a/vendor/github.com/segmentio/kafka-go/.gitignore b/vendor/github.com/segmentio/kafka-go/.gitignore deleted file mode 100644 index c264a0aea..000000000 --- a/vendor/github.com/segmentio/kafka-go/.gitignore +++ /dev/null @@ -1,34 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/kafkacli - -# Emacs -*~ - -# Goland -.idea - -# govendor -/vendor/*/ diff --git a/vendor/github.com/segmentio/kafka-go/LICENSE b/vendor/github.com/segmentio/kafka-go/LICENSE deleted file mode 100644 index 09e136c51..000000000 --- a/vendor/github.com/segmentio/kafka-go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 Segment - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
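The uuid.go deleted above documents how the version and variant are packed into bytes 6 and 8 of the UUID. A small, purely illustrative sketch that reads those bits back through the package's accessors (the input UUID is the well-known DNS namespace value, a version-1 RFC 4122 UUID):

```go
// Illustrative only: checks the version/variant bit layout described in the
// deleted uuid.go (high nibble of u[6], top bits of u[8]).
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	u := uuid.Must(uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))

	fmt.Println(u.Version())                        // 1: high nibble of u[6]
	fmt.Println(u.Variant() == uuid.VariantRFC4122) // true: top bits of u[8] are 10
}
```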
diff --git a/vendor/github.com/segmentio/kafka-go/README.md b/vendor/github.com/segmentio/kafka-go/README.md deleted file mode 100644 index 8e51f0b2f..000000000 --- a/vendor/github.com/segmentio/kafka-go/README.md +++ /dev/null @@ -1,301 +0,0 @@ -# kafka-go [![CircleCI](https://circleci.com/gh/segmentio/kafka-go.svg?style=shield)](https://circleci.com/gh/segmentio/kafka-go) [![Go Report Card](https://goreportcard.com/badge/github.com/segmentio/kafka-go)](https://goreportcard.com/report/github.com/segmentio/kafka-go) [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go) - -## Motivations - -We rely on both Go and Kafka a lot at Segment. Unfortunately, the state of the Go -client libraries for Kafka at the time of this writing was not ideal. The available -options were: - -- [sarama](https://github.com/Shopify/sarama), which is by far the most popular -but is quite difficult to work with. It is poorly documented, the API exposes -low level concepts of the Kafka protocol, and it doesn't support recent Go features -like [contexts](https://golang.org/pkg/context/). It also passes all values as -pointers which causes large numbers of dynamic memory allocations, more frequent -garbage collections, and higher memory usage. - -- [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go) is a -cgo based wrapper around [librdkafka](https://github.com/edenhill/librdkafka), -which means it introduces a dependency to a C library on all Go code that uses -the package. It has much better documentation than sarama but still lacks support -for Go contexts. - -- [goka](https://github.com/lovoo/goka) is a more recent Kafka client for Go -which focuses on a specific usage pattern. It provides abstractions for using Kafka -as a message passing bus between services rather than an ordered log of events, but -this is not the typical use case of Kafka for us at Segment. The package also -depends on sarama for all interactions with Kafka. - -This is where `kafka-go` comes into play. It provides both low and high level -APIs for interacting with Kafka, mirroring concepts and implementing interfaces of -the Go standard library to make it easy to use and integrate with existing -software. - -## Connection [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Conn) - -The `Conn` type is the core of the `kafka-go` package. It wraps around a raw -network connection to expose a low-level API to a Kafka server. 
- -Here are some examples showing typical use of a connection object: -```go -// to produce messages -topic := "my-topic" -partition := 0 - -conn, _ := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition) - -conn.SetWriteDeadline(time.Now().Add(10*time.Second)) -conn.WriteMessages( - kafka.Message{Value: []byte("one!")}, - kafka.Message{Value: []byte("two!")}, - kafka.Message{Value: []byte("three!")}, -) - -conn.Close() -``` -```go -// to consume messages -topic := "my-topic" -partition := 0 - -conn, _ := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition) - -conn.SetReadDeadline(time.Now().Add(10*time.Second)) -batch := conn.ReadBatch(10e3, 1e6) // fetch 10KB min, 1MB max - -b := make([]byte, 10e3) // 10KB max per message -for { - _, err := batch.Read(b) - if err != nil { - break - } - fmt.Println(string(b)) -} - -batch.Close() -conn.Close() -``` - -Because it is low level, the `Conn` type turns out to be a great building block -for higher level abstractions, like the `Reader` for example. - -## Reader [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Reader) - -A `Reader` is another concept exposed by the `kafka-go` package, which intends -to make it simpler to implement the typical use case of consuming from a single -topic-partition pair. -A `Reader` also automatically handles reconnections and offset management, and -exposes an API that supports asynchronous cancellations and timeouts using Go -contexts. - -```go -// make a new reader that consumes from topic-A, partition 0, at offset 42 -r := kafka.NewReader(kafka.ReaderConfig{ - Brokers: []string{"localhost:9092"}, - Topic: "topic-A", - Partition: 0, - MinBytes: 10e3, // 10KB - MaxBytes: 10e6, // 10MB -}) -r.SetOffset(42) - -for { - m, err := r.ReadMessage(context.Background()) - if err != nil { - break - } - fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value)) -} - -r.Close() -``` - -### Consumer Groups - -```kafka-go``` also supports Kafka consumer groups including broker managed offsets. -To enable consumer groups, simplify specify the GroupID in the ReaderConfig. - -ReadMessage automatically commits offsets when using consumer groups. - -```go -// make a new reader that consumes from topic-A -r := kafka.NewReader(kafka.ReaderConfig{ - Brokers: []string{"localhost:9092"}, - GroupID: "consumer-group-id", - Topic: "topic-A", - MinBytes: 10e3, // 10KB - MaxBytes: 10e6, // 10MB -}) - -for { - m, err := r.ReadMessage(context.Background()) - if err != nil { - break - } - fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value)) -} - -r.Close() -``` - -There are a number of limitations when using consumer groups: - -* ```(*Reader).SetOffset``` will return an error when GroupID is set -* ```(*Reader).Offset``` will always return ```-1``` when GroupID is set -* ```(*Reader).Lag``` will always return ```-1``` when GroupID is set -* ```(*Reader).ReadLag``` will return an error when GroupID is set -* ```(*Reader).Stats``` will return a partition of ```-1``` when GroupID is set - -### Explicit Commits - -```kafka-go``` also supports explicit commits. Instead of calling ```ReadMessage```, -call ```FetchMessage``` followed by ```CommitMessages```. 
- -```go -ctx := context.Background() -for { - m, err := r.FetchMessage(ctx) - if err != nil { - break - } - fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value)) - r.CommitMessages(ctx, m) -} -``` - -### Managing Commits - -By default, CommitMessages will synchronously commit offsets to Kafka. For -improved performance, you can instead periodically commit offsets to Kafka -by setting CommitInterval on the ReaderConfig. - - -```go -// make a new reader that consumes from topic-A -r := kafka.NewReader(kafka.ReaderConfig{ - Brokers: []string{"localhost:9092"}, - GroupID: "consumer-group-id", - Topic: "topic-A", - MinBytes: 10e3, // 10KB - MaxBytes: 10e6, // 10MB - CommitInterval: time.Second, // flushes commits to Kafka every second -}) -``` - -## Writer [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Writer) - -To produce messages to Kafka, a program may use the low-level `Conn` API, but -the package also provides a higher level `Writer` type which is more appropriate -to use in most cases as it provides additional features: - -- Automatic retries and reconnections on errors. -- Configurable distribution of messages across available partitions. -- Synchronous or asynchronous writes of messages to Kafka. -- Asynchronous cancellation using contexts. -- Flushing of pending messages on close to support graceful shutdowns. - -```go -// make a writer that produces to topic-A, using the least-bytes distribution -w := kafka.NewWriter(kafka.WriterConfig{ - Brokers: []string{"localhost:9092"}, - Topic: "topic-A", - Balancer: &kafka.LeastBytes{}, -}) - -w.WriteMessages(context.Background(), - kafka.Message{ - Key: []byte("Key-A"), - Value: []byte("Hello World!"), - }, - kafka.Message{ - Key: []byte("Key-B"), - Value: []byte("One!"), - }, - kafka.Message{ - Key: []byte("Key-C"), - Value: []byte("Two!"), - }, -) - -w.Close() -``` - -**Note:** Even though kafka.Message contain ```Topic``` and ```Partition``` fields, they **MUST NOT** be -set when writing messages. They are intended for read use only. - -### Compatibility with Sarama - -If you're switching from Sarama and need/want to use the same algorithm for message -partitioning, you can use the ```kafka.Hash``` balancer. ```kafka.Hash``` routes -messages to the same partitions that sarama's default partitioner would route to. - -```go -w := kafka.NewWriter(kafka.WriterConfig{ - Brokers: []string{"localhost:9092"}, - Topic: "topic-A", - Balancer: &kafka.Hash{}, -}) -``` - -### Compression - -Compression can be enable on the writer : - -```go -w := kafka.NewWriter(kafka.WriterConfig{ - Brokers: []string{"localhost:9092"}, - Topic: "topic-A", - CompressionCodec: snappy.NewCompressionCodec(), -}) -``` - -The reader will by default figure out if the consumed messages are compressed by intepreting the message attributes. - -## TLS Support - -For a bare bones Conn type or in the Reader/Writer configs you can specify a dialer option for TLS support. If the TLS field is nil, it will not connect with TLS. 
- -### Connection - -```go -dialer := &kafka.Dialer{ - Timeout: 10 * time.Second, - DualStack: true, - TLS: &tls.Config{...tls config...}, -} - -conn, err := dialer.DialContext(ctx, "tcp", "localhost:9093") -``` - -### Reader - -```go -dialer := &kafka.Dialer{ - Timeout: 10 * time.Second, - DualStack: true, - TLS: &tls.Config{...tls config...}, -} - -r := kafka.NewReader(kafka.ReaderConfig{ - Brokers: []string{"localhost:9093"}, - GroupID: "consumer-group-id", - Topic: "topic-A", - Dialer: dialer, -}) -``` - -### Writer - -```go -dialer := &kafka.Dialer{ - Timeout: 10 * time.Second, - DualStack: true, - TLS: &tls.Config{...tls config...}, -} - -w := kafka.NewWriter(kafka.WriterConfig{ - Brokers: []string{"localhost:9093"}, - Topic: "topic-A", - Balancer: &kafka.Hash{}, - Dialer: dialer, -}) -``` diff --git a/vendor/github.com/segmentio/kafka-go/balancer.go b/vendor/github.com/segmentio/kafka-go/balancer.go deleted file mode 100644 index cac92417e..000000000 --- a/vendor/github.com/segmentio/kafka-go/balancer.go +++ /dev/null @@ -1,160 +0,0 @@ -package kafka - -import ( - "hash" - "hash/fnv" - "sort" - "sync" -) - -// The Balancer interface provides an abstraction of the message distribution -// logic used by Writer instances to route messages to the partitions available -// on a kafka cluster. -// -// Instances of Balancer do not have to be safe to use concurrently by multiple -// goroutines, the Writer implementation ensures that calls to Balance are -// synchronized. -type Balancer interface { - // Balance receives a message and a set of available partitions and - // returns the partition number that the message should be routed to. - // - // An application should refrain from using a balancer to manage multiple - // sets of partitions (from different topics for examples), use one balancer - // instance for each partition set, so the balancer can detect when the - // partitions change and assume that the kafka topic has been rebalanced. - Balance(msg Message, partitions ...int) (partition int) -} - -// BalancerFunc is an implementation of the Balancer interface that makes it -// possible to use regular functions to distribute messages across partitions. -type BalancerFunc func(Message, ...int) int - -// Balance calls f, satisfies the Balancer interface. -func (f BalancerFunc) Balance(msg Message, partitions ...int) int { - return f(msg, partitions...) -} - -// RoundRobin is an Balancer implementation that equally distributes messages -// across all available partitions. -type RoundRobin struct { - offset uint64 -} - -// Balance satisfies the Balancer interface. -func (rr *RoundRobin) Balance(msg Message, partitions ...int) int { - length := uint64(len(partitions)) - offset := rr.offset - rr.offset++ - return partitions[offset%length] -} - -// LeastBytes is a Balancer implementation that routes messages to the partition -// that has received the least amount of data. -// -// Note that no coordination is done between multiple producers, having good -// balancing relies on the fact that each producer using a LeastBytes balancer -// should produce well balanced messages. -type LeastBytes struct { - counters []leastBytesCounter -} - -type leastBytesCounter struct { - partition int - bytes uint64 -} - -// Balance satisfies the Balancer interface. -func (lb *LeastBytes) Balance(msg Message, partitions ...int) int { - for _, p := range partitions { - if c := lb.counterOf(p); c == nil { - lb.counters = lb.makeCounters(partitions...) 
- break - } - } - - minBytes := lb.counters[0].bytes - minIndex := 0 - - for i, c := range lb.counters[1:] { - if c.bytes < minBytes { - minIndex = i + 1 - minBytes = c.bytes - } - } - - c := &lb.counters[minIndex] - c.bytes += uint64(len(msg.Key)) + uint64(len(msg.Value)) - return c.partition -} - -func (lb *LeastBytes) counterOf(partition int) *leastBytesCounter { - i := sort.Search(len(lb.counters), func(i int) bool { - return lb.counters[i].partition >= partition - }) - if i == len(lb.counters) || lb.counters[i].partition != partition { - return nil - } - return &lb.counters[i] -} - -func (lb *LeastBytes) makeCounters(partitions ...int) (counters []leastBytesCounter) { - counters = make([]leastBytesCounter, len(partitions)) - - for i, p := range partitions { - counters[i].partition = p - } - - sort.Slice(counters, func(i int, j int) bool { - return counters[i].partition < counters[j].partition - }) - return -} - -var ( - fnv1aPool = &sync.Pool{ - New: func() interface{} { - return fnv.New32a() - }, - } -) - -// Hash is a Balancer that uses the provided hash function to determine which -// partition to route messages to. This ensures that messages with the same key -// are routed to the same partition. -// -// The logic to calculate the partition is: -// -// hasher.Sum32() % len(partitions) => partition -// -// By default, Hash uses the FNV-1a algorithm. This is the same algorithm used -// by the Sarama Producer and ensures that messages produced by kafka-go will -// be delivered to the same topics that the Sarama producer would be delivered to -type Hash struct { - rr RoundRobin - Hasher hash.Hash32 -} - -func (h *Hash) Balance(msg Message, partitions ...int) (partition int) { - if msg.Key == nil { - return h.rr.Balance(msg, partitions...) - } - - hasher := h.Hasher - if hasher == nil { - hasher = fnv1aPool.Get().(hash.Hash32) - defer fnv1aPool.Put(hasher) - } - - hasher.Reset() - if _, err := hasher.Write(msg.Key); err != nil { - panic(err) - } - - // uses same algorithm that Sarama's hashPartitioner uses - partition = int(hasher.Sum32()) % len(partitions) - if partition < 0 { - partition = -partition - } - - return -} diff --git a/vendor/github.com/segmentio/kafka-go/batch.go b/vendor/github.com/segmentio/kafka-go/batch.go deleted file mode 100644 index c5e998c51..000000000 --- a/vendor/github.com/segmentio/kafka-go/batch.go +++ /dev/null @@ -1,213 +0,0 @@ -package kafka - -import ( - "bufio" - "io" - "sync" - "time" -) - -// A Batch is an iterator over a sequence of messages fetched from a kafka -// server. -// -// Batches are created by calling (*Conn).ReadBatch. They hold a internal lock -// on the connection, which is released when the batch is closed. Failing to -// call a batch's Close method will likely result in a dead-lock when trying to -// use the connection. -// -// Batches are safe to use concurrently from multiple goroutines. -type Batch struct { - mutex sync.Mutex - conn *Conn - lock *sync.Mutex - msgs *messageSetReader - deadline time.Time - throttle time.Duration - topic string - partition int - offset int64 - highWaterMark int64 - err error -} - -// Throttle gives the throttling duration applied by the kafka server on the -// connection. -func (batch *Batch) Throttle() time.Duration { - return batch.throttle -} - -// Watermark returns the current highest watermark in a partition. -func (batch *Batch) HighWaterMark() int64 { - return batch.highWaterMark -} - -// Offset returns the offset of the next message in the batch. 
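Since a `Writer` accepts any `Balancer`, `BalancerFunc` is a convenient way to sketch a custom routing rule. The example below is only illustrative and rests on assumptions: a broker at `localhost:9092`, a placeholder topic, and an arbitrary key-length rule standing in for real routing logic.

```go
package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	// An illustrative BalancerFunc: short keys go to the first listed
	// partition, everything else to the last one.
	byKeyLength := kafka.BalancerFunc(func(msg kafka.Message, partitions ...int) int {
		if len(msg.Key) < 8 {
			return partitions[0]
		}
		return partitions[len(partitions)-1]
	})

	w := kafka.NewWriter(kafka.WriterConfig{
		Brokers:  []string{"localhost:9092"},
		Topic:    "topic-A",
		Balancer: byKeyLength,
	})
	defer w.Close()

	if err := w.WriteMessages(context.Background(),
		kafka.Message{Key: []byte("id"), Value: []byte("short key")},
		kafka.Message{Key: []byte("a-much-longer-key"), Value: []byte("long key")},
	); err != nil {
		log.Fatal(err)
	}
}
```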
-func (batch *Batch) Offset() int64 { - batch.mutex.Lock() - offset := batch.offset - batch.mutex.Unlock() - return offset -} - -// Close closes the batch, releasing the connection lock and returning an error -// if reading the batch failed for any reason. -func (batch *Batch) Close() error { - batch.mutex.Lock() - err := batch.close() - batch.mutex.Unlock() - return err -} - -func (batch *Batch) close() (err error) { - conn := batch.conn - lock := batch.lock - - batch.conn = nil - batch.lock = nil - if batch.msgs != nil { - batch.msgs.discard() - } - - if err = batch.err; err == io.EOF { - err = nil - } - - if conn != nil { - conn.rdeadline.unsetConnReadDeadline() - conn.mutex.Lock() - conn.offset = batch.offset - conn.mutex.Unlock() - - if err != nil { - if _, ok := err.(Error); !ok && err != io.ErrShortBuffer { - conn.Close() - } - } - } - - if lock != nil { - lock.Unlock() - } - - return -} - -// Read reads the value of the next message from the batch into b, returning the -// number of bytes read, or an error if the next message couldn't be read. -// -// If an error is returned the batch cannot be used anymore and calling Read -// again will keep returning that error. All errors except io.EOF (indicating -// that the program consumed all messages from the batch) are also returned by -// Close. -// -// The method fails with io.ErrShortBuffer if the buffer passed as argument is -// too small to hold the message value. -func (batch *Batch) Read(b []byte) (int, error) { - n := 0 - - batch.mutex.Lock() - offset := batch.offset - - _, _, err := batch.readMessage( - func(r *bufio.Reader, size int, nbytes int) (int, error) { - if nbytes < 0 { - return size, nil - } - return discardN(r, size, nbytes) - }, - func(r *bufio.Reader, size int, nbytes int) (int, error) { - if nbytes < 0 { - return size, nil - } - n = nbytes // return value - if nbytes > len(b) { - nbytes = len(b) - } - nbytes, err := io.ReadFull(r, b[:nbytes]) - if err != nil { - return size - nbytes, err - } - return discardN(r, size-nbytes, n-nbytes) - }, - ) - - if err == nil && n > len(b) { - n, err = len(b), io.ErrShortBuffer - batch.err = io.ErrShortBuffer - batch.offset = offset // rollback - } - - batch.mutex.Unlock() - return n, err -} - -// ReadMessage reads and return the next message from the batch. -// -// Because this method allocate memory buffers for the message key and value -// it is less memory-efficient than Read, but has the advantage of never -// failing with io.ErrShortBuffer. 
-func (batch *Batch) ReadMessage() (Message, error) { - msg := Message{} - batch.mutex.Lock() - - offset, timestamp, err := batch.readMessage( - func(r *bufio.Reader, size int, nbytes int) (remain int, err error) { - msg.Key, remain, err = readNewBytes(r, size, nbytes) - return - }, - func(r *bufio.Reader, size int, nbytes int) (remain int, err error) { - msg.Value, remain, err = readNewBytes(r, size, nbytes) - return - }, - ) - - batch.mutex.Unlock() - msg.Topic = batch.topic - msg.Partition = batch.partition - msg.Offset = offset - msg.Time = timestampToTime(timestamp) - - return msg, err -} - -func (batch *Batch) readMessage( - key func(*bufio.Reader, int, int) (int, error), - val func(*bufio.Reader, int, int) (int, error), -) (offset int64, timestamp int64, err error) { - if err = batch.err; err != nil { - return - } - - offset, timestamp, err = batch.msgs.readMessage(batch.offset, key, val) - switch err { - case nil: - batch.offset = offset + 1 - case errShortRead: - // As an "optimization" kafka truncates the returned response after - // producing MaxBytes, which could then cause the code to return - // errShortRead. - err = batch.msgs.discard() - switch { - case err != nil: - batch.err = err - case batch.msgs.remaining() == 0: - // Because we use the adjusted deadline we could end up returning - // before the actual deadline occurred. This is necessary otherwise - // timing out the connection for real could end up leaving it in an - // unpredictable state, which would require closing it. - // This design decision was made to maximize the chances of keeping - // the connection open, the trade off being to lose precision on the - // read deadline management. - if !batch.deadline.IsZero() && time.Now().After(batch.deadline) { - err = RequestTimedOut - } else { - err = io.EOF - } - batch.err = err - } - default: - batch.err = err - } - - return -} diff --git a/vendor/github.com/segmentio/kafka-go/commit.go b/vendor/github.com/segmentio/kafka-go/commit.go deleted file mode 100644 index e7740d58a..000000000 --- a/vendor/github.com/segmentio/kafka-go/commit.go +++ /dev/null @@ -1,39 +0,0 @@ -package kafka - -// A commit represents the instruction of publishing an update of the last -// offset read by a program for a topic and partition. -type commit struct { - topic string - partition int - offset int64 -} - -// makeCommit builds a commit value from a message, the resulting commit takes -// its topic, partition, and offset from the message. -func makeCommit(msg Message) commit { - return commit{ - topic: msg.Topic, - partition: msg.Partition, - offset: msg.Offset + 1, - } -} - -// makeCommits generates a slice of commits from a list of messages, it extracts -// the topic, partition, and offset of each message and builds the corresponding -// commit slice. -func makeCommits(msgs ...Message) []commit { - commits := make([]commit, len(msgs)) - - for i, m := range msgs { - commits[i] = makeCommit(m) - } - - return commits -} - -// commitRequest is the data type exchanged between the CommitMessages method -// and internals of the reader's implementation. 
-type commitRequest struct { - commits []commit - errch chan<- error -} diff --git a/vendor/github.com/segmentio/kafka-go/compression.go b/vendor/github.com/segmentio/kafka-go/compression.go deleted file mode 100644 index 386138b68..000000000 --- a/vendor/github.com/segmentio/kafka-go/compression.go +++ /dev/null @@ -1,52 +0,0 @@ -package kafka - -import ( - "errors" - "sync" -) - -var errUnknownCodec = errors.New("invalid codec") - -var codecs = make(map[int8]CompressionCodec) -var codecsMutex sync.RWMutex - -// RegisterCompressionCodec registers a compression codec so it can be used by a Writer. -func RegisterCompressionCodec(codec func() CompressionCodec) { - c := codec() - codecsMutex.Lock() - codecs[c.Code()] = c - codecsMutex.Unlock() -} - -// resolveCodec looks up a codec by Code() -func resolveCodec(code int8) (codec CompressionCodec, err error) { - codecsMutex.RLock() - codec = codecs[code] - codecsMutex.RUnlock() - - if codec == nil { - err = errUnknownCodec - } - return -} - -// CompressionCodec represents a compression codec to encode and decode -// the messages. -// See : https://cwiki.apache.org/confluence/display/KAFKA/Compression -// -// A CompressionCodec must be safe for concurrent access by multiple go -// routines. -type CompressionCodec interface { - // Code returns the compression codec code - Code() int8 - - // Encode encodes the src data - Encode(src []byte) ([]byte, error) - - // Decode decodes the src data - Decode(src []byte) ([]byte, error) -} - -const compressionCodecMask int8 = 0x03 -const DefaultCompressionLevel int = -1 -const CompressionNoneCode = 0 diff --git a/vendor/github.com/segmentio/kafka-go/conn.go b/vendor/github.com/segmentio/kafka-go/conn.go deleted file mode 100644 index 853ea6945..000000000 --- a/vendor/github.com/segmentio/kafka-go/conn.go +++ /dev/null @@ -1,1074 +0,0 @@ -package kafka - -import ( - "bufio" - "errors" - "fmt" - "math" - "net" - "os" - "path/filepath" - "runtime" - "sync" - "sync/atomic" - "time" -) - -var ( - errInvalidWriteTopic = errors.New("writes must NOT set Topic on kafka.Message") - errInvalidWritePartition = errors.New("writes must NOT set Partition on kafka.Message") -) - -// Broker carries the metadata associated with a kafka broker. -type Broker struct { - Host string - Port int - ID int - Rack string -} - -// Partition carries the metadata associated with a kafka partition. -type Partition struct { - Topic string - Leader Broker - Replicas []Broker - Isr []Broker - ID int -} - -// Conn represents a connection to a kafka broker. -// -// Instances of Conn are safe to use concurrently from multiple goroutines. -type Conn struct { - // base network connection - conn net.Conn - - // offset management (synchronized on the mutex field) - mutex sync.Mutex - offset int64 - - // read buffer (synchronized on rlock) - rlock sync.Mutex - rbuf bufio.Reader - - // write buffer (synchronized on wlock) - wlock sync.Mutex - wbuf bufio.Writer - - // deadline management - wdeadline connDeadline - rdeadline connDeadline - - // immutable values of the connection object - clientID string - topic string - partition int32 - fetchMaxBytes int32 - fetchMinSize int32 - - // correlation ID generator (synchronized on wlock) - correlationID int32 - - // number of replica acks required when publishing to a partition - requiredAcks int32 -} - -// ConnConfig is a configuration object used to create new instances of Conn. 
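The `CompressionCodec` interface above can be satisfied by user code as well as by the bundled codecs. A minimal sketch using `compress/gzip` from the standard library; the attribute code `1` is the value the Kafka protocol assigns to gzip, and the broker address and topic below are placeholders.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"io"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

// gzipCodec is an illustrative CompressionCodec built on the standard library.
type gzipCodec struct{}

// Code returns 1, the attribute value the Kafka protocol assigns to gzip.
func (gzipCodec) Code() int8 { return 1 }

func (gzipCodec) Encode(src []byte) ([]byte, error) {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write(src); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (gzipCodec) Decode(src []byte) ([]byte, error) {
	r, err := gzip.NewReader(bytes.NewReader(src))
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return io.ReadAll(r)
}

func main() {
	// Registering the codec lets readers resolve gzip-compressed messages by
	// their attribute code; a writer can also use it directly.
	kafka.RegisterCompressionCodec(func() kafka.CompressionCodec { return gzipCodec{} })

	w := kafka.NewWriter(kafka.WriterConfig{
		Brokers:          []string{"localhost:9092"}, // placeholder broker
		Topic:            "topic-A",                  // placeholder topic
		CompressionCodec: gzipCodec{},
	})
	defer w.Close()
	log.Println("writer configured with gzip compression")
}
```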
-type ConnConfig struct { - ClientID string - Topic string - Partition int -} - -var ( - // DefaultClientID is the default value used as ClientID of kafka - // connections. - DefaultClientID string -) - -func init() { - progname := filepath.Base(os.Args[0]) - hostname, _ := os.Hostname() - DefaultClientID = fmt.Sprintf("%s@%s (github.com/segmentio/kafka-go)", progname, hostname) -} - -// NewConn returns a new kafka connection for the given topic and partition. -func NewConn(conn net.Conn, topic string, partition int) *Conn { - return NewConnWith(conn, ConnConfig{ - Topic: topic, - Partition: partition, - }) -} - -// NewConnWith returns a new kafka connection configured with config. -// The offset is initialized to FirstOffset. -func NewConnWith(conn net.Conn, config ConnConfig) *Conn { - if len(config.ClientID) == 0 { - config.ClientID = DefaultClientID - } - - if config.Partition < 0 || config.Partition > math.MaxInt32 { - panic(fmt.Sprintf("invalid partition number: %d", config.Partition)) - } - - c := &Conn{ - conn: conn, - rbuf: *bufio.NewReader(conn), - wbuf: *bufio.NewWriter(conn), - clientID: config.ClientID, - topic: config.Topic, - partition: int32(config.Partition), - offset: FirstOffset, - requiredAcks: -1, - } - - // The fetch request needs to ask for a MaxBytes value that is at least - // enough to load the control data of the response. To avoid having to - // recompute it on every read, it is cached here in the Conn value. - c.fetchMinSize = (fetchResponseV2{ - Topics: []fetchResponseTopicV2{{ - TopicName: config.Topic, - Partitions: []fetchResponsePartitionV2{{ - Partition: int32(config.Partition), - MessageSet: messageSet{{}}, - }}, - }}, - }).size() - c.fetchMaxBytes = math.MaxInt32 - c.fetchMinSize - return c -} - -// DeleteTopics deletes the specified topics. 
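`CreateTopics` and `DeleteTopics` give `Conn` a small admin surface. A minimal sketch, assuming a broker at `localhost:9092` that can serve the request (typically the cluster controller) and a placeholder topic name:

```go
package main

import (
	"log"
	"net"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	// Connect to a broker; the address is a placeholder and the broker is
	// assumed to be able to handle topic administration requests.
	nc, err := net.Dial("tcp", "localhost:9092")
	if err != nil {
		log.Fatal(err)
	}
	conn := kafka.NewConn(nc, "", 0)
	defer conn.Close()

	if err := conn.CreateTopics(kafka.TopicConfig{
		Topic:             "topic-A",
		NumPartitions:     3,
		ReplicationFactor: 1,
	}); err != nil {
		log.Fatal(err)
	}

	if err := conn.DeleteTopics("topic-A"); err != nil {
		log.Fatal(err)
	}
}
```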
-func (c *Conn) DeleteTopics(topics ...string) error { - _, err := c.deleteTopics(deleteTopicsRequestV1{ - Topics: topics, - }) - return err -} - -// describeGroups retrieves the specified groups -// -// See http://kafka.apache.org/protocol.html#The_Messages_DescribeGroups -func (c *Conn) describeGroups(request describeGroupsRequestV1) (describeGroupsResponseV1, error) { - var response describeGroupsResponseV1 - - err := c.readOperation( - func(deadline time.Time, id int32) error { - return c.writeRequest(describeGroupsRequest, v1, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return describeGroupsResponseV1{}, err - } - for _, group := range response.Groups { - if group.ErrorCode != 0 { - return describeGroupsResponseV1{}, Error(group.ErrorCode) - } - } - - return response, nil -} - -// findCoordinator finds the coordinator for the specified group or transaction -// -// See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator -func (c *Conn) findCoordinator(request findCoordinatorRequestV0) (findCoordinatorResponseV0, error) { - var response findCoordinatorResponseV0 - - err := c.readOperation( - func(deadline time.Time, id int32) error { - return c.writeRequest(groupCoordinatorRequest, v0, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return findCoordinatorResponseV0{}, err - } - if response.ErrorCode != 0 { - return findCoordinatorResponseV0{}, Error(response.ErrorCode) - } - - return response, nil -} - -// heartbeat sends a heartbeat message required by consumer groups -// -// See http://kafka.apache.org/protocol.html#The_Messages_Heartbeat -func (c *Conn) heartbeat(request heartbeatRequestV0) (heartbeatResponseV0, error) { - var response heartbeatResponseV0 - - err := c.writeOperation( - func(deadline time.Time, id int32) error { - return c.writeRequest(heartbeatRequest, v0, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return heartbeatResponseV0{}, err - } - if response.ErrorCode != 0 { - return heartbeatResponseV0{}, Error(response.ErrorCode) - } - - return response, nil -} - -// joinGroup attempts to join a consumer group -// -// See http://kafka.apache.org/protocol.html#The_Messages_JoinGroup -func (c *Conn) joinGroup(request joinGroupRequestV1) (joinGroupResponseV1, error) { - var response joinGroupResponseV1 - - err := c.writeOperation( - func(deadline time.Time, id int32) error { - return c.writeRequest(joinGroupRequest, v1, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return joinGroupResponseV1{}, err - } - if response.ErrorCode != 0 { - return joinGroupResponseV1{}, Error(response.ErrorCode) - } - - return response, nil -} - -// leaveGroup leaves the consumer from the consumer group -// -// See http://kafka.apache.org/protocol.html#The_Messages_LeaveGroup -func (c *Conn) leaveGroup(request leaveGroupRequestV0) (leaveGroupResponseV0, error) { - var response leaveGroupResponseV0 - - err := c.writeOperation( - func(deadline time.Time, id int32) error { - 
return c.writeRequest(leaveGroupRequest, v0, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return leaveGroupResponseV0{}, err - } - if response.ErrorCode != 0 { - return leaveGroupResponseV0{}, Error(response.ErrorCode) - } - - return response, nil -} - -// listGroups lists all the consumer groups -// -// See http://kafka.apache.org/protocol.html#The_Messages_ListGroups -func (c *Conn) listGroups(request listGroupsRequestV1) (listGroupsResponseV1, error) { - var response listGroupsResponseV1 - - err := c.readOperation( - func(deadline time.Time, id int32) error { - return c.writeRequest(listGroupsRequest, v1, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return listGroupsResponseV1{}, err - } - if response.ErrorCode != 0 { - return listGroupsResponseV1{}, Error(response.ErrorCode) - } - - return response, nil -} - -// offsetCommit commits the specified topic partition offsets -// -// See http://kafka.apache.org/protocol.html#The_Messages_OffsetCommit -func (c *Conn) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) { - var response offsetCommitResponseV2 - - err := c.writeOperation( - func(deadline time.Time, id int32) error { - return c.writeRequest(offsetCommitRequest, v2, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return offsetCommitResponseV2{}, err - } - for _, r := range response.Responses { - for _, pr := range r.PartitionResponses { - if pr.ErrorCode != 0 { - return offsetCommitResponseV2{}, Error(pr.ErrorCode) - } - } - } - - return response, nil -} - -// offsetFetch fetches the offsets for the specified topic partitions. -// -1 indicates that there is no offset saved for the partition. 
-// -// See http://kafka.apache.org/protocol.html#The_Messages_OffsetFetch -func (c *Conn) offsetFetch(request offsetFetchRequestV1) (offsetFetchResponseV1, error) { - var response offsetFetchResponseV1 - - err := c.readOperation( - func(deadline time.Time, id int32) error { - return c.writeRequest(offsetFetchRequest, v1, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return offsetFetchResponseV1{}, err - } - for _, r := range response.Responses { - for _, pr := range r.PartitionResponses { - if pr.ErrorCode != 0 { - return offsetFetchResponseV1{}, Error(pr.ErrorCode) - } - } - } - - return response, nil -} - -// syncGroups completes the handshake to join a consumer group -// -// See http://kafka.apache.org/protocol.html#The_Messages_SyncGroup -func (c *Conn) syncGroups(request syncGroupRequestV0) (syncGroupResponseV0, error) { - var response syncGroupResponseV0 - - err := c.readOperation( - func(deadline time.Time, id int32) error { - return c.writeRequest(syncGroupRequest, v0, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return syncGroupResponseV0{}, err - } - if response.ErrorCode != 0 { - return syncGroupResponseV0{}, Error(response.ErrorCode) - } - - return response, nil -} - -// Close closes the kafka connection. -func (c *Conn) Close() error { - return c.conn.Close() -} - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. -func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// SetDeadline sets the read and write deadlines associated with the connection. -// It is equivalent to calling both SetReadDeadline and SetWriteDeadline. -// -// A deadline is an absolute time after which I/O operations fail with a timeout -// (see type Error) instead of blocking. The deadline applies to all future and -// pending I/O, not just the immediately following call to Read or Write. After -// a deadline has been exceeded, the connection may be closed if it was found to -// be in an unrecoverable state. -// -// A zero value for t means I/O operations will not time out. -func (c *Conn) SetDeadline(t time.Time) error { - c.rdeadline.setDeadline(t) - c.wdeadline.setDeadline(t) - return nil -} - -// SetReadDeadline sets the deadline for future Read calls and any -// currently-blocked Read call. -// A zero value for t means Read will not time out. -func (c *Conn) SetReadDeadline(t time.Time) error { - c.rdeadline.setDeadline(t) - return nil -} - -// SetWriteDeadline sets the deadline for future Write calls and any -// currently-blocked Write call. -// Even if write times out, it may return n > 0, indicating that some of the -// data was successfully written. -// A zero value for t means Write will not time out. -func (c *Conn) SetWriteDeadline(t time.Time) error { - c.wdeadline.setDeadline(t) - return nil -} - -// Offset returns the current offset of the connection as pair of integers, -// where the first one is an offset value and the second one indicates how -// to interpret it. -// -// See Seek for more details about the offset and whence values. 
-func (c *Conn) Offset() (offset int64, whence int) { - c.mutex.Lock() - offset = c.offset - c.mutex.Unlock() - - switch offset { - case FirstOffset: - offset = 0 - whence = SeekStart - case LastOffset: - offset = 0 - whence = SeekEnd - default: - whence = SeekAbsolute - } - return -} - -const ( - SeekStart = 0 // Seek relative to the first offset available in the partition. - SeekAbsolute = 1 // Seek to an absolute offset. - SeekEnd = 2 // Seek relative to the last offset available in the partition. - SeekCurrent = 3 // Seek relative to the current offset. -) - -// Seek sets the offset for the next read or write operation according to whence, which -// should be one of SeekStart, SeekAbsolute, SeekEnd, or SeekCurrent. -// When seeking relative to the end, the offset is subtracted from the current offset. -// Note that for historical reasons, these do not align with the usual whence constants -// as in lseek(2) or os.Seek. -// The method returns the new absolute offset of the connection. -func (c *Conn) Seek(offset int64, whence int) (int64, error) { - switch whence { - case SeekStart, SeekAbsolute, SeekEnd, SeekCurrent: - default: - return 0, fmt.Errorf("whence must be one of 0, 1, 2, or 3. (whence = %d)", whence) - } - - if whence == SeekAbsolute { - c.mutex.Lock() - unchanged := offset == c.offset - c.mutex.Unlock() - if unchanged { - return offset, nil - } - } - if whence == SeekCurrent { - c.mutex.Lock() - offset = c.offset + offset - c.mutex.Unlock() - } - - first, last, err := c.ReadOffsets() - if err != nil { - return 0, err - } - - switch whence { - case SeekStart: - offset = first + offset - case SeekEnd: - offset = last - offset - } - - if offset < first || offset > last { - return 0, OffsetOutOfRange - } - - c.mutex.Lock() - c.offset = offset - c.mutex.Unlock() - return offset, nil -} - -// Read reads the message at the current offset from the connection, advancing -// the offset on success so the next call to a read method will produce the next -// message. -// The method returns the number of bytes read, or an error if something went -// wrong. -// -// While it is safe to call Read concurrently from multiple goroutines it may -// be hard for the program to predict the results as the connection offset will -// be read and written by multiple goroutines, they could read duplicates, or -// messages may be seen by only some of the goroutines. -// -// The method fails with io.ErrShortBuffer if the buffer passed as argument is -// too small to hold the message value. -// -// This method is provided to satisfy the net.Conn interface but is much less -// efficient than using the more general purpose ReadBatch method. -func (c *Conn) Read(b []byte) (int, error) { - batch := c.ReadBatch(1, len(b)) - n, err := batch.Read(b) - return n, coalesceErrors(silentEOF(err), batch.Close()) -} - -// ReadMessage reads the message at the current offset from the connection, -// advancing the offset on success so the next call to a read method will -// produce the next message. -// -// Because this method allocate memory buffers for the message key and value -// it is less memory-efficient than Read, but has the advantage of never -// failing with io.ErrShortBuffer. -// -// While it is safe to call Read concurrently from multiple goroutines it may -// be hard for the program to predict the results as the connection offset will -// be read and written by multiple goroutines, they could read duplicates, or -// messages may be seen by only some of the goroutines. 
-// -// This method is provided for convenience purposes but is much less efficient -// than using the more general purpose ReadBatch method. -func (c *Conn) ReadMessage(maxBytes int) (Message, error) { - batch := c.ReadBatch(1, maxBytes) - msg, err := batch.ReadMessage() - return msg, coalesceErrors(silentEOF(err), batch.Close()) -} - -// ReadBatch reads a batch of messages from the kafka server. The method always -// returns a non-nil Batch value. If an error occurred, either sending the fetch -// request or reading the response, the error will be made available by the -// returned value of the batch's Close method. -// -// While it is safe to call ReadBatch concurrently from multiple goroutines it -// may be hard for the program to predict the results as the connection offset -// will be read and written by multiple goroutines, they could read duplicates, -// or messages may be seen by only some of the goroutines. -// -// A program doesn't specify the number of messages in wants from a batch, but -// gives the minimum and maximum number of bytes that it wants to receive from -// the kafka server. -func (c *Conn) ReadBatch(minBytes, maxBytes int) *Batch { - var adjustedDeadline time.Time - var maxFetch = int(c.fetchMaxBytes) - - if minBytes < 0 || minBytes > maxFetch { - return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes of %d out of [1,%d] bounds", minBytes, maxFetch)} - } - if maxBytes < 0 || maxBytes > maxFetch { - return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: maxBytes of %d out of [1,%d] bounds", maxBytes, maxFetch)} - } - if minBytes > maxBytes { - return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes (%d) > maxBytes (%d)", minBytes, maxBytes)} - } - - offset, err := c.Seek(c.Offset()) - if err != nil { - return &Batch{err: dontExpectEOF(err)} - } - - id, err := c.doRequest(&c.rdeadline, func(deadline time.Time, id int32) error { - now := time.Now() - deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) - adjustedDeadline = deadline - return writeFetchRequestV2( - &c.wbuf, - id, - c.clientID, - c.topic, - c.partition, - offset, - minBytes, - maxBytes+int(c.fetchMinSize), - deadlineToTimeout(deadline, now), - ) - }) - if err != nil { - return &Batch{err: dontExpectEOF(err)} - } - - _, size, lock, err := c.waitResponse(&c.rdeadline, id) - if err != nil { - return &Batch{err: dontExpectEOF(err)} - } - - throttle, highWaterMark, remain, err := readFetchResponseHeader(&c.rbuf, size) - return &Batch{ - conn: c, - msgs: newMessageSetReader(&c.rbuf, remain), - deadline: adjustedDeadline, - throttle: duration(throttle), - lock: lock, - topic: c.topic, // topic is copied to Batch to prevent race with Batch.close - partition: int(c.partition), // partition is copied to Batch to prevent race with Batch.close - offset: offset, - highWaterMark: highWaterMark, - err: dontExpectEOF(err), - } -} - -// ReadOffset returns the offset of the first message with a timestamp equal or -// greater to t. -func (c *Conn) ReadOffset(t time.Time) (int64, error) { - return c.readOffset(timestamp(t)) -} - -// ReadFirstOffset returns the first offset available on the connection. -func (c *Conn) ReadFirstOffset() (int64, error) { - return c.readOffset(FirstOffset) -} - -// ReadLastOffset returns the last offset available on the connection. -func (c *Conn) ReadLastOffset() (int64, error) { - return c.readOffset(LastOffset) -} - -// ReadOffsets returns the absolute first and last offsets of the topic used by -// the connection. 
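Putting `ReadBatch` together with `Batch.ReadMessage` and `Batch.Close`: a minimal consumption sketch, assuming the connection is dialed to the leader of `topic-A` partition 0 at `localhost:9092` and using arbitrary min/max fetch sizes.

```go
package main

import (
	"log"
	"net"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	// Dial the partition leader directly; address, topic, and partition
	// number are placeholders.
	nc, err := net.Dial("tcp", "localhost:9092")
	if err != nil {
		log.Fatal(err)
	}
	conn := kafka.NewConn(nc, "topic-A", 0)
	defer conn.Close()

	conn.SetReadDeadline(time.Now().Add(10 * time.Second))

	// Ask for at least 10KB and at most 1MB per fetch.
	batch := conn.ReadBatch(10e3, 1e6)
	for {
		msg, err := batch.ReadMessage()
		if err != nil {
			break // io.EOF (or a timeout) ends the batch
		}
		log.Printf("offset %d: %s", msg.Offset, string(msg.Value))
	}
	if err := batch.Close(); err != nil {
		log.Printf("batch closed with error: %v", err)
	}
}
```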
-func (c *Conn) ReadOffsets() (first, last int64, err error) { - // We have to submit two different requests to fetch the first and last - // offsets because kafka refuses requests that ask for multiple offsets - // on the same topic and partition. - if first, err = c.ReadFirstOffset(); err != nil { - return - } - if last, err = c.ReadLastOffset(); err != nil { - first = 0 // don't leak the value on error - return - } - return -} - -func (c *Conn) readOffset(t int64) (offset int64, err error) { - err = c.readOperation( - func(deadline time.Time, id int32) error { - return writeListOffsetRequestV1(&c.wbuf, id, c.clientID, c.topic, c.partition, t) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) { - // We skip the topic name because we've made a request for - // a single topic. - size, err := discardString(r, size) - if err != nil { - return size, err - } - - // Reading the array of partitions, there will be only one - // partition which gives the offset we're looking for. - return readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) { - var p partitionOffsetV1 - size, err := p.readFrom(r, size) - if err != nil { - return size, err - } - if p.ErrorCode != 0 { - return size, Error(p.ErrorCode) - } - offset = p.Offset - return size, nil - }) - })) - }, - ) - return -} - -// ReadPartitions returns the list of available partitions for the given list of -// topics. -// -// If the method is called with no topic, it uses the topic configured on the -// connection. If there are none, the method fetches all partitions of the kafka -// cluster. -func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err error) { - defaultTopics := [...]string{c.topic} - - if len(topics) == 0 && len(c.topic) != 0 { - topics = defaultTopics[:] - } - - err = c.readOperation( - func(deadline time.Time, id int32) error { - return c.writeRequest(metadataRequest, v1, id, topicMetadataRequestV1(topics)) - }, - func(deadline time.Time, size int) error { - var res metadataResponseV1 - - if err := c.readResponse(size, &res); err != nil { - return err - } - - brokers := make(map[int32]Broker, len(res.Brokers)) - for _, b := range res.Brokers { - brokers[b.NodeID] = Broker{ - Host: b.Host, - Port: int(b.Port), - ID: int(b.NodeID), - Rack: b.Rack, - } - } - - makeBrokers := func(ids ...int32) []Broker { - b := make([]Broker, len(ids)) - for i, id := range ids { - b[i] = brokers[id] - } - return b - } - - for _, t := range res.Topics { - if t.TopicErrorCode != 0 && t.TopicName == c.topic { - // We only report errors if they happened for the topic of - // the connection, otherwise the topic will simply have no - // partitions in the result set. - return Error(t.TopicErrorCode) - } - for _, p := range t.Partitions { - partitions = append(partitions, Partition{ - Topic: t.TopicName, - Leader: brokers[p.Leader], - Replicas: makeBrokers(p.Replicas...), - Isr: makeBrokers(p.Isr...), - ID: int(p.PartitionID), - }) - } - } - return nil - }, - ) - return -} - -// Write writes a message to the kafka broker that this connection was -// established to. The method returns the number of bytes written, or an error -// if something went wrong. -// -// The operation either succeeds or fail, it never partially writes the message. -// -// This method is exposed to satisfy the net.Conn interface but is less efficient -// than the more general purpose WriteMessages method. 
-func (c *Conn) Write(b []byte) (int, error) { - return c.WriteCompressedMessages(nil, Message{Value: b}) -} - -// WriteMessages writes a batch of messages to the connection's topic and -// partition, returning the number of bytes written. The write is an atomic -// operation, it either fully succeeds or fails. -func (c *Conn) WriteMessages(msgs ...Message) (int, error) { - return c.WriteCompressedMessages(nil, msgs...) -} - -// WriteCompressedMessages writes a batch of messages to the connection's topic -// and partition, returning the number of bytes written. The write is an atomic -// operation, it either fully succeeds or fails. -// -// If the compression codec is not nil, the messages will be compressed. -func (c *Conn) WriteCompressedMessages(codec CompressionCodec, msgs ...Message) (int, error) { - if len(msgs) == 0 { - return 0, nil - } - - writeTime := time.Now() - n := 0 - for i, msg := range msgs { - // users may believe they can set the Topic and/or Partition - // on the kafka message. - if msg.Topic != "" && msg.Topic != c.topic { - return 0, errInvalidWriteTopic - } - if msg.Partition != 0 { - return 0, errInvalidWritePartition - } - - if msg.Time.IsZero() { - msgs[i].Time = writeTime - } - - n += len(msg.Key) + len(msg.Value) - } - - err := c.writeOperation( - func(deadline time.Time, id int32) error { - now := time.Now() - deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) - return writeProduceRequestV2( - &c.wbuf, - codec, - id, - c.clientID, - c.topic, - c.partition, - deadlineToTimeout(deadline, now), - int16(atomic.LoadInt32(&c.requiredAcks)), - msgs..., - ) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) { - // Skip the topic, we've produced the message to only one topic, - // no need to waste resources loading it in memory. - size, err := discardString(r, size) - if err != nil { - return size, err - } - - // Read the list of partitions, there should be only one since - // we've produced a message to a single partition. - size, err = readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) { - var p produceResponsePartitionV2 - size, err := p.readFrom(r, size) - if err == nil && p.ErrorCode != 0 { - err = Error(p.ErrorCode) - } - return size, err - }) - if err != nil { - return size, err - } - - // The response is trailed by the throttle time, also skipping - // since it's not interesting here. - return discardInt32(r, size) - })) - }, - ) - - if err != nil { - n = 0 - } - - return n, err -} - -// SetRequiredAcks sets the number of acknowledges from replicas that the -// connection requests when producing messages. 
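A minimal low-level produce sketch using `WriteMessages` together with `SetRequiredAcks(-1)` (acknowledgement from all in-sync replicas); the broker address, topic, partition, and payloads are placeholders.

```go
package main

import (
	"log"
	"net"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	nc, err := net.Dial("tcp", "localhost:9092") // placeholder leader address
	if err != nil {
		log.Fatal(err)
	}
	conn := kafka.NewConn(nc, "topic-A", 0)
	defer conn.Close()

	// Wait for all in-sync replicas to acknowledge each produce request.
	if err := conn.SetRequiredAcks(-1); err != nil {
		log.Fatal(err)
	}

	conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
	n, err := conn.WriteMessages(
		kafka.Message{Key: []byte("Key-A"), Value: []byte("Hello World!")},
		kafka.Message{Key: []byte("Key-B"), Value: []byte("One!")},
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes", n)
}
```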
-func (c *Conn) SetRequiredAcks(n int) error { - switch n { - case -1, 1: - atomic.StoreInt32(&c.requiredAcks, int32(n)) - return nil - default: - return InvalidRequiredAcks - } -} - -func (c *Conn) writeRequestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32, size int32) { - hdr := c.requestHeader(apiKey, apiVersion, correlationID) - hdr.Size = (hdr.size() + size) - 4 - hdr.writeTo(&c.wbuf) -} - -func (c *Conn) writeRequest(apiKey apiKey, apiVersion apiVersion, correlationID int32, req request) error { - hdr := c.requestHeader(apiKey, apiVersion, correlationID) - hdr.Size = (hdr.size() + req.size()) - 4 - hdr.writeTo(&c.wbuf) - req.writeTo(&c.wbuf) - return c.wbuf.Flush() -} - -func (c *Conn) readResponse(size int, res interface{}) error { - size, err := read(&c.rbuf, size, res) - switch err.(type) { - case Error: - var e error - if size, e = discardN(&c.rbuf, size, size); e != nil { - err = e - } - } - return expectZeroSize(size, err) -} - -func (c *Conn) peekResponseSizeAndID() (int32, int32, error) { - b, err := c.rbuf.Peek(8) - if err != nil { - return 0, 0, err - } - size, id := makeInt32(b[:4]), makeInt32(b[4:]) - return size, id, nil -} - -func (c *Conn) skipResponseSizeAndID() { - c.rbuf.Discard(8) -} - -func (c *Conn) readDeadline() time.Time { - return c.rdeadline.deadline() -} - -func (c *Conn) writeDeadline() time.Time { - return c.wdeadline.deadline() -} - -func (c *Conn) readOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error { - return c.do(&c.rdeadline, write, read) -} - -func (c *Conn) writeOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error { - return c.do(&c.wdeadline, write, read) -} - -func (c *Conn) do(d *connDeadline, write func(time.Time, int32) error, read func(time.Time, int) error) error { - id, err := c.doRequest(d, write) - if err != nil { - return err - } - - deadline, size, lock, err := c.waitResponse(d, id) - if err != nil { - return err - } - - if err = read(deadline, size); err != nil { - switch err.(type) { - case Error: - default: - c.conn.Close() - } - } - - d.unsetConnReadDeadline() - lock.Unlock() - return err -} - -func (c *Conn) doRequest(d *connDeadline, write func(time.Time, int32) error) (id int32, err error) { - c.wlock.Lock() - c.correlationID++ - id = c.correlationID - err = write(d.setConnWriteDeadline(c.conn), id) - d.unsetConnWriteDeadline() - - if err != nil { - // When an error occurs there's no way to know if the connection is in a - // recoverable state so we're better off just giving up at this point to - // avoid any risk of corrupting the following operations. - c.conn.Close() - } - - c.wlock.Unlock() - return -} - -func (c *Conn) waitResponse(d *connDeadline, id int32) (deadline time.Time, size int, lock *sync.Mutex, err error) { - for { - var rsz int32 - var rid int32 - - c.rlock.Lock() - deadline = d.setConnReadDeadline(c.conn) - - if rsz, rid, err = c.peekResponseSizeAndID(); err != nil { - d.unsetConnReadDeadline() - c.conn.Close() - c.rlock.Unlock() - return - } - - if id == rid { - c.skipResponseSizeAndID() - size, lock = int(rsz-4), &c.rlock - return - } - - // Optimistically release the read lock if a response has already - // been received but the current operation is not the target for it. 
- c.rlock.Unlock() - runtime.Gosched() - } -} - -func (c *Conn) requestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32) requestHeader { - return requestHeader{ - ApiKey: int16(apiKey), - ApiVersion: int16(apiVersion), - CorrelationID: correlationID, - ClientID: c.clientID, - } -} - -// connDeadline is a helper type to implement read/write deadline management on -// the kafka connection. -type connDeadline struct { - mutex sync.Mutex - value time.Time - rconn net.Conn - wconn net.Conn -} - -func (d *connDeadline) deadline() time.Time { - d.mutex.Lock() - t := d.value - d.mutex.Unlock() - return t -} - -func (d *connDeadline) setDeadline(t time.Time) { - d.mutex.Lock() - d.value = t - - if d.rconn != nil { - d.rconn.SetReadDeadline(t) - } - - if d.wconn != nil { - d.wconn.SetWriteDeadline(t) - } - - d.mutex.Unlock() -} - -func (d *connDeadline) setConnReadDeadline(conn net.Conn) time.Time { - d.mutex.Lock() - deadline := d.value - d.rconn = conn - d.rconn.SetReadDeadline(deadline) - d.mutex.Unlock() - return deadline -} - -func (d *connDeadline) setConnWriteDeadline(conn net.Conn) time.Time { - d.mutex.Lock() - deadline := d.value - d.wconn = conn - d.wconn.SetWriteDeadline(deadline) - d.mutex.Unlock() - return deadline -} - -func (d *connDeadline) unsetConnReadDeadline() { - d.mutex.Lock() - d.rconn = nil - d.mutex.Unlock() -} - -func (d *connDeadline) unsetConnWriteDeadline() { - d.mutex.Lock() - d.wconn = nil - d.mutex.Unlock() -} diff --git a/vendor/github.com/segmentio/kafka-go/crc32.go b/vendor/github.com/segmentio/kafka-go/crc32.go deleted file mode 100644 index f1a617f02..000000000 --- a/vendor/github.com/segmentio/kafka-go/crc32.go +++ /dev/null @@ -1,80 +0,0 @@ -package kafka - -import ( - "bytes" - "encoding/binary" - "hash/crc32" - "sync" -) - -func crc32OfMessage(magicByte int8, attributes int8, timestamp int64, key []byte, value []byte) uint32 { - b := acquireCrc32Buffer() - b.writeInt8(magicByte) - b.writeInt8(attributes) - if magicByte != 0 { - b.writeInt64(timestamp) - } - b.writeBytes(key) - b.writeBytes(value) - sum := b.sum - releaseCrc32Buffer(b) - return sum -} - -type crc32Buffer struct { - sum uint32 - buf bytes.Buffer -} - -func (c *crc32Buffer) writeInt8(i int8) { - c.buf.Truncate(0) - c.buf.WriteByte(byte(i)) - c.update() -} - -func (c *crc32Buffer) writeInt32(i int32) { - a := [4]byte{} - binary.BigEndian.PutUint32(a[:], uint32(i)) - c.buf.Truncate(0) - c.buf.Write(a[:]) - c.update() -} - -func (c *crc32Buffer) writeInt64(i int64) { - a := [8]byte{} - binary.BigEndian.PutUint64(a[:], uint64(i)) - c.buf.Truncate(0) - c.buf.Write(a[:]) - c.update() -} - -func (c *crc32Buffer) writeBytes(b []byte) { - if b == nil { - c.writeInt32(-1) - } else { - c.writeInt32(int32(len(b))) - } - c.sum = crc32Update(c.sum, b) -} - -func (c *crc32Buffer) update() { - c.sum = crc32Update(c.sum, c.buf.Bytes()) -} - -func crc32Update(sum uint32, b []byte) uint32 { - return crc32.Update(sum, crc32.IEEETable, b) -} - -var crc32BufferPool = sync.Pool{ - New: func() interface{} { return &crc32Buffer{} }, -} - -func acquireCrc32Buffer() *crc32Buffer { - c := crc32BufferPool.Get().(*crc32Buffer) - c.sum = 0 - return c -} - -func releaseCrc32Buffer(b *crc32Buffer) { - crc32BufferPool.Put(b) -} diff --git a/vendor/github.com/segmentio/kafka-go/createtopics.go b/vendor/github.com/segmentio/kafka-go/createtopics.go deleted file mode 100644 index 51bb21fb1..000000000 --- a/vendor/github.com/segmentio/kafka-go/createtopics.go +++ /dev/null @@ -1,267 +0,0 @@ -package kafka - -import 
( - "bufio" - "time" -) - -type ConfigEntry struct { - ConfigName string - ConfigValue string -} - -func (c ConfigEntry) toCreateTopicsRequestV0ConfigEntry() createTopicsRequestV0ConfigEntry { - return createTopicsRequestV0ConfigEntry{ - ConfigName: c.ConfigName, - ConfigValue: c.ConfigValue, - } -} - -type createTopicsRequestV0ConfigEntry struct { - ConfigName string - ConfigValue string -} - -func (t createTopicsRequestV0ConfigEntry) size() int32 { - return sizeofString(t.ConfigName) + - sizeofString(t.ConfigValue) -} - -func (t createTopicsRequestV0ConfigEntry) writeTo(w *bufio.Writer) { - writeString(w, t.ConfigName) - writeString(w, t.ConfigValue) -} - -type ReplicaAssignment struct { - Partition int - Replicas int -} - -func (a ReplicaAssignment) toCreateTopicsRequestV0ReplicaAssignment() createTopicsRequestV0ReplicaAssignment { - return createTopicsRequestV0ReplicaAssignment{ - Partition: int32(a.Partition), - Replicas: int32(a.Replicas), - } -} - -type createTopicsRequestV0ReplicaAssignment struct { - Partition int32 - Replicas int32 -} - -func (t createTopicsRequestV0ReplicaAssignment) size() int32 { - return sizeofInt32(t.Partition) + - sizeofInt32(t.Replicas) -} - -func (t createTopicsRequestV0ReplicaAssignment) writeTo(w *bufio.Writer) { - writeInt32(w, t.Partition) - writeInt32(w, t.Replicas) -} - -type TopicConfig struct { - // Topic name - Topic string - - // NumPartitions created. -1 indicates unset. - NumPartitions int - - // ReplicationFactor for the topic. -1 indicates unset. - ReplicationFactor int - - // ReplicaAssignments among kafka brokers for this topic partitions. If this - // is set num_partitions and replication_factor must be unset. - ReplicaAssignments []ReplicaAssignment - - // ConfigEntries holds topic level configuration for topic to be set. - ConfigEntries []ConfigEntry -} - -func (t TopicConfig) toCreateTopicsRequestV0Topic() createTopicsRequestV0Topic { - var requestV0ReplicaAssignments []createTopicsRequestV0ReplicaAssignment - for _, a := range t.ReplicaAssignments { - requestV0ReplicaAssignments = append( - requestV0ReplicaAssignments, - a.toCreateTopicsRequestV0ReplicaAssignment()) - } - var requestV0ConfigEntries []createTopicsRequestV0ConfigEntry - for _, c := range t.ConfigEntries { - requestV0ConfigEntries = append( - requestV0ConfigEntries, - c.toCreateTopicsRequestV0ConfigEntry()) - } - - return createTopicsRequestV0Topic{ - Topic: t.Topic, - NumPartitions: int32(t.NumPartitions), - ReplicationFactor: int16(t.ReplicationFactor), - ReplicaAssignments: requestV0ReplicaAssignments, - ConfigEntries: requestV0ConfigEntries, - } -} - -type createTopicsRequestV0Topic struct { - // Topic name - Topic string - - // NumPartitions created. -1 indicates unset. - NumPartitions int32 - - // ReplicationFactor for the topic. -1 indicates unset. - ReplicationFactor int16 - - // ReplicaAssignments among kafka brokers for this topic partitions. If this - // is set num_partitions and replication_factor must be unset. - ReplicaAssignments []createTopicsRequestV0ReplicaAssignment - - // ConfigEntries holds topic level configuration for topic to be set. 
- ConfigEntries []createTopicsRequestV0ConfigEntry -} - -func (t createTopicsRequestV0Topic) size() int32 { - return sizeofString(t.Topic) + - sizeofInt32(t.NumPartitions) + - sizeofInt16(t.ReplicationFactor) + - sizeofArray(len(t.ReplicaAssignments), func(i int) int32 { return t.ReplicaAssignments[i].size() }) + - sizeofArray(len(t.ConfigEntries), func(i int) int32 { return t.ConfigEntries[i].size() }) -} - -func (t createTopicsRequestV0Topic) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeInt32(w, t.NumPartitions) - writeInt16(w, t.ReplicationFactor) - writeArray(w, len(t.ReplicaAssignments), func(i int) { t.ReplicaAssignments[i].writeTo(w) }) - writeArray(w, len(t.ConfigEntries), func(i int) { t.ConfigEntries[i].writeTo(w) }) -} - -// See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics -type createTopicsRequestV0 struct { - // Topics contains n array of single topic creation requests. Can not - // have multiple entries for the same topic. - Topics []createTopicsRequestV0Topic - - // Timeout ms to wait for a topic to be completely created on the - // controller node. Values <= 0 will trigger topic creation and return immediately - Timeout int32 -} - -func (t createTopicsRequestV0) size() int32 { - return sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) + - sizeofInt32(t.Timeout) -} - -func (t createTopicsRequestV0) writeTo(w *bufio.Writer) { - writeArray(w, len(t.Topics), func(i int) { t.Topics[i].writeTo(w) }) - writeInt32(w, t.Timeout) -} - -type createTopicsResponseV0TopicError struct { - // Topic name - Topic string - - // ErrorCode holds response error code - ErrorCode int16 -} - -func (t createTopicsResponseV0TopicError) size() int32 { - return sizeofString(t.Topic) + - sizeofInt16(t.ErrorCode) -} - -func (t createTopicsResponseV0TopicError) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeInt16(w, t.ErrorCode) -} - -func (t *createTopicsResponseV0TopicError) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readString(r, size, &t.Topic); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { - return - } - return -} - -// See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics -type createTopicsResponseV0 struct { - TopicErrors []createTopicsResponseV0TopicError -} - -func (t createTopicsResponseV0) size() int32 { - return sizeofArray(len(t.TopicErrors), func(i int) int32 { return t.TopicErrors[i].size() }) -} - -func (t createTopicsResponseV0) writeTo(w *bufio.Writer) { - writeArray(w, len(t.TopicErrors), func(i int) { t.TopicErrors[i].writeTo(w) }) -} - -func (t *createTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { - fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - var topic createTopicsResponseV0TopicError - if fnRemain, fnErr = (&topic).readFrom(r, size); err != nil { - return - } - t.TopicErrors = append(t.TopicErrors, topic) - return - } - if remain, err = readArrayWith(r, size, fn); err != nil { - return - } - - return -} - -func (c *Conn) createTopics(request createTopicsRequestV0) (createTopicsResponseV0, error) { - var response createTopicsResponseV0 - - err := c.writeOperation( - func(deadline time.Time, id int32) error { - if request.Timeout == 0 { - now := time.Now() - deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) - request.Timeout = milliseconds(deadlineToTimeout(deadline, now)) - } - return c.writeRequest(createTopicsRequest, v0, id, request) - }, - 
func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return response, err - } - for _, tr := range response.TopicErrors { - if tr.ErrorCode != 0 { - return response, Error(tr.ErrorCode) - } - } - - return response, nil -} - -// CreateTopics creates one topic per provided configuration with idempotent -// operational semantics. In other words, if CreateTopics is invoked with a -// configuration for an existing topic, it will have no effect. -func (c *Conn) CreateTopics(topics ...TopicConfig) error { - var requestV0Topics []createTopicsRequestV0Topic - for _, t := range topics { - requestV0Topics = append( - requestV0Topics, - t.toCreateTopicsRequestV0Topic()) - } - - _, err := c.createTopics(createTopicsRequestV0{ - Topics: requestV0Topics, - }) - - switch err { - case TopicAlreadyExists: - // ok - return nil - default: - return err - } -} diff --git a/vendor/github.com/segmentio/kafka-go/deletetopics.go b/vendor/github.com/segmentio/kafka-go/deletetopics.go deleted file mode 100644 index 983bff7a0..000000000 --- a/vendor/github.com/segmentio/kafka-go/deletetopics.go +++ /dev/null @@ -1,124 +0,0 @@ -package kafka - -import ( - "bufio" - "time" -) - -// See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics -type deleteTopicsRequestV1 struct { - // Topics holds the topic names - Topics []string - - // Timeout holds the time in ms to wait for a topic to be completely deleted - // on the controller node. Values <= 0 will trigger topic deletion and return - // immediately. - Timeout int32 -} - -func (t deleteTopicsRequestV1) size() int32 { - return sizeofStringArray(t.Topics) + - sizeofInt32(t.Timeout) -} - -func (t deleteTopicsRequestV1) writeTo(w *bufio.Writer) { - writeStringArray(w, t.Topics) - writeInt32(w, t.Timeout) -} - -type deleteTopicsResponseV1 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - - // TopicErrorCodes holds per topic error codes - TopicErrorCodes []deleteTopicsResponseV1TopicErrorCode -} - -func (t deleteTopicsResponseV1) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofArray(len(t.TopicErrorCodes), func(i int) int32 { return t.TopicErrorCodes[i].size() }) -} - -func (t *deleteTopicsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) { - var item deleteTopicsResponseV1TopicErrorCode - if fnRemain, fnErr = (&item).readFrom(withReader, withSize); err != nil { - return - } - t.TopicErrorCodes = append(t.TopicErrorCodes, item) - return - } - if remain, err = readArrayWith(r, remain, fn); err != nil { - return - } - return -} - -func (t deleteTopicsResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeArray(w, len(t.TopicErrorCodes), func(i int) { t.TopicErrorCodes[i].writeTo(w) }) -} - -type deleteTopicsResponseV1TopicErrorCode struct { - // Topic holds the topic name - Topic string - - // ErrorCode holds the error code - ErrorCode int16 -} - -func (t deleteTopicsResponseV1TopicErrorCode) size() int32 { - return sizeofString(t.Topic) + - sizeofInt16(t.ErrorCode) -} - -func (t *deleteTopicsResponseV1TopicErrorCode) readFrom(r *bufio.Reader, size int) (remain 
int, err error) { - if remain, err = readString(r, size, &t.Topic); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { - return - } - return -} - -func (t deleteTopicsResponseV1TopicErrorCode) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeInt16(w, t.ErrorCode) -} - -// deleteTopics deletes the specified topics. -// -// See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics -func (c *Conn) deleteTopics(request deleteTopicsRequestV1) (deleteTopicsResponseV1, error) { - var response deleteTopicsResponseV1 - err := c.writeOperation( - func(deadline time.Time, id int32) error { - if request.Timeout == 0 { - now := time.Now() - deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) - request.Timeout = milliseconds(deadlineToTimeout(deadline, now)) - } - return c.writeRequest(deleteTopicsRequest, v1, id, request) - }, - func(deadline time.Time, size int) error { - return expectZeroSize(func() (remain int, err error) { - return (&response).readFrom(&c.rbuf, size) - }()) - }, - ) - if err != nil { - return deleteTopicsResponseV1{}, err - } - for _, c := range response.TopicErrorCodes { - if c.ErrorCode != 0 { - return response, Error(c.ErrorCode) - } - } - return response, nil -} diff --git a/vendor/github.com/segmentio/kafka-go/describegroups.go b/vendor/github.com/segmentio/kafka-go/describegroups.go deleted file mode 100644 index a7b7982ac..000000000 --- a/vendor/github.com/segmentio/kafka-go/describegroups.go +++ /dev/null @@ -1,186 +0,0 @@ -package kafka - -import "bufio" - -// See http://kafka.apache.org/protocol.html#The_Messages_DescribeGroups -type describeGroupsRequestV1 struct { - // List of groupIds to request metadata for (an empty groupId array - // will return empty group metadata). - GroupIDs []string -} - -func (t describeGroupsRequestV1) size() int32 { - return sizeofStringArray(t.GroupIDs) -} - -func (t describeGroupsRequestV1) writeTo(w *bufio.Writer) { - writeStringArray(w, t.GroupIDs) -} - -type describeGroupsResponseMemberV1 struct { - // MemberID assigned by the group coordinator - MemberID string - - // ClientID used in the member's latest join group request - ClientID string - - // ClientHost used in the request session corresponding to the member's - // join group. - ClientHost string - - // MemberMetadata the metadata corresponding to the current group protocol - // in use (will only be present if the group is stable). - MemberMetadata []byte - - // MemberAssignments provided by the group leader (will only be present if - // the group is stable). 
- // - // See consumer groups section of https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol - MemberAssignments []byte -} - -func (t describeGroupsResponseMemberV1) size() int32 { - return sizeofString(t.MemberID) + - sizeofString(t.ClientID) + - sizeofString(t.ClientHost) + - sizeofBytes(t.MemberMetadata) + - sizeofBytes(t.MemberAssignments) -} - -func (t describeGroupsResponseMemberV1) writeTo(w *bufio.Writer) { - writeString(w, t.MemberID) - writeString(w, t.ClientID) - writeString(w, t.ClientHost) - writeBytes(w, t.MemberMetadata) - writeBytes(w, t.MemberAssignments) -} - -func (t *describeGroupsResponseMemberV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readString(r, size, &t.MemberID); err != nil { - return - } - if remain, err = readString(r, remain, &t.ClientID); err != nil { - return - } - if remain, err = readString(r, remain, &t.ClientHost); err != nil { - return - } - if remain, err = readBytes(r, remain, &t.MemberMetadata); err != nil { - return - } - if remain, err = readBytes(r, remain, &t.MemberAssignments); err != nil { - return - } - return -} - -type describeGroupsResponseGroupV1 struct { - // ErrorCode holds response error code - ErrorCode int16 - - // GroupID holds the unique group identifier - GroupID string - - // State holds current state of the group (one of: Dead, Stable, AwaitingSync, - // PreparingRebalance, or empty if there is no active group) - State string - - // ProtocolType holds the current group protocol type (will be empty if there is - // no active group) - ProtocolType string - - // Protocol holds the current group protocol (only provided if the group is Stable) - Protocol string - - // Members contains the current group members (only provided if the group is not Dead) - Members []describeGroupsResponseMemberV1 -} - -func (t describeGroupsResponseGroupV1) size() int32 { - return sizeofInt16(t.ErrorCode) + - sizeofString(t.GroupID) + - sizeofString(t.State) + - sizeofString(t.ProtocolType) + - sizeofString(t.Protocol) + - sizeofArray(len(t.Members), func(i int) int32 { return t.Members[i].size() }) -} - -func (t describeGroupsResponseGroupV1) writeTo(w *bufio.Writer) { - writeInt16(w, t.ErrorCode) - writeString(w, t.GroupID) - writeString(w, t.State) - writeString(w, t.ProtocolType) - writeString(w, t.Protocol) - writeArray(w, len(t.Members), func(i int) { t.Members[i].writeTo(w) }) -} - -func (t *describeGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { - return - } - if remain, err = readString(r, remain, &t.GroupID); err != nil { - return - } - if remain, err = readString(r, remain, &t.State); err != nil { - return - } - if remain, err = readString(r, remain, &t.ProtocolType); err != nil { - return - } - if remain, err = readString(r, remain, &t.Protocol); err != nil { - return - } - - fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - item := describeGroupsResponseMemberV1{} - if fnRemain, fnErr = (&item).readFrom(r, size); err != nil { - return - } - t.Members = append(t.Members, item) - return - } - if remain, err = readArrayWith(r, remain, fn); err != nil { - return - } - - return -} - -type describeGroupsResponseV1 struct { - // Duration in milliseconds for which the request was throttled due - // to quota violation (Zero if the request did not violate any quota) - ThrottleTimeMS int32 - - // Groups holds selected group information - Groups 
[]describeGroupsResponseGroupV1 -} - -func (t describeGroupsResponseV1) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() }) -} - -func (t describeGroupsResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeArray(w, len(t.Groups), func(i int) { t.Groups[i].writeTo(w) }) -} - -func (t *describeGroupsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - - fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - item := describeGroupsResponseGroupV1{} - if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil { - return - } - t.Groups = append(t.Groups, item) - return - } - if remain, err = readArrayWith(r, remain, fn); err != nil { - return - } - - return -} diff --git a/vendor/github.com/segmentio/kafka-go/dialer.go b/vendor/github.com/segmentio/kafka-go/dialer.go deleted file mode 100644 index 15887f2f8..000000000 --- a/vendor/github.com/segmentio/kafka-go/dialer.go +++ /dev/null @@ -1,365 +0,0 @@ -package kafka - -import ( - "context" - "crypto/tls" - "net" - "strconv" - "strings" - "time" -) - -// The Dialer type mirrors the net.Dialer API but is designed to open kafka -// connections instead of raw network connections. -type Dialer struct { - // Unique identifier for client connections established by this Dialer. - ClientID string - - // Timeout is the maximum amount of time a dial will wait for a connect to - // complete. If Deadline is also set, it may fail earlier. - // - // The default is no timeout. - // - // When dialing a name with multiple IP addresses, the timeout may be - // divided between them. - // - // With or without a timeout, the operating system may impose its own - // earlier timeout. For instance, TCP timeouts are often around 3 minutes. - Timeout time.Duration - - // Deadline is the absolute point in time after which dials will fail. - // If Timeout is set, it may fail earlier. - // Zero means no deadline, or dependent on the operating system as with the - // Timeout option. - Deadline time.Time - - // LocalAddr is the local address to use when dialing an address. - // The address must be of a compatible type for the network being dialed. - // If nil, a local address is automatically chosen. - LocalAddr net.Addr - - // DualStack enables RFC 6555-compliant "Happy Eyeballs" dialing when the - // network is "tcp" and the destination is a host name with both IPv4 and - // IPv6 addresses. This allows a client to tolerate networks where one - // address family is silently broken. - DualStack bool - - // FallbackDelay specifies the length of time to wait before spawning a - // fallback connection, when DualStack is enabled. - // If zero, a default delay of 300ms is used. - FallbackDelay time.Duration - - // KeepAlive specifies the keep-alive period for an active network - // connection. - // If zero, keep-alives are not enabled. Network protocols that do not - // support keep-alives ignore this field. - KeepAlive time.Duration - - // Resolver optionally specifies an alternate resolver to use. - Resolver Resolver - - // TLS enables Dialer to open secure connections. If nil, standard net.Conn - // will be used. - TLS *tls.Config -} - -// Dial connects to the address on the named network. 
-func (d *Dialer) Dial(network string, address string) (*Conn, error) { - return d.DialContext(context.Background(), network, address) -} - -// DialContext connects to the address on the named network using the provided -// context. -// -// The provided Context must be non-nil. If the context expires before the -// connection is complete, an error is returned. Once successfully connected, -// any expiration of the context will not affect the connection. -// -// When using TCP, and the host in the address parameter resolves to multiple -// network addresses, any dial timeout (from d.Timeout or ctx) is spread over -// each consecutive dial, such that each is given an appropriate fraction of the -// time to connect. For example, if a host has 4 IP addresses and the timeout is -// 1 minute, the connect to each single address will be given 15 seconds to -// complete before trying the next one. -func (d *Dialer) DialContext(ctx context.Context, network string, address string) (*Conn, error) { - if d.Timeout != 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, d.Timeout) - defer cancel() - } - - if !d.Deadline.IsZero() { - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, d.Deadline) - defer cancel() - } - - c, err := d.dialContext(ctx, network, address) - if err != nil { - return nil, err - } - return NewConnWith(c, ConnConfig{ClientID: d.ClientID}), nil -} - -// DialLeader opens a connection to the leader of the partition for a given -// topic. -// -// The address given to the DialContext method may not be the one that the -// connection will end up being established to, because the dialer will lookup -// the partition leader for the topic and return a connection to that server. -// The original address is only used as a mechanism to discover the -// configuration of the kafka cluster that we're connecting to. -func (d *Dialer) DialLeader(ctx context.Context, network string, address string, topic string, partition int) (*Conn, error) { - p, err := d.LookupPartition(ctx, network, address, topic, partition) - if err != nil { - return nil, err - } - return d.DialPartition(ctx, network, address, p) -} - -// DialPartition opens a connection to the leader of the partition specified by partition -// descriptor. It's strongly advised to use descriptor of the partition that comes out of -// functions LookupPartition or LookupPartitions. -func (d *Dialer) DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) { - c, err := d.dialContext(ctx, network, net.JoinHostPort(partition.Leader.Host, strconv.Itoa(partition.Leader.Port))) - if err != nil { - return nil, err - } - - return NewConnWith(c, ConnConfig{ - ClientID: d.ClientID, - Topic: partition.Topic, - Partition: partition.ID, - }), nil -} - -// LookupLeader searches for the kafka broker that is the leader of the -// partition for a given topic, returning a Broker value representing it. -func (d *Dialer) LookupLeader(ctx context.Context, network string, address string, topic string, partition int) (Broker, error) { - p, err := d.LookupPartition(ctx, network, address, topic, partition) - return p.Leader, err -} - -// LookupPartition searches for the description of specified partition id. 
-func (d *Dialer) LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) { - c, err := d.DialContext(ctx, network, address) - if err != nil { - return Partition{}, err - } - defer c.Close() - - brkch := make(chan Partition, 1) - errch := make(chan error, 1) - - go func() { - for attempt := 0; true; attempt++ { - if attempt != 0 { - sleep(ctx, backoff(attempt, 100*time.Millisecond, 10*time.Second)) - } - - partitions, err := c.ReadPartitions(topic) - if err != nil { - if isTemporary(err) { - continue - } - errch <- err - return - } - - for _, p := range partitions { - if p.ID == partition { - brkch <- p - return - } - } - } - - errch <- UnknownTopicOrPartition - }() - - var prt Partition - select { - case prt = <-brkch: - case err = <-errch: - case <-ctx.Done(): - err = ctx.Err() - } - return prt, err -} - -// LookupPartitions returns the list of partitions that exist for the given topic. -func (d *Dialer) LookupPartitions(ctx context.Context, network string, address string, topic string) ([]Partition, error) { - conn, err := d.DialContext(ctx, network, address) - if err != nil { - return nil, err - } - defer conn.Close() - - prtch := make(chan []Partition, 1) - errch := make(chan error, 1) - - go func() { - if prt, err := conn.ReadPartitions(topic); err != nil { - errch <- err - } else { - prtch <- prt - } - }() - - var prt []Partition - select { - case prt = <-prtch: - case err = <-errch: - case <-ctx.Done(): - err = ctx.Err() - } - return prt, err -} - -// connectTLS returns a tls.Conn that has already completed the Handshake -func (d *Dialer) connectTLS(ctx context.Context, conn net.Conn, config *tls.Config) (tlsConn *tls.Conn, err error) { - tlsConn = tls.Client(conn, config) - errch := make(chan error) - - go func() { - defer close(errch) - errch <- tlsConn.Handshake() - }() - - select { - case <-ctx.Done(): - conn.Close() - tlsConn.Close() - <-errch // ignore possible error from Handshake - err = ctx.Err() - - case err = <-errch: - } - - return -} - -func (d *Dialer) dialContext(ctx context.Context, network string, address string) (net.Conn, error) { - if r := d.Resolver; r != nil { - host, port := splitHostPort(address) - addrs, err := r.LookupHost(ctx, host) - if err != nil { - return nil, err - } - if len(addrs) != 0 { - address = addrs[0] - } - if len(port) != 0 { - address, _ = splitHostPort(address) - address = net.JoinHostPort(address, port) - } - } - - conn, err := (&net.Dialer{ - LocalAddr: d.LocalAddr, - DualStack: d.DualStack, - FallbackDelay: d.FallbackDelay, - KeepAlive: d.KeepAlive, - }).DialContext(ctx, network, address) - if err != nil { - return nil, err - } - - if d.TLS != nil { - c := d.TLS - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if c.ServerName == "" { - c = d.TLS.Clone() - // Copied from tls.go in the standard library. - colonPos := strings.LastIndex(address, ":") - if colonPos == -1 { - colonPos = len(address) - } - hostname := address[:colonPos] - c.ServerName = hostname - } - return d.connectTLS(ctx, conn, c) - } - - return conn, nil -} - -// DefaultDialer is the default dialer used when none is specified. -var DefaultDialer = &Dialer{ - Timeout: 10 * time.Second, - DualStack: true, -} - -// Dial is a convenience wrapper for DefaultDialer.Dial. -func Dial(network string, address string) (*Conn, error) { - return DefaultDialer.Dial(network, address) -} - -// DialContext is a convenience wrapper for DefaultDialer.DialContext. 
-func DialContext(ctx context.Context, network string, address string) (*Conn, error) { - return DefaultDialer.DialContext(ctx, network, address) -} - -// DialLeader is a convenience wrapper for DefaultDialer.DialLeader. -func DialLeader(ctx context.Context, network string, address string, topic string, partition int) (*Conn, error) { - return DefaultDialer.DialLeader(ctx, network, address, topic, partition) -} - -// DialPartition is a convenience wrapper for DefaultDialer.DialPartition. -func DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) { - return DefaultDialer.DialPartition(ctx, network, address, partition) -} - -// LookupPartition is a convenience wrapper for DefaultDialer.LookupPartition. -func LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) { - return DefaultDialer.LookupPartition(ctx, network, address, topic, partition) -} - -// LookupPartitions is a convenience wrapper for DefaultDialer.LookupPartitions. -func LookupPartitions(ctx context.Context, network string, address string, topic string) ([]Partition, error) { - return DefaultDialer.LookupPartitions(ctx, network, address, topic) -} - -// The Resolver interface is used as an abstraction to provide service discovery -// of the hosts of a kafka cluster. -type Resolver interface { - // LookupHost looks up the given host using the local resolver. - // It returns a slice of that host's addresses. - LookupHost(ctx context.Context, host string) (addrs []string, err error) -} - -func sleep(ctx context.Context, duration time.Duration) bool { - if duration == 0 { - select { - default: - return true - case <-ctx.Done(): - return false - } - } - timer := time.NewTimer(duration) - defer timer.Stop() - select { - case <-timer.C: - return true - case <-ctx.Done(): - return false - } -} - -func backoff(attempt int, min time.Duration, max time.Duration) time.Duration { - d := time.Duration(attempt*attempt) * min - if d > max { - d = max - } - return d -} - -func splitHostPort(s string) (host string, port string) { - host, port, _ = net.SplitHostPort(s) - if len(host) == 0 && len(port) == 0 { - host = s - } - return -} diff --git a/vendor/github.com/segmentio/kafka-go/discard.go b/vendor/github.com/segmentio/kafka-go/discard.go deleted file mode 100644 index c92d53035..000000000 --- a/vendor/github.com/segmentio/kafka-go/discard.go +++ /dev/null @@ -1,50 +0,0 @@ -package kafka - -import "bufio" - -func discardN(r *bufio.Reader, sz int, n int) (int, error) { - var err error - if n <= sz { - n, err = r.Discard(n) - } else { - n, err = r.Discard(sz) - if err == nil { - err = errShortRead - } - } - return sz - n, err -} - -func discardInt8(r *bufio.Reader, sz int) (int, error) { - return discardN(r, sz, 1) -} - -func discardInt16(r *bufio.Reader, sz int) (int, error) { - return discardN(r, sz, 2) -} - -func discardInt32(r *bufio.Reader, sz int) (int, error) { - return discardN(r, sz, 4) -} - -func discardInt64(r *bufio.Reader, sz int) (int, error) { - return discardN(r, sz, 8) -} - -func discardString(r *bufio.Reader, sz int) (int, error) { - return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { - if n < 0 { - return sz, nil - } - return discardN(r, sz, n) - }) -} - -func discardBytes(r *bufio.Reader, sz int) (int, error) { - return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { - if n < 0 { - return sz, nil - } - return discardN(r, sz, n) - }) -} diff --git 
a/vendor/github.com/segmentio/kafka-go/docker-compose.yml b/vendor/github.com/segmentio/kafka-go/docker-compose.yml deleted file mode 100644 index 6c59b8760..000000000 --- a/vendor/github.com/segmentio/kafka-go/docker-compose.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: "3" -services: - kafka: - image: wurstmeister/kafka:0.11.0.1 - restart: on-failure:3 - links: - - zookeeper - ports: - - "9092:9092" - environment: - KAFKA_VERSION: '0.11.0.1' - KAFKA_BROKER_ID: 1 - KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1' - KAFKA_DELETE_TOPIC_ENABLE: 'true' - KAFKA_ADVERTISED_HOST_NAME: 'localhost' - KAFKA_ADVERTISED_PORT: '9092' - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' - KAFKA_MESSAGE_MAX_BYTES: 200000000 - - zookeeper: - image: wurstmeister/zookeeper - ports: - - "2181:2181" diff --git a/vendor/github.com/segmentio/kafka-go/error.go b/vendor/github.com/segmentio/kafka-go/error.go deleted file mode 100644 index 0559af628..000000000 --- a/vendor/github.com/segmentio/kafka-go/error.go +++ /dev/null @@ -1,360 +0,0 @@ -package kafka - -import ( - "fmt" - "io" -) - -// Error represents the different error codes that may be returned by kafka. -type Error int - -const ( - Unknown Error = -1 - OffsetOutOfRange Error = 1 - InvalidMessage Error = 2 - UnknownTopicOrPartition Error = 3 - InvalidMessageSize Error = 4 - LeaderNotAvailable Error = 5 - NotLeaderForPartition Error = 6 - RequestTimedOut Error = 7 - BrokerNotAvailable Error = 8 - ReplicaNotAvailable Error = 9 - MessageSizeTooLarge Error = 10 - StaleControllerEpoch Error = 11 - OffsetMetadataTooLarge Error = 12 - GroupLoadInProgress Error = 14 - GroupCoordinatorNotAvailable Error = 15 - NotCoordinatorForGroup Error = 16 - InvalidTopic Error = 17 - RecordListTooLarge Error = 18 - NotEnoughReplicas Error = 19 - NotEnoughReplicasAfterAppend Error = 20 - InvalidRequiredAcks Error = 21 - IllegalGeneration Error = 22 - InconsistentGroupProtocol Error = 23 - InvalidGroupId Error = 24 - UnknownMemberId Error = 25 - InvalidSessionTimeout Error = 26 - RebalanceInProgress Error = 27 - InvalidCommitOffsetSize Error = 28 - TopicAuthorizationFailed Error = 29 - GroupAuthorizationFailed Error = 30 - ClusterAuthorizationFailed Error = 31 - InvalidTimestamp Error = 32 - UnsupportedSASLMechanism Error = 33 - IllegalSASLState Error = 34 - UnsupportedVersion Error = 35 - TopicAlreadyExists Error = 36 - InvalidPartitionNumber Error = 37 - InvalidReplicationFactor Error = 38 - InvalidReplicaAssignment Error = 39 - InvalidConfiguration Error = 40 - NotController Error = 41 - InvalidRequest Error = 42 - UnsupportedForMessageFormat Error = 43 - PolicyViolation Error = 44 - OutOfOrderSequenceNumber Error = 45 - DuplicateSequenceNumber Error = 46 - InvalidProducerEpoch Error = 47 - InvalidTransactionState Error = 48 - InvalidProducerIDMapping Error = 49 - InvalidTransactionTimeout Error = 50 - ConcurrentTransactions Error = 51 - TransactionCoordinatorFenced Error = 52 - TransactionalIDAuthorizationFailed Error = 53 - SecurityDisabled Error = 54 - BrokerAuthorizationFailed Error = 55 -) - -// Error satisfies the error interface. -func (e Error) Error() string { - return fmt.Sprintf("[%d] %s: %s", e, e.Title(), e.Description()) -} - -// Timeout returns true if the error was due to a timeout. -func (e Error) Timeout() bool { - return e == RequestTimedOut -} - -// Temporary returns true if the operation that generated the error may succeed -// if retried at a later time. 
-func (e Error) Temporary() bool { - return e == LeaderNotAvailable || - e == BrokerNotAvailable || - e == ReplicaNotAvailable || - e == GroupLoadInProgress || - e == GroupCoordinatorNotAvailable || - e == RebalanceInProgress || - e.Timeout() -} - -// Title returns a human readable title for the error. -func (e Error) Title() string { - switch e { - case Unknown: - return "Unknown" - case OffsetOutOfRange: - return "Offset Out Of Range" - case InvalidMessage: - return "Invalid Message" - case UnknownTopicOrPartition: - return "Unknown Topic Or Partition" - case InvalidMessageSize: - return "Invalid Message Size" - case LeaderNotAvailable: - return "Leader Not Available" - case NotLeaderForPartition: - return "Not Leader For Partition" - case RequestTimedOut: - return "Request Timed Out" - case BrokerNotAvailable: - return "Broker Not Available" - case ReplicaNotAvailable: - return "Replica Not Available" - case MessageSizeTooLarge: - return "Message Size Too Large" - case StaleControllerEpoch: - return "Stale Controller Epoch" - case OffsetMetadataTooLarge: - return "Offset Metadata Too Large" - case GroupLoadInProgress: - return "Group Load In Progress" - case GroupCoordinatorNotAvailable: - return "Group Coordinator Not Available" - case NotCoordinatorForGroup: - return "Not Coordinator For Group" - case InvalidTopic: - return "Invalid Topic" - case RecordListTooLarge: - return "Record List Too Large" - case NotEnoughReplicas: - return "Not Enough Replicas" - case NotEnoughReplicasAfterAppend: - return "Not Enough Replicas After Append" - case InvalidRequiredAcks: - return "Invalid Required Acks" - case IllegalGeneration: - return "Illegal Generation" - case InconsistentGroupProtocol: - return "Inconsistent Group Protocol" - case InvalidGroupId: - return "Invalid Group ID" - case UnknownMemberId: - return "Unknown Member ID" - case InvalidSessionTimeout: - return "Invalid Session Timeout" - case RebalanceInProgress: - return "Rebalance In Progress" - case InvalidCommitOffsetSize: - return "Invalid Commit Offset Size" - case TopicAuthorizationFailed: - return "Topic Authorization Failed" - case GroupAuthorizationFailed: - return "Group Authorization Failed" - case ClusterAuthorizationFailed: - return "Cluster Authorization Failed" - case InvalidTimestamp: - return "Invalid Timestamp" - case UnsupportedSASLMechanism: - return "Unsupported SASL Mechanism" - case IllegalSASLState: - return "Illegal SASL State" - case UnsupportedVersion: - return "Unsupported Version" - case TopicAlreadyExists: - return "Topic Already Exists" - case InvalidPartitionNumber: - return "Invalid Partition Number" - case InvalidReplicationFactor: - return "Invalid Replication Factor" - case InvalidReplicaAssignment: - return "Invalid Replica Assignment" - case InvalidConfiguration: - return "Invalid Configuration" - case NotController: - return "Not Controller" - case InvalidRequest: - return "Invalid Request" - case UnsupportedForMessageFormat: - return "Unsupported For Message Format" - case PolicyViolation: - return "Policy Violation" - case OutOfOrderSequenceNumber: - return "Out Of Order Sequence Number" - case DuplicateSequenceNumber: - return "Duplicate Sequence Number" - case InvalidProducerEpoch: - return "Invalid Producer Epoch" - case InvalidTransactionState: - return "Invalid Transaction State" - case InvalidProducerIDMapping: - return "Invalid Producer ID Mapping" - case InvalidTransactionTimeout: - return "Invalid Transaction Timeout" - case ConcurrentTransactions: - return "Concurrent Transactions" - 
case TransactionCoordinatorFenced: - return "Transaction Coordinator Fenced" - case TransactionalIDAuthorizationFailed: - return "Transactional ID Authorization Failed" - case SecurityDisabled: - return "Security Disabled" - case BrokerAuthorizationFailed: - return "Broker Authorization Failed" - } - return "" -} - -// Description returns a human readable description of cause of the error. -func (e Error) Description() string { - switch e { - case Unknown: - return "an unexpected server error occurred" - case OffsetOutOfRange: - return "the requested offset is outside the range of offsets maintained by the server for the given topic/partition" - case InvalidMessage: - return "the message contents does not match its CRC" - case UnknownTopicOrPartition: - return "the request is for a topic or partition that does not exist on this broker" - case InvalidMessageSize: - return "the message has a negative size" - case LeaderNotAvailable: - return "the cluster is in the middle of a leadership election and there is currently no leader for this partition and hence it is unavailable for writes" - case NotLeaderForPartition: - return "the client attempted to send messages to a replica that is not the leader for some partition, the client's metadata are likely out of date" - case RequestTimedOut: - return "the request exceeded the user-specified time limit in the request" - case BrokerNotAvailable: - return "not a client facing error and is used mostly by tools when a broker is not alive" - case ReplicaNotAvailable: - return "a replica is expected on a broker, but is not (this can be safely ignored)" - case MessageSizeTooLarge: - return "the server has a configurable maximum message size to avoid unbounded memory allocation and the client attempted to produce a message larger than this maximum" - case StaleControllerEpoch: - return "internal error code for broker-to-broker communication" - case OffsetMetadataTooLarge: - return "the client specified a string larger than configured maximum for offset metadata" - case GroupLoadInProgress: - return "the broker returns this error code for an offset fetch request if it is still loading offsets (after a leader change for that offsets topic partition), or in response to group membership requests (such as heartbeats) when group metadata is being loaded by the coordinator" - case GroupCoordinatorNotAvailable: - return "the broker returns this error code for group coordinator requests, offset commits, and most group management requests if the offsets topic has not yet been created, or if the group coordinator is not active" - case NotCoordinatorForGroup: - return "the broker returns this error code if it receives an offset fetch or commit request for a group that it is not a coordinator for" - case InvalidTopic: - return "a request which attempted to access an invalid topic (e.g. one which has an illegal name), or if an attempt was made to write to an internal topic (such as the consumer offsets topic)" - case RecordListTooLarge: - return "a message batch in a produce request exceeds the maximum configured segment size" - case NotEnoughReplicas: - return "the number of in-sync replicas is lower than the configured minimum and requiredAcks is -1" - case NotEnoughReplicasAfterAppend: - return "the message was written to the log, but with fewer in-sync replicas than required." 
- case InvalidRequiredAcks: - return "the requested requiredAcks is invalid (anything other than -1, 1, or 0)" - case IllegalGeneration: - return "the generation id provided in the request is not the current generation" - case InconsistentGroupProtocol: - return "the member provided a protocol type or set of protocols which is not compatible with the current group" - case InvalidGroupId: - return "the group id is empty or null" - case UnknownMemberId: - return "the member id is not in the current generation" - case InvalidSessionTimeout: - return "the requested session timeout is outside of the allowed range on the broker" - case RebalanceInProgress: - return "the coordinator has begun rebalancing the group, the client should rejoin the group" - case InvalidCommitOffsetSize: - return "an offset commit was rejected because of oversize metadata" - case TopicAuthorizationFailed: - return "the client is not authorized to access the requested topic" - case GroupAuthorizationFailed: - return "the client is not authorized to access a particular group id" - case ClusterAuthorizationFailed: - return "the client is not authorized to use an inter-broker or administrative API" - case InvalidTimestamp: - return "the timestamp of the message is out of acceptable range" - case UnsupportedSASLMechanism: - return "the broker does not support the requested SASL mechanism" - case IllegalSASLState: - return "the request is not valid given the current SASL state" - case UnsupportedVersion: - return "the version of API is not supported" - case TopicAlreadyExists: - return "a topic with this name already exists" - case InvalidPartitionNumber: - return "the number of partitions is invalid" - case InvalidReplicationFactor: - return "the replication-factor is invalid" - case InvalidReplicaAssignment: - return "the replica assignment is invalid" - case InvalidConfiguration: - return "the configuration is invalid" - case NotController: - return "this is not the correct controller for this cluster" - case InvalidRequest: - return "this most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker, se the broker logs for more details" - case UnsupportedForMessageFormat: - return "the message format version on the broker does not support the request" - case PolicyViolation: - return "the request parameters do not satisfy the configured policy" - case OutOfOrderSequenceNumber: - return "the broker received an out of order sequence number" - case DuplicateSequenceNumber: - return "the broker received a duplicate sequence number" - case InvalidProducerEpoch: - return "the producer attempted an operation with an old epoch, either there is a newer producer with the same transactional ID, or the producer's transaction has been expired by the broker" - case InvalidTransactionState: - return "the producer attempted a transactional operation in an invalid state" - case InvalidProducerIDMapping: - return "the producer attempted to use a producer id which is not currently assigned to its transactional ID" - case InvalidTransactionTimeout: - return "the transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)" - case ConcurrentTransactions: - return "the producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing" - case TransactionCoordinatorFenced: - return "the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator 
for a given producer" - case TransactionalIDAuthorizationFailed: - return "the transactional ID authorization failed" - case SecurityDisabled: - return "the security features are disabled" - case BrokerAuthorizationFailed: - return "the broker authorization failed" - } - return "" -} - -func isTimeout(err error) bool { - e, ok := err.(interface { - Timeout() bool - }) - return ok && e.Timeout() -} - -func isTemporary(err error) bool { - e, ok := err.(interface { - Temporary() bool - }) - return ok && e.Temporary() -} - -func silentEOF(err error) error { - if err == io.EOF { - err = nil - } - return err -} - -func dontExpectEOF(err error) error { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err -} - -func coalesceErrors(errs ...error) error { - for _, err := range errs { - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/segmentio/kafka-go/fetch.go b/vendor/github.com/segmentio/kafka-go/fetch.go deleted file mode 100644 index 3b2f72afc..000000000 --- a/vendor/github.com/segmentio/kafka-go/fetch.go +++ /dev/null @@ -1,101 +0,0 @@ -package kafka - -import "bufio" - -type fetchRequestV2 struct { - ReplicaID int32 - MaxWaitTime int32 - MinBytes int32 - Topics []fetchRequestTopicV2 -} - -func (r fetchRequestV2) size() int32 { - return 4 + 4 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) -} - -func (r fetchRequestV2) writeTo(w *bufio.Writer) { - writeInt32(w, r.ReplicaID) - writeInt32(w, r.MaxWaitTime) - writeInt32(w, r.MinBytes) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) -} - -type fetchRequestTopicV2 struct { - TopicName string - Partitions []fetchRequestPartitionV2 -} - -func (t fetchRequestTopicV2) size() int32 { - return sizeofString(t.TopicName) + - sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) -} - -func (t fetchRequestTopicV2) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) -} - -type fetchRequestPartitionV2 struct { - Partition int32 - FetchOffset int64 - MaxBytes int32 -} - -func (p fetchRequestPartitionV2) size() int32 { - return 4 + 8 + 4 -} - -func (p fetchRequestPartitionV2) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt64(w, p.FetchOffset) - writeInt32(w, p.MaxBytes) -} - -type fetchResponseV2 struct { - ThrottleTime int32 - Topics []fetchResponseTopicV2 -} - -func (r fetchResponseV2) size() int32 { - return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) -} - -func (r fetchResponseV2) writeTo(w *bufio.Writer) { - writeInt32(w, r.ThrottleTime) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) -} - -type fetchResponseTopicV2 struct { - TopicName string - Partitions []fetchResponsePartitionV2 -} - -func (t fetchResponseTopicV2) size() int32 { - return sizeofString(t.TopicName) + - sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) -} - -func (t fetchResponseTopicV2) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) -} - -type fetchResponsePartitionV2 struct { - Partition int32 - ErrorCode int16 - HighwaterMarkOffset int64 - MessageSetSize int32 - MessageSet messageSet -} - -func (p fetchResponsePartitionV2) size() int32 { - return 4 + 2 + 8 + 4 + p.MessageSet.size() -} - -func (p fetchResponsePartitionV2) writeTo(w *bufio.Writer) { - writeInt32(w, 
p.Partition) - writeInt16(w, p.ErrorCode) - writeInt64(w, p.HighwaterMarkOffset) - writeInt32(w, p.MessageSetSize) - p.MessageSet.writeTo(w) -} diff --git a/vendor/github.com/segmentio/kafka-go/findcoordinator.go b/vendor/github.com/segmentio/kafka-go/findcoordinator.go deleted file mode 100644 index d0b47728e..000000000 --- a/vendor/github.com/segmentio/kafka-go/findcoordinator.go +++ /dev/null @@ -1,86 +0,0 @@ -package kafka - -import ( - "bufio" -) - -// FindCoordinatorRequestV0 requests the coordinator for the specified group or transaction -// -// See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator -type findCoordinatorRequestV0 struct { - // CoordinatorKey holds id to use for finding the coordinator (for groups, this is - // the groupId, for transactional producers, this is the transactional id) - CoordinatorKey string -} - -func (t findCoordinatorRequestV0) size() int32 { - return sizeofString(t.CoordinatorKey) -} - -func (t findCoordinatorRequestV0) writeTo(w *bufio.Writer) { - writeString(w, t.CoordinatorKey) -} - -type findCoordinatorResponseCoordinatorV0 struct { - // NodeID holds the broker id. - NodeID int32 - - // Host of the broker - Host string - - // Port on which broker accepts requests - Port int32 -} - -func (t findCoordinatorResponseCoordinatorV0) size() int32 { - return sizeofInt32(t.NodeID) + - sizeofString(t.Host) + - sizeofInt32(t.Port) -} - -func (t findCoordinatorResponseCoordinatorV0) writeTo(w *bufio.Writer) { - writeInt32(w, t.NodeID) - writeString(w, t.Host) - writeInt32(w, t.Port) -} - -func (t *findCoordinatorResponseCoordinatorV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.NodeID); err != nil { - return - } - if remain, err = readString(r, remain, &t.Host); err != nil { - return - } - if remain, err = readInt32(r, remain, &t.Port); err != nil { - return - } - return -} - -type findCoordinatorResponseV0 struct { - // ErrorCode holds response error code - ErrorCode int16 - - // Coordinator holds host and port information for the coordinator - Coordinator findCoordinatorResponseCoordinatorV0 -} - -func (t findCoordinatorResponseV0) size() int32 { - return sizeofInt16(t.ErrorCode) + - t.Coordinator.size() -} - -func (t findCoordinatorResponseV0) writeTo(w *bufio.Writer) { - writeInt16(w, t.ErrorCode) - t.Coordinator.writeTo(w) -} - -func (t *findCoordinatorResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { - return - } - if remain, err = (&t.Coordinator).readFrom(r, remain); err != nil { - return - } - return -} diff --git a/vendor/github.com/segmentio/kafka-go/groupbalancer.go b/vendor/github.com/segmentio/kafka-go/groupbalancer.go deleted file mode 100644 index 7e46cc7d4..000000000 --- a/vendor/github.com/segmentio/kafka-go/groupbalancer.go +++ /dev/null @@ -1,187 +0,0 @@ -package kafka - -import "sort" - -// GroupMember describes a single participant in a consumer group. -type GroupMember struct { - // ID is the unique ID for this member as taken from the JoinGroup response. - ID string - - // Topics is a list of topics that this member is consuming. - Topics []string - - // UserData contains any information that the GroupBalancer sent to the - // consumer group coordinator. 
- UserData []byte -} - -// GroupMemberAssignments holds MemberID => topic => partitions -type GroupMemberAssignments map[string]map[string][]int - -// GroupBalancer encapsulates the client side rebalancing logic -type GroupBalancer interface { - // ProtocolName of the GroupBalancer - ProtocolName() string - - // UserData provides the GroupBalancer an opportunity to embed custom - // UserData into the metadata. - // - // Will be used by JoinGroup to begin the consumer group handshake. - // - // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-JoinGroupRequest - UserData() ([]byte, error) - - // DefineMemberships returns which members will be consuming - // which topic partitions - AssignGroups(members []GroupMember, partitions []Partition) GroupMemberAssignments -} - -// RangeGroupBalancer groups consumers by partition -// -// Example: 5 partitions, 2 consumers -// C0: [0, 1, 2] -// C1: [3, 4] -// -// Example: 6 partitions, 3 consumers -// C0: [0, 1] -// C1: [2, 3] -// C2: [4, 5] -// -type RangeGroupBalancer struct{} - -func (r RangeGroupBalancer) ProtocolName() string { - return "range" -} - -func (r RangeGroupBalancer) UserData() ([]byte, error) { - return nil, nil -} - -func (r RangeGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments { - groupAssignments := GroupMemberAssignments{} - membersByTopic := findMembersByTopic(members) - - for topic, members := range membersByTopic { - partitions := findPartitions(topic, topicPartitions) - partitionCount := len(partitions) - memberCount := len(members) - - for memberIndex, member := range members { - assignmentsByTopic, ok := groupAssignments[member.ID] - if !ok { - assignmentsByTopic = map[string][]int{} - groupAssignments[member.ID] = assignmentsByTopic - } - - minIndex := memberIndex * partitionCount / memberCount - maxIndex := (memberIndex + 1) * partitionCount / memberCount - - for partitionIndex, partition := range partitions { - if partitionIndex >= minIndex && partitionIndex < maxIndex { - assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition) - } - } - } - } - - return groupAssignments -} - -// RoundrobinGroupBalancer divides partitions evenly among consumers -// -// Example: 5 partitions, 2 consumers -// C0: [0, 2, 4] -// C1: [1, 3] -// -// Example: 6 partitions, 3 consumers -// C0: [0, 3] -// C1: [1, 4] -// C2: [2, 5] -// -type RoundRobinGroupBalancer struct{} - -func (r RoundRobinGroupBalancer) ProtocolName() string { - return "roundrobin" -} - -func (r RoundRobinGroupBalancer) UserData() ([]byte, error) { - return nil, nil -} - -func (r RoundRobinGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments { - groupAssignments := GroupMemberAssignments{} - membersByTopic := findMembersByTopic(members) - for topic, members := range membersByTopic { - partitionIDs := findPartitions(topic, topicPartitions) - memberCount := len(members) - - for memberIndex, member := range members { - assignmentsByTopic, ok := groupAssignments[member.ID] - if !ok { - assignmentsByTopic = map[string][]int{} - groupAssignments[member.ID] = assignmentsByTopic - } - - for partitionIndex, partition := range partitionIDs { - if (partitionIndex % memberCount) == memberIndex { - assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition) - } - } - } - } - - return groupAssignments -} - -// findPartitions extracts the partition ids associated with the topic from the -// list of 
Partitions provided -func findPartitions(topic string, partitions []Partition) []int { - var ids []int - for _, partition := range partitions { - if partition.Topic == topic { - ids = append(ids, partition.ID) - } - } - return ids -} - -// findMembersByTopic groups the memberGroupMetadata by topic -func findMembersByTopic(members []GroupMember) map[string][]GroupMember { - membersByTopic := map[string][]GroupMember{} - for _, member := range members { - for _, topic := range member.Topics { - membersByTopic[topic] = append(membersByTopic[topic], member) - } - } - - // normalize ordering of members to enabling grouping across topics by partitions - // - // Want: - // C0 [T0/P0, T1/P0] - // C1 [T0/P1, T1/P1] - // - // Not: - // C0 [T0/P0, T1/P1] - // C1 [T0/P1, T1/P0] - // - // Even though the later is still round robin, the partitions are crossed - // - for _, members := range membersByTopic { - sort.Slice(members, func(i, j int) bool { - return members[i].ID < members[j].ID - }) - } - - return membersByTopic -} - -// findGroupBalancer returns the GroupBalancer with the specified protocolName -// from the slice provided -func findGroupBalancer(protocolName string, balancers []GroupBalancer) (GroupBalancer, bool) { - for _, balancer := range balancers { - if balancer.ProtocolName() == protocolName { - return balancer, true - } - } - return nil, false -} diff --git a/vendor/github.com/segmentio/kafka-go/heartbeat.go b/vendor/github.com/segmentio/kafka-go/heartbeat.go deleted file mode 100644 index fd95049a1..000000000 --- a/vendor/github.com/segmentio/kafka-go/heartbeat.go +++ /dev/null @@ -1,46 +0,0 @@ -package kafka - -import "bufio" - -type heartbeatRequestV0 struct { - // GroupID holds the unique group identifier - GroupID string - - // GenerationID holds the generation of the group. - GenerationID int32 - - // MemberID assigned by the group coordinator - MemberID string -} - -func (t heartbeatRequestV0) size() int32 { - return sizeofString(t.GroupID) + - sizeofInt32(t.GenerationID) + - sizeofString(t.MemberID) -} - -func (t heartbeatRequestV0) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeInt32(w, t.GenerationID) - writeString(w, t.MemberID) -} - -type heartbeatResponseV0 struct { - // ErrorCode holds response error code - ErrorCode int16 -} - -func (t heartbeatResponseV0) size() int32 { - return sizeofInt16(t.ErrorCode) -} - -func (t heartbeatResponseV0) writeTo(w *bufio.Writer) { - writeInt16(w, t.ErrorCode) -} - -func (t *heartbeatResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { - if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { - return - } - return -} diff --git a/vendor/github.com/segmentio/kafka-go/joingroup.go b/vendor/github.com/segmentio/kafka-go/joingroup.go deleted file mode 100644 index 81d3ca973..000000000 --- a/vendor/github.com/segmentio/kafka-go/joingroup.go +++ /dev/null @@ -1,202 +0,0 @@ -package kafka - -import ( - "bufio" - "bytes" -) - -type memberGroupMetadata struct { - // MemberID assigned by the group coordinator or null if joining for the - // first time. 
- MemberID string - Metadata groupMetadata -} - -type groupMetadata struct { - Version int16 - Topics []string - UserData []byte -} - -func (t groupMetadata) size() int32 { - return sizeofInt16(t.Version) + - sizeofStringArray(t.Topics) + - sizeofBytes(t.UserData) -} - -func (t groupMetadata) writeTo(w *bufio.Writer) { - writeInt16(w, t.Version) - writeStringArray(w, t.Topics) - writeBytes(w, t.UserData) -} - -func (t groupMetadata) bytes() []byte { - buf := bytes.NewBuffer(nil) - w := bufio.NewWriter(buf) - t.writeTo(w) - w.Flush() - return buf.Bytes() -} - -func (t *groupMetadata) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt16(r, size, &t.Version); err != nil { - return - } - if remain, err = readStringArray(r, remain, &t.Topics); err != nil { - return - } - if remain, err = readBytes(r, remain, &t.UserData); err != nil { - return - } - return -} - -type joinGroupRequestGroupProtocolV1 struct { - ProtocolName string - ProtocolMetadata []byte -} - -func (t joinGroupRequestGroupProtocolV1) size() int32 { - return sizeofString(t.ProtocolName) + - sizeofBytes(t.ProtocolMetadata) -} - -func (t joinGroupRequestGroupProtocolV1) writeTo(w *bufio.Writer) { - writeString(w, t.ProtocolName) - writeBytes(w, t.ProtocolMetadata) -} - -type joinGroupRequestV1 struct { - // GroupID holds the unique group identifier - GroupID string - - // SessionTimeout holds the coordinator considers the consumer dead if it - // receives no heartbeat after this timeout in ms. - SessionTimeout int32 - - // RebalanceTimeout holds the maximum time that the coordinator will wait - // for each member to rejoin when rebalancing the group in ms - RebalanceTimeout int32 - - // MemberID assigned by the group coordinator or the zero string if joining - // for the first time. - MemberID string - - // ProtocolType holds the unique name for class of protocols implemented by group - ProtocolType string - - // GroupProtocols holds the list of protocols that the member supports - GroupProtocols []joinGroupRequestGroupProtocolV1 -} - -func (t joinGroupRequestV1) size() int32 { - return sizeofString(t.GroupID) + - sizeofInt32(t.SessionTimeout) + - sizeofInt32(t.RebalanceTimeout) + - sizeofString(t.MemberID) + - sizeofString(t.ProtocolType) + - sizeofArray(len(t.GroupProtocols), func(i int) int32 { return t.GroupProtocols[i].size() }) -} - -func (t joinGroupRequestV1) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeInt32(w, t.SessionTimeout) - writeInt32(w, t.RebalanceTimeout) - writeString(w, t.MemberID) - writeString(w, t.ProtocolType) - writeArray(w, len(t.GroupProtocols), func(i int) { t.GroupProtocols[i].writeTo(w) }) -} - -type joinGroupResponseMemberV1 struct { - // MemberID assigned by the group coordinator - MemberID string - MemberMetadata []byte -} - -func (t joinGroupResponseMemberV1) size() int32 { - return sizeofString(t.MemberID) + - sizeofBytes(t.MemberMetadata) -} - -func (t joinGroupResponseMemberV1) writeTo(w *bufio.Writer) { - writeString(w, t.MemberID) - writeBytes(w, t.MemberMetadata) -} - -func (t *joinGroupResponseMemberV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readString(r, size, &t.MemberID); err != nil { - return - } - if remain, err = readBytes(r, remain, &t.MemberMetadata); err != nil { - return - } - return -} - -type joinGroupResponseV1 struct { - // ErrorCode holds response error code - ErrorCode int16 - - // GenerationID holds the generation of the group. 
- GenerationID int32 - - // GroupProtocol holds the group protocol selected by the coordinator - GroupProtocol string - - // LeaderID holds the leader of the group - LeaderID string - - // MemberID assigned by the group coordinator - MemberID string - Members []joinGroupResponseMemberV1 -} - -func (t joinGroupResponseV1) size() int32 { - return sizeofInt16(t.ErrorCode) + - sizeofInt32(t.GenerationID) + - sizeofString(t.GroupProtocol) + - sizeofString(t.LeaderID) + - sizeofString(t.MemberID) + - sizeofArray(len(t.MemberID), func(i int) int32 { return t.Members[i].size() }) -} - -func (t joinGroupResponseV1) writeTo(w *bufio.Writer) { - writeInt16(w, t.ErrorCode) - writeInt32(w, t.GenerationID) - writeString(w, t.GroupProtocol) - writeString(w, t.LeaderID) - writeString(w, t.MemberID) - writeArray(w, len(t.Members), func(i int) { t.Members[i].writeTo(w) }) -} - -func (t *joinGroupResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { - return - } - if remain, err = readInt32(r, remain, &t.GenerationID); err != nil { - return - } - if remain, err = readString(r, remain, &t.GroupProtocol); err != nil { - return - } - if remain, err = readString(r, remain, &t.LeaderID); err != nil { - return - } - if remain, err = readString(r, remain, &t.MemberID); err != nil { - return - } - - fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - var item joinGroupResponseMemberV1 - if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil { - return - } - t.Members = append(t.Members, item) - return - } - if remain, err = readArrayWith(r, remain, fn); err != nil { - return - } - - return -} diff --git a/vendor/github.com/segmentio/kafka-go/leavegroup.go b/vendor/github.com/segmentio/kafka-go/leavegroup.go deleted file mode 100644 index c87c0dbfc..000000000 --- a/vendor/github.com/segmentio/kafka-go/leavegroup.go +++ /dev/null @@ -1,42 +0,0 @@ -package kafka - -import "bufio" - -type leaveGroupRequestV0 struct { - // GroupID holds the unique group identifier - GroupID string - - // MemberID assigned by the group coordinator or the zero string if joining - // for the first time. 
- MemberID string -} - -func (t leaveGroupRequestV0) size() int32 { - return sizeofString(t.GroupID) + - sizeofString(t.MemberID) -} - -func (t leaveGroupRequestV0) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeString(w, t.MemberID) -} - -type leaveGroupResponseV0 struct { - // ErrorCode holds response error code - ErrorCode int16 -} - -func (t leaveGroupResponseV0) size() int32 { - return sizeofInt16(t.ErrorCode) -} - -func (t leaveGroupResponseV0) writeTo(w *bufio.Writer) { - writeInt16(w, t.ErrorCode) -} - -func (t *leaveGroupResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { - return - } - return -} diff --git a/vendor/github.com/segmentio/kafka-go/listgroups.go b/vendor/github.com/segmentio/kafka-go/listgroups.go deleted file mode 100644 index a369f7d09..000000000 --- a/vendor/github.com/segmentio/kafka-go/listgroups.go +++ /dev/null @@ -1,87 +0,0 @@ -package kafka - -import ( - "bufio" -) - -type listGroupsRequestV1 struct { -} - -func (t listGroupsRequestV1) size() int32 { - return 0 -} - -func (t listGroupsRequestV1) writeTo(w *bufio.Writer) { -} - -type ListGroupsResponseGroupV1 struct { - // GroupID holds the unique group identifier - GroupID string - ProtocolType string -} - -func (t ListGroupsResponseGroupV1) size() int32 { - return sizeofString(t.GroupID) + - sizeofString(t.ProtocolType) -} - -func (t ListGroupsResponseGroupV1) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeString(w, t.ProtocolType) -} - -func (t *ListGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readString(r, size, &t.GroupID); err != nil { - return - } - if remain, err = readString(r, remain, &t.ProtocolType); err != nil { - return - } - return -} - -type listGroupsResponseV1 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - - // ErrorCode holds response error code - ErrorCode int16 - Groups []ListGroupsResponseGroupV1 -} - -func (t listGroupsResponseV1) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofInt16(t.ErrorCode) + - sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() }) -} - -func (t listGroupsResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeInt16(w, t.ErrorCode) - writeArray(w, len(t.Groups), func(i int) { t.Groups[i].writeTo(w) }) -} - -func (t *listGroupsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { - return - } - - fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) { - var item ListGroupsResponseGroupV1 - if fnRemain, fnErr = (&item).readFrom(withReader, withSize); err != nil { - return - } - t.Groups = append(t.Groups, item) - return - } - if remain, err = readArrayWith(r, remain, fn); err != nil { - return - } - - return -} diff --git a/vendor/github.com/segmentio/kafka-go/listoffset.go b/vendor/github.com/segmentio/kafka-go/listoffset.go deleted file mode 100644 index 12903197c..000000000 --- a/vendor/github.com/segmentio/kafka-go/listoffset.go +++ /dev/null @@ -1,105 +0,0 @@ -package kafka - -import "bufio" - -type listOffsetRequestV1 struct { - ReplicaID int32 - Topics 
[]listOffsetRequestTopicV1 -} - -func (r listOffsetRequestV1) size() int32 { - return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) -} - -func (r listOffsetRequestV1) writeTo(w *bufio.Writer) { - writeInt32(w, r.ReplicaID) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) -} - -type listOffsetRequestTopicV1 struct { - TopicName string - Partitions []listOffsetRequestPartitionV1 -} - -func (t listOffsetRequestTopicV1) size() int32 { - return sizeofString(t.TopicName) + - sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) -} - -func (t listOffsetRequestTopicV1) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) -} - -type listOffsetRequestPartitionV1 struct { - Partition int32 - Time int64 -} - -func (p listOffsetRequestPartitionV1) size() int32 { - return 4 + 8 -} - -func (p listOffsetRequestPartitionV1) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt64(w, p.Time) -} - -type listOffsetResponseV1 []listOffsetResponseTopicV1 - -func (r listOffsetResponseV1) size() int32 { - return sizeofArray(len(r), func(i int) int32 { return r[i].size() }) -} - -func (r listOffsetResponseV1) writeTo(w *bufio.Writer) { - writeArray(w, len(r), func(i int) { r[i].writeTo(w) }) -} - -type listOffsetResponseTopicV1 struct { - TopicName string - PartitionOffsets []partitionOffsetV1 -} - -func (t listOffsetResponseTopicV1) size() int32 { - return sizeofString(t.TopicName) + - sizeofArray(len(t.PartitionOffsets), func(i int) int32 { return t.PartitionOffsets[i].size() }) -} - -func (t listOffsetResponseTopicV1) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.PartitionOffsets), func(i int) { t.PartitionOffsets[i].writeTo(w) }) -} - -type partitionOffsetV1 struct { - Partition int32 - ErrorCode int16 - Timestamp int64 - Offset int64 -} - -func (p partitionOffsetV1) size() int32 { - return 4 + 2 + 8 + 8 -} - -func (p partitionOffsetV1) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt16(w, p.ErrorCode) - writeInt64(w, p.Timestamp) - writeInt64(w, p.Offset) -} - -func (p *partitionOffsetV1) readFrom(r *bufio.Reader, sz int) (remain int, err error) { - if remain, err = readInt32(r, sz, &p.Partition); err != nil { - return - } - if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil { - return - } - if remain, err = readInt64(r, remain, &p.Timestamp); err != nil { - return - } - if remain, err = readInt64(r, remain, &p.Offset); err != nil { - return - } - return -} diff --git a/vendor/github.com/segmentio/kafka-go/message.go b/vendor/github.com/segmentio/kafka-go/message.go deleted file mode 100644 index 13d5edc97..000000000 --- a/vendor/github.com/segmentio/kafka-go/message.go +++ /dev/null @@ -1,253 +0,0 @@ -package kafka - -import ( - "bufio" - "bytes" - "time" -) - -// Message is a data structure representing kafka messages. -type Message struct { - // Topic is reads only and MUST NOT be set when writing messages - Topic string - - // Partition is reads only and MUST NOT be set when writing messages - Partition int - Offset int64 - Key []byte - Value []byte - - // If not set at the creation, Time will be automatically set when - // writing the message. 
- Time time.Time -} - -func (msg Message) item() messageSetItem { - item := messageSetItem{ - Offset: msg.Offset, - Message: msg.message(), - } - item.MessageSize = item.Message.size() - return item -} - -func (msg Message) message() message { - m := message{ - MagicByte: 1, - Key: msg.Key, - Value: msg.Value, - Timestamp: timestamp(msg.Time), - } - m.CRC = m.crc32() - return m -} - -type message struct { - CRC int32 - MagicByte int8 - Attributes int8 - Timestamp int64 - Key []byte - Value []byte -} - -func (m message) crc32() int32 { - return int32(crc32OfMessage(m.MagicByte, m.Attributes, m.Timestamp, m.Key, m.Value)) -} - -func (m message) size() int32 { - size := 4 + 1 + 1 + sizeofBytes(m.Key) + sizeofBytes(m.Value) - if m.MagicByte != 0 { - size += 8 // Timestamp - } - return size -} - -func (m message) writeTo(w *bufio.Writer) { - writeInt32(w, m.CRC) - writeInt8(w, m.MagicByte) - writeInt8(w, m.Attributes) - if m.MagicByte != 0 { - writeInt64(w, m.Timestamp) - } - writeBytes(w, m.Key) - writeBytes(w, m.Value) -} - -type messageSetItem struct { - Offset int64 - MessageSize int32 - Message message -} - -func (m messageSetItem) size() int32 { - return 8 + 4 + m.Message.size() -} - -func (m messageSetItem) writeTo(w *bufio.Writer) { - writeInt64(w, m.Offset) - writeInt32(w, m.MessageSize) - m.Message.writeTo(w) -} - -type messageSet []messageSetItem - -func (s messageSet) size() (size int32) { - for _, m := range s { - size += m.size() - } - return -} - -func (s messageSet) writeTo(w *bufio.Writer) { - for _, m := range s { - m.writeTo(w) - } -} - -type messageSetReader struct { - *readerStack -} - -type readerStack struct { - reader *bufio.Reader - remain int - base int64 - parent *readerStack -} - -func newMessageSetReader(reader *bufio.Reader, remain int) *messageSetReader { - return &messageSetReader{&readerStack{ - reader: reader, - remain: remain, - }} -} - -func (r *messageSetReader) readMessage(min int64, - key func(*bufio.Reader, int, int) (int, error), - val func(*bufio.Reader, int, int) (int, error), -) (offset int64, timestamp int64, err error) { - for r.readerStack != nil { - if r.remain == 0 { - r.readerStack = r.parent - continue - } - - var attributes int8 - if offset, attributes, timestamp, r.remain, err = readMessageHeader(r.reader, r.remain); err != nil { - return - } - - // if the message is compressed, decompress it and push a new reader - // onto the stack. - code := attributes & compressionCodecMask - if code != 0 { - var codec CompressionCodec - if codec, err = resolveCodec(attributes); err != nil { - return - } - - // discard next four bytes...will be -1 to indicate null key - if r.remain, err = discardN(r.reader, r.remain, 4); err != nil { - return - } - - // read and decompress the contained message set. - var decompressed []byte - if r.remain, err = readBytesWith(r.reader, r.remain, func(r *bufio.Reader, sz, n int) (remain int, err error) { - var value []byte - if value, remain, err = readNewBytes(r, sz, n); err != nil { - return - } - decompressed, err = codec.Decode(value) - return - }); err != nil { - return - } - - // the compressed message's offset will be equal to the offset of - // the last message in the set. within the compressed set, the - // offsets will be relative, so we have to scan through them to - // get the base offset. for example, if there are four compressed - // messages at offsets 10-13, then the container message will have - // offset 13 and the contained messages will be 0,1,2,3. 
the base - // offset for the container, then is 13-3=10. - if offset, err = extractOffset(offset, decompressed); err != nil { - return - } - - r.readerStack = &readerStack{ - reader: bufio.NewReader(bytes.NewReader(decompressed)), - remain: len(decompressed), - base: offset, - parent: r.readerStack, - } - continue - } - - // adjust the offset in case we're reading compressed messages. the - // base will be zero otherwise. - offset += r.base - - // When the messages are compressed kafka may return messages at an - // earlier offset than the one that was requested, it's the client's - // responsibility to ignore those. - if offset < min { - if r.remain, err = discardBytes(r.reader, r.remain); err != nil { - return - } - if r.remain, err = discardBytes(r.reader, r.remain); err != nil { - return - } - continue - } - - if r.remain, err = readBytesWith(r.reader, r.remain, key); err != nil { - return - } - r.remain, err = readBytesWith(r.reader, r.remain, val) - return - } - - err = errShortRead - return -} - -func (r *messageSetReader) remaining() (remain int) { - for s := r.readerStack; s != nil; s = s.parent { - remain += s.remain - } - return -} - -func (r *messageSetReader) discard() (err error) { - if r.readerStack == nil { - return - } - // rewind up to the top-most reader b/c it's the only one that's doing - // actual i/o. the rest are byte buffers that have been pushed on the stack - // while reading compressed message sets. - for r.parent != nil { - r.readerStack = r.parent - } - r.remain, err = discardN(r.reader, r.remain, r.remain) - return -} - -func extractOffset(base int64, msgSet []byte) (offset int64, err error) { - r, remain := bufio.NewReader(bytes.NewReader(msgSet)), len(msgSet) - for remain > 0 { - if remain, err = readInt64(r, remain, &offset); err != nil { - return - } - var sz int32 - if remain, err = readInt32(r, remain, &sz); err != nil { - return - } - if remain, err = discardN(r, remain, int(sz)); err != nil { - return - } - } - offset = base - offset - return -} diff --git a/vendor/github.com/segmentio/kafka-go/metadata.go b/vendor/github.com/segmentio/kafka-go/metadata.go deleted file mode 100644 index 2c578f832..000000000 --- a/vendor/github.com/segmentio/kafka-go/metadata.go +++ /dev/null @@ -1,89 +0,0 @@ -package kafka - -import "bufio" - -type topicMetadataRequestV1 []string - -func (r topicMetadataRequestV1) size() int32 { - return sizeofStringArray([]string(r)) -} - -func (r topicMetadataRequestV1) writeTo(w *bufio.Writer) { - writeStringArray(w, []string(r)) -} - -type metadataResponseV1 struct { - Brokers []brokerMetadataV1 - ControllerID int32 - Topics []topicMetadataV1 -} - -func (r metadataResponseV1) size() int32 { - n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() }) - n2 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) - return 4 + n1 + n2 -} - -func (r metadataResponseV1) writeTo(w *bufio.Writer) { - writeArray(w, len(r.Brokers), func(i int) { r.Brokers[i].writeTo(w) }) - writeInt32(w, r.ControllerID) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) -} - -type brokerMetadataV1 struct { - NodeID int32 - Host string - Port int32 - Rack string -} - -func (b brokerMetadataV1) size() int32 { - return 4 + 4 + sizeofString(b.Host) + sizeofString(b.Rack) -} - -func (b brokerMetadataV1) writeTo(w *bufio.Writer) { - writeInt32(w, b.NodeID) - writeString(w, b.Host) - writeInt32(w, b.Port) - writeString(w, b.Rack) -} - -type topicMetadataV1 struct { - TopicErrorCode int16 - 
TopicName string - Internal bool - Partitions []partitionMetadataV1 -} - -func (t topicMetadataV1) size() int32 { - return 2 + 1 + - sizeofString(t.TopicName) + - sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) -} - -func (t topicMetadataV1) writeTo(w *bufio.Writer) { - writeInt16(w, t.TopicErrorCode) - writeString(w, t.TopicName) - writeBool(w, t.Internal) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) -} - -type partitionMetadataV1 struct { - PartitionErrorCode int16 - PartitionID int32 - Leader int32 - Replicas []int32 - Isr []int32 -} - -func (p partitionMetadataV1) size() int32 { - return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr) -} - -func (p partitionMetadataV1) writeTo(w *bufio.Writer) { - writeInt16(w, p.PartitionErrorCode) - writeInt32(w, p.PartitionID) - writeInt32(w, p.Leader) - writeInt32Array(w, p.Replicas) - writeInt32Array(w, p.Isr) -} diff --git a/vendor/github.com/segmentio/kafka-go/offsetcommit.go b/vendor/github.com/segmentio/kafka-go/offsetcommit.go deleted file mode 100644 index cbd024caa..000000000 --- a/vendor/github.com/segmentio/kafka-go/offsetcommit.go +++ /dev/null @@ -1,167 +0,0 @@ -package kafka - -import "bufio" - -type offsetCommitRequestV2Partition struct { - // Partition ID - Partition int32 - - // Offset to be committed - Offset int64 - - // Metadata holds any associated metadata the client wants to keep - Metadata string -} - -func (t offsetCommitRequestV2Partition) size() int32 { - return sizeofInt32(t.Partition) + - sizeofInt64(t.Offset) + - sizeofString(t.Metadata) -} - -func (t offsetCommitRequestV2Partition) writeTo(w *bufio.Writer) { - writeInt32(w, t.Partition) - writeInt64(w, t.Offset) - writeString(w, t.Metadata) -} - -type offsetCommitRequestV2Topic struct { - // Topic name - Topic string - - // Partitions to commit offsets - Partitions []offsetCommitRequestV2Partition -} - -func (t offsetCommitRequestV2Topic) size() int32 { - return sizeofString(t.Topic) + - sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) -} - -func (t offsetCommitRequestV2Topic) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) -} - -type offsetCommitRequestV2 struct { - // GroupID holds the unique group identifier - GroupID string - - // GenerationID holds the generation of the group. - GenerationID int32 - - // MemberID assigned by the group coordinator - MemberID string - - // RetentionTime holds the time period in ms to retain the offset. 
- RetentionTime int64 - - // Topics to commit offsets - Topics []offsetCommitRequestV2Topic -} - -func (t offsetCommitRequestV2) size() int32 { - return sizeofString(t.GroupID) + - sizeofInt32(t.GenerationID) + - sizeofString(t.MemberID) + - sizeofInt64(t.RetentionTime) + - sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) -} - -func (t offsetCommitRequestV2) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeInt32(w, t.GenerationID) - writeString(w, t.MemberID) - writeInt64(w, t.RetentionTime) - writeArray(w, len(t.Topics), func(i int) { t.Topics[i].writeTo(w) }) -} - -type offsetCommitResponseV2PartitionResponse struct { - Partition int32 - - // ErrorCode holds response error code - ErrorCode int16 -} - -func (t offsetCommitResponseV2PartitionResponse) size() int32 { - return sizeofInt32(t.Partition) + - sizeofInt16(t.ErrorCode) -} - -func (t offsetCommitResponseV2PartitionResponse) writeTo(w *bufio.Writer) { - writeInt32(w, t.Partition) - writeInt16(w, t.ErrorCode) -} - -func (t *offsetCommitResponseV2PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.Partition); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { - return - } - return -} - -type offsetCommitResponseV2Response struct { - Topic string - PartitionResponses []offsetCommitResponseV2PartitionResponse -} - -func (t offsetCommitResponseV2Response) size() int32 { - return sizeofString(t.Topic) + - sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() }) -} - -func (t offsetCommitResponseV2Response) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeArray(w, len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(w) }) -} - -func (t *offsetCommitResponseV2Response) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readString(r, size, &t.Topic); err != nil { - return - } - - fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { - item := offsetCommitResponseV2PartitionResponse{} - if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { - return - } - t.PartitionResponses = append(t.PartitionResponses, item) - return - } - if remain, err = readArrayWith(r, remain, fn); err != nil { - return - } - - return -} - -type offsetCommitResponseV2 struct { - Responses []offsetCommitResponseV2Response -} - -func (t offsetCommitResponseV2) size() int32 { - return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() }) -} - -func (t offsetCommitResponseV2) writeTo(w *bufio.Writer) { - writeArray(w, len(t.Responses), func(i int) { t.Responses[i].writeTo(w) }) -} - -func (t *offsetCommitResponseV2) readFrom(r *bufio.Reader, size int) (remain int, err error) { - fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { - item := offsetCommitResponseV2Response{} - if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { - return - } - t.Responses = append(t.Responses, item) - return - } - if remain, err = readArrayWith(r, size, fn); err != nil { - return - } - - return -} diff --git a/vendor/github.com/segmentio/kafka-go/offsetfetch.go b/vendor/github.com/segmentio/kafka-go/offsetfetch.go deleted file mode 100644 index 8b822149c..000000000 --- a/vendor/github.com/segmentio/kafka-go/offsetfetch.go +++ /dev/null @@ -1,168 +0,0 @@ -package kafka - -import ( - "bufio" -) - -type offsetFetchRequestV1Topic struct { - // Topic 
name - Topic string - - // Partitions to fetch offsets - Partitions []int32 -} - -func (t offsetFetchRequestV1Topic) size() int32 { - return sizeofString(t.Topic) + - sizeofInt32Array(t.Partitions) -} - -func (t offsetFetchRequestV1Topic) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeInt32Array(w, t.Partitions) -} - -type offsetFetchRequestV1 struct { - // GroupID holds the unique group identifier - GroupID string - - // Topics to fetch offsets. - Topics []offsetFetchRequestV1Topic -} - -func (t offsetFetchRequestV1) size() int32 { - return sizeofString(t.GroupID) + - sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) -} - -func (t offsetFetchRequestV1) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeArray(w, len(t.Topics), func(i int) { t.Topics[i].writeTo(w) }) -} - -type offsetFetchResponseV1PartitionResponse struct { - // Partition ID - Partition int32 - - // Offset of last committed message - Offset int64 - - // Metadata client wants to keep - Metadata string - - // ErrorCode holds response error code - ErrorCode int16 -} - -func (t offsetFetchResponseV1PartitionResponse) size() int32 { - return sizeofInt32(t.Partition) + - sizeofInt64(t.Offset) + - sizeofString(t.Metadata) + - sizeofInt16(t.ErrorCode) -} - -func (t offsetFetchResponseV1PartitionResponse) writeTo(w *bufio.Writer) { - writeInt32(w, t.Partition) - writeInt64(w, t.Offset) - writeString(w, t.Metadata) - writeInt16(w, t.ErrorCode) -} - -func (t *offsetFetchResponseV1PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.Partition); err != nil { - return - } - if remain, err = readInt64(r, remain, &t.Offset); err != nil { - return - } - if remain, err = readString(r, remain, &t.Metadata); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { - return - } - return -} - -type offsetFetchResponseV1Response struct { - // Topic name - Topic string - - // PartitionResponses holds offsets by partition - PartitionResponses []offsetFetchResponseV1PartitionResponse -} - -func (t offsetFetchResponseV1Response) size() int32 { - return sizeofString(t.Topic) + - sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() }) -} - -func (t offsetFetchResponseV1Response) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeArray(w, len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(w) }) -} - -func (t *offsetFetchResponseV1Response) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readString(r, size, &t.Topic); err != nil { - return - } - - fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - item := offsetFetchResponseV1PartitionResponse{} - if fnRemain, fnErr = (&item).readFrom(r, size); err != nil { - return - } - t.PartitionResponses = append(t.PartitionResponses, item) - return - } - if remain, err = readArrayWith(r, remain, fn); err != nil { - return - } - - return -} - -type offsetFetchResponseV1 struct { - // Responses holds topic partition offsets - Responses []offsetFetchResponseV1Response -} - -func (t offsetFetchResponseV1) size() int32 { - return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() }) -} - -func (t offsetFetchResponseV1) writeTo(w *bufio.Writer) { - writeArray(w, len(t.Responses), func(i int) { t.Responses[i].writeTo(w) }) -} - -func (t *offsetFetchResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err 
error) { - fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { - item := offsetFetchResponseV1Response{} - if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { - return - } - t.Responses = append(t.Responses, item) - return - } - if remain, err = readArrayWith(r, size, fn); err != nil { - return - } - - return -} - -func findOffset(topic string, partition int32, response offsetFetchResponseV1) (int64, bool) { - for _, r := range response.Responses { - if r.Topic != topic { - continue - } - - for _, pr := range r.PartitionResponses { - if pr.Partition == partition { - return pr.Offset, true - } - } - } - - return 0, false -} diff --git a/vendor/github.com/segmentio/kafka-go/produce.go b/vendor/github.com/segmentio/kafka-go/produce.go deleted file mode 100644 index 0eda7c811..000000000 --- a/vendor/github.com/segmentio/kafka-go/produce.go +++ /dev/null @@ -1,113 +0,0 @@ -package kafka - -import "bufio" - -type produceRequestV2 struct { - RequiredAcks int16 - Timeout int32 - Topics []produceRequestTopicV2 -} - -func (r produceRequestV2) size() int32 { - return 2 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) -} - -func (r produceRequestV2) writeTo(w *bufio.Writer) { - writeInt16(w, r.RequiredAcks) - writeInt32(w, r.Timeout) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) -} - -type produceRequestTopicV2 struct { - TopicName string - Partitions []produceRequestPartitionV2 -} - -func (t produceRequestTopicV2) size() int32 { - return sizeofString(t.TopicName) + - sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) -} - -func (t produceRequestTopicV2) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) -} - -type produceRequestPartitionV2 struct { - Partition int32 - MessageSetSize int32 - MessageSet messageSet -} - -func (p produceRequestPartitionV2) size() int32 { - return 4 + 4 + p.MessageSet.size() -} - -func (p produceRequestPartitionV2) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt32(w, p.MessageSetSize) - p.MessageSet.writeTo(w) -} - -type produceResponseV2 struct { - ThrottleTime int32 - Topics []produceResponseTopicV2 -} - -func (r produceResponseV2) size() int32 { - return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) -} - -func (r produceResponseV2) writeTo(w *bufio.Writer) { - writeInt32(w, r.ThrottleTime) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) -} - -type produceResponseTopicV2 struct { - TopicName string - Partitions []produceResponsePartitionV2 -} - -func (t produceResponseTopicV2) size() int32 { - return sizeofString(t.TopicName) + - sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) -} - -func (t produceResponseTopicV2) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) -} - -type produceResponsePartitionV2 struct { - Partition int32 - ErrorCode int16 - Offset int64 - Timestamp int64 -} - -func (p produceResponsePartitionV2) size() int32 { - return 4 + 2 + 8 + 8 -} - -func (p produceResponsePartitionV2) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt16(w, p.ErrorCode) - writeInt64(w, p.Offset) - writeInt64(w, p.Timestamp) -} - -func (p *produceResponsePartitionV2) readFrom(r *bufio.Reader, sz int) (remain int, err error) { - if remain, err = readInt32(r, 
sz, &p.Partition); err != nil { - return - } - if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil { - return - } - if remain, err = readInt64(r, remain, &p.Offset); err != nil { - return - } - if remain, err = readInt64(r, remain, &p.Timestamp); err != nil { - return - } - return -} diff --git a/vendor/github.com/segmentio/kafka-go/protocol.go b/vendor/github.com/segmentio/kafka-go/protocol.go deleted file mode 100644 index 30d987aff..000000000 --- a/vendor/github.com/segmentio/kafka-go/protocol.go +++ /dev/null @@ -1,84 +0,0 @@ -package kafka - -import ( - "bufio" - "encoding/binary" - "fmt" -) - -type apiKey int16 - -const ( - produceRequest apiKey = 0 - fetchRequest apiKey = 1 - listOffsetRequest apiKey = 2 - metadataRequest apiKey = 3 - offsetCommitRequest apiKey = 8 - offsetFetchRequest apiKey = 9 - groupCoordinatorRequest apiKey = 10 - joinGroupRequest apiKey = 11 - heartbeatRequest apiKey = 12 - leaveGroupRequest apiKey = 13 - syncGroupRequest apiKey = 14 - describeGroupsRequest apiKey = 15 - listGroupsRequest apiKey = 16 - createTopicsRequest apiKey = 19 - deleteTopicsRequest apiKey = 20 -) - -type apiVersion int16 - -const ( - v0 apiVersion = 0 - v1 apiVersion = 1 - v2 apiVersion = 2 - v3 apiVersion = 3 -) - -type requestHeader struct { - Size int32 - ApiKey int16 - ApiVersion int16 - CorrelationID int32 - ClientID string -} - -func (h requestHeader) size() int32 { - return 4 + 2 + 2 + 4 + sizeofString(h.ClientID) -} - -func (h requestHeader) writeTo(w *bufio.Writer) { - writeInt32(w, h.Size) - writeInt16(w, h.ApiKey) - writeInt16(w, h.ApiVersion) - writeInt32(w, h.CorrelationID) - writeString(w, h.ClientID) -} - -type request interface { - size() int32 - writeTo(*bufio.Writer) -} - -func makeInt8(b []byte) int8 { - return int8(b[0]) -} - -func makeInt16(b []byte) int16 { - return int16(binary.BigEndian.Uint16(b)) -} - -func makeInt32(b []byte) int32 { - return int32(binary.BigEndian.Uint32(b)) -} - -func makeInt64(b []byte) int64 { - return int64(binary.BigEndian.Uint64(b)) -} - -func expectZeroSize(sz int, err error) error { - if err == nil && sz != 0 { - err = fmt.Errorf("reading a response left %d unread bytes", sz) - } - return err -} diff --git a/vendor/github.com/segmentio/kafka-go/read.go b/vendor/github.com/segmentio/kafka-go/read.go deleted file mode 100644 index fee5e5b24..000000000 --- a/vendor/github.com/segmentio/kafka-go/read.go +++ /dev/null @@ -1,376 +0,0 @@ -package kafka - -import ( - "bufio" - "errors" - "fmt" - "io" - "reflect" -) - -type readable interface { - readFrom(*bufio.Reader, int) (int, error) -} - -var errShortRead = errors.New("not enough bytes available to load the response") - -func peekRead(r *bufio.Reader, sz int, n int, f func([]byte)) (int, error) { - if n > sz { - return sz, errShortRead - } - b, err := r.Peek(n) - if err != nil { - return sz, err - } - f(b) - return discardN(r, sz, n) -} - -func readInt8(r *bufio.Reader, sz int, v *int8) (int, error) { - return peekRead(r, sz, 1, func(b []byte) { *v = makeInt8(b) }) -} - -func readInt16(r *bufio.Reader, sz int, v *int16) (int, error) { - return peekRead(r, sz, 2, func(b []byte) { *v = makeInt16(b) }) -} - -func readInt32(r *bufio.Reader, sz int, v *int32) (int, error) { - return peekRead(r, sz, 4, func(b []byte) { *v = makeInt32(b) }) -} - -func readInt64(r *bufio.Reader, sz int, v *int64) (int, error) { - return peekRead(r, sz, 8, func(b []byte) { *v = makeInt64(b) }) -} - -func readBool(r *bufio.Reader, sz int, v *bool) (int, error) { - return peekRead(r, sz, 1, func(b []byte) 
{ *v = b[0] != 0 }) -} - -func readString(r *bufio.Reader, sz int, v *string) (int, error) { - return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) { - *v, remain, err = readNewString(r, sz, n) - return - }) -} - -func readStringWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) { - var err error - var len int16 - - if sz, err = readInt16(r, sz, &len); err != nil { - return sz, err - } - - n := int(len) - if n > sz { - return sz, errShortRead - } - - return cb(r, sz, n) -} - -func readNewString(r *bufio.Reader, sz int, n int) (string, int, error) { - b, sz, err := readNewBytes(r, sz, n) - return string(b), sz, err -} - -func readBytes(r *bufio.Reader, sz int, v *[]byte) (int, error) { - return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) { - *v, remain, err = readNewBytes(r, sz, n) - return - }) -} - -func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) { - var err error - var len int32 - - if sz, err = readInt32(r, sz, &len); err != nil { - return sz, err - } - - n := int(len) - if n > sz { - return sz, errShortRead - } - - return cb(r, sz, n) -} - -func readNewBytes(r *bufio.Reader, sz int, n int) ([]byte, int, error) { - var err error - var b []byte - - if n > 0 { - b = make([]byte, n) - n, err = io.ReadFull(r, b) - b = b[:n] - sz -= n - } - - return b, sz, err -} - -func readArrayWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int) (int, error)) (int, error) { - var err error - var len int32 - - if sz, err = readInt32(r, sz, &len); err != nil { - return sz, err - } - - for n := int(len); n > 0; n-- { - if sz, err = cb(r, sz); err != nil { - break - } - } - - return sz, err -} - -func readStringArray(r *bufio.Reader, sz int, v *[]string) (remain int, err error) { - var content []string - fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - var value string - if fnRemain, fnErr = readString(r, size, &value); fnErr != nil { - return - } - content = append(content, value) - return - } - if remain, err = readArrayWith(r, sz, fn); err != nil { - return - } - - *v = content - return -} - -func readMapStringInt32(r *bufio.Reader, sz int, v *map[string][]int32) (remain int, err error) { - var len int32 - if remain, err = readInt32(r, sz, &len); err != nil { - return - } - - content := make(map[string][]int32, len) - for i := 0; i < int(len); i++ { - var key string - var values []int32 - - if remain, err = readString(r, remain, &key); err != nil { - return - } - - fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - var value int32 - if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil { - return - } - values = append(values, value) - return - } - if remain, err = readArrayWith(r, remain, fn); err != nil { - return - } - - content[key] = values - } - *v = content - - return -} - -func read(r *bufio.Reader, sz int, a interface{}) (int, error) { - switch v := a.(type) { - case *int8: - return readInt8(r, sz, v) - case *int16: - return readInt16(r, sz, v) - case *int32: - return readInt32(r, sz, v) - case *int64: - return readInt64(r, sz, v) - case *bool: - return readBool(r, sz, v) - case *string: - return readString(r, sz, v) - case *[]byte: - return readBytes(r, sz, v) - } - switch v := reflect.ValueOf(a).Elem(); v.Kind() { - case reflect.Struct: - return readStruct(r, sz, v) - case reflect.Slice: - return readSlice(r, sz, v) - default: - panic(fmt.Sprintf("unsupported type: %T", a)) - } 
-} - -func readAll(r *bufio.Reader, sz int, ptrs ...interface{}) (int, error) { - var err error - - for _, ptr := range ptrs { - if sz, err = readPtr(r, sz, ptr); err != nil { - break - } - } - - return sz, err -} - -func readPtr(r *bufio.Reader, sz int, ptr interface{}) (int, error) { - switch v := ptr.(type) { - case *int8: - return readInt8(r, sz, v) - case *int16: - return readInt16(r, sz, v) - case *int32: - return readInt32(r, sz, v) - case *int64: - return readInt64(r, sz, v) - case *string: - return readString(r, sz, v) - case *[]byte: - return readBytes(r, sz, v) - case readable: - return v.readFrom(r, sz) - default: - panic(fmt.Sprintf("unsupported type: %T", v)) - } -} - -func readStruct(r *bufio.Reader, sz int, v reflect.Value) (int, error) { - var err error - for i, n := 0, v.NumField(); i != n; i++ { - if sz, err = read(r, sz, v.Field(i).Addr().Interface()); err != nil { - return sz, err - } - } - return sz, nil -} - -func readSlice(r *bufio.Reader, sz int, v reflect.Value) (int, error) { - var err error - var len int32 - - if sz, err = readInt32(r, sz, &len); err != nil { - return sz, err - } - - if n := int(len); n < 0 { - v.Set(reflect.Zero(v.Type())) - } else { - v.Set(reflect.MakeSlice(v.Type(), n, n)) - - for i := 0; i != n; i++ { - if sz, err = read(r, sz, v.Index(i).Addr().Interface()); err != nil { - return sz, err - } - } - } - - return sz, nil -} - -func readFetchResponseHeader(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { - var n int32 - var p struct { - Partition int32 - ErrorCode int16 - HighwaterMarkOffset int64 - MessageSetSize int32 - } - - if remain, err = readInt32(r, size, &throttle); err != nil { - return - } - - if remain, err = readInt32(r, remain, &n); err != nil { - return - } - - // This error should never trigger, unless there's a bug in the kafka client - // or server. - if n != 1 { - err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n) - return - } - - // We ignore the topic name because we've requests messages for a single - // topic, unless there's a bug in the kafka server we will have received - // the name of the topic that we requested. - if remain, err = discardString(r, remain); err != nil { - return - } - - if remain, err = readInt32(r, remain, &n); err != nil { - return - } - - // This error should never trigger, unless there's a bug in the kafka client - // or server. - if n != 1 { - err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n) - return - } - - if remain, err = read(r, remain, &p); err != nil { - return - } - - if p.ErrorCode != 0 { - err = Error(p.ErrorCode) - return - } - - // This error should never trigger, unless there's a bug in the kafka client - // or server. 
- if remain != int(p.MessageSetSize) { - err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", p.MessageSetSize, remain) - return - } - - watermark = p.HighwaterMarkOffset - return -} - -func readMessageHeader(r *bufio.Reader, sz int) (offset int64, attributes int8, timestamp int64, remain int, err error) { - var version int8 - - if remain, err = readInt64(r, sz, &offset); err != nil { - return - } - - // On discarding the message size and CRC: - // --------------------------------------- - // - // - Not sure why kafka gives the message size here, we already have the - // number of remaining bytes in the response and kafka should only truncate - // the trailing message. - // - // - TCP is already taking care of ensuring data integrity, no need to - // waste resources doing it a second time so we just skip the message CRC. - // - if remain, err = discardN(r, remain, 8); err != nil { - return - } - - if remain, err = readInt8(r, remain, &version); err != nil { - return - } - - if remain, err = readInt8(r, remain, &attributes); err != nil { - return - } - - switch version { - case 0: - case 1: - remain, err = readInt64(r, remain, ×tamp) - default: - err = fmt.Errorf("unsupported message version %d found in fetch response", version) - } - - return -} diff --git a/vendor/github.com/segmentio/kafka-go/reader.go b/vendor/github.com/segmentio/kafka-go/reader.go deleted file mode 100644 index fa8e2ef97..000000000 --- a/vendor/github.com/segmentio/kafka-go/reader.go +++ /dev/null @@ -1,1895 +0,0 @@ -package kafka - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "log" - "math" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" -) - -const ( - LastOffset int64 = -1 // The most recent offset available for a partition. - FirstOffset = -2 // The least recent offset available for a partition. -) - -const ( - // defaultCommitRetries holds the number commit attempts to make - // before giving up - defaultCommitRetries = 3 -) - -var ( - errOnlyAvailableWithGroup = errors.New("unavailable when GroupID is not set") - errNotAvailableWithGroup = errors.New("unavailable when GroupID is set") -) - -const ( - // defaultProtocolType holds the default protocol type documented in the - // kafka protocol - // - // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI - defaultProtocolType = "consumer" - - // defaultHeartbeatInterval contains the default time between heartbeats. If - // the coordinator does not receive a heartbeat within the session timeout interval, - // the consumer will be considered dead and the coordinator will rebalance the - // group. - // - // As a rule, the heartbeat interval should be no greater than 1/3 the session timeout - defaultHeartbeatInterval = 3 * time.Second - - // defaultSessionTimeout contains the default interval the coordinator will wait - // for a heartbeat before marking a consumer as dead - defaultSessionTimeout = 30 * time.Second - - // defaultRebalanceTimeout contains the amount of time the coordinator will wait - // for consumers to issue a join group once a rebalance has been requested - defaultRebalanceTimeout = 30 * time.Second - - // defaultRetentionTime holds the length of time a the consumer group will be - // saved by kafka - defaultRetentionTime = time.Hour * 24 -) - -// Reader provides a high-level API for consuming messages from kafka. 
-// -// A Reader automatically manages reconnections to a kafka server, and -// blocking methods have context support for asynchronous cancellations. -type Reader struct { - // immutable fields of the reader - config ReaderConfig - - // communication channels between the parent reader and its subreaders - msgs chan readerMessage - - // mutable fields of the reader (synchronized on the mutex) - mutex sync.Mutex - join sync.WaitGroup - cancel context.CancelFunc - stop context.CancelFunc - done chan struct{} - commits chan commitRequest - version int64 // version holds the generation of the spawned readers - offset int64 - lag int64 - closed bool - address string // address of group coordinator - generationID int32 // generationID of group - memberID string // memberID of group - - // offsetStash should only be managed by the commitLoopInterval. We store - // it here so that it survives rebalances - offsetStash offsetStash - - // reader stats are all made of atomic values, no need for synchronization. - once uint32 - stctx context.Context - // reader stats are all made of atomic values, no need for synchronization. - // Use a pointer to ensure 64-bit alignment of the values. - stats *readerStats -} - -// useConsumerGroup indicates whether the Reader is part of a consumer group. -func (r *Reader) useConsumerGroup() bool { return r.config.GroupID != "" } - -// useSyncCommits indicates whether the Reader is configured to perform sync or -// async commits. -func (r *Reader) useSyncCommits() bool { return r.config.CommitInterval == 0 } - -// membership returns the group generationID and memberID of the reader. -// -// Only used when config.GroupID != "" -func (r *Reader) membership() (generationID int32, memberID string) { - r.mutex.Lock() - generationID = r.generationID - memberID = r.memberID - r.mutex.Unlock() - return -} - -// lookupCoordinator scans the brokers and looks up the address of the -// coordinator for the group. 
-// -// Only used when config.GroupID != "" -func (r *Reader) lookupCoordinator() (string, error) { - conn, err := r.connect() - if err != nil { - return "", fmt.Errorf("unable to coordinator to any connect for group, %v: %v\n", r.config.GroupID, err) - } - defer conn.Close() - - out, err := conn.findCoordinator(findCoordinatorRequestV0{ - CoordinatorKey: r.config.GroupID, - }) - if err != nil { - return "", fmt.Errorf("unable to find coordinator for group, %v: %v", r.config.GroupID, err) - } - - address := fmt.Sprintf("%v:%v", out.Coordinator.Host, out.Coordinator.Port) - return address, nil -} - -// refreshCoordinator updates the value of r.address -func (r *Reader) refreshCoordinator() (err error) { - const ( - backoffDelayMin = 100 * time.Millisecond - backoffDelayMax = 1 * time.Second - ) - - for attempt := 0; true; attempt++ { - if attempt != 0 { - if !sleep(r.stctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) { - return r.stctx.Err() - } - } - - address, err := r.lookupCoordinator() - if err != nil { - continue - } - - r.mutex.Lock() - oldAddress := r.address - r.address = address - r.mutex.Unlock() - - if address != oldAddress { - r.withLogger(func(l *log.Logger) { - l.Printf("coordinator for group, %v, set to %v\n", r.config.GroupID, address) - }) - } - - break - } - - return nil -} - -// makejoinGroupRequestV1 handles the logic of constructing a joinGroup -// request -func (r *Reader) makejoinGroupRequestV1() (joinGroupRequestV1, error) { - _, memberID := r.membership() - - request := joinGroupRequestV1{ - GroupID: r.config.GroupID, - MemberID: memberID, - SessionTimeout: int32(r.config.SessionTimeout / time.Millisecond), - RebalanceTimeout: int32(r.config.RebalanceTimeout / time.Millisecond), - ProtocolType: defaultProtocolType, - } - - for _, balancer := range r.config.GroupBalancers { - userData, err := balancer.UserData() - if err != nil { - return joinGroupRequestV1{}, fmt.Errorf("unable to construct protocol metadata for member, %v: %v\n", balancer.ProtocolName(), err) - } - request.GroupProtocols = append(request.GroupProtocols, joinGroupRequestGroupProtocolV1{ - ProtocolName: balancer.ProtocolName(), - ProtocolMetadata: groupMetadata{ - Version: 1, - Topics: []string{r.config.Topic}, - UserData: userData, - }.bytes(), - }) - } - - return request, nil -} - -// makeMemberProtocolMetadata maps encoded member metadata ([]byte) into []GroupMember -func (r *Reader) makeMemberProtocolMetadata(in []joinGroupResponseMemberV1) ([]GroupMember, error) { - members := make([]GroupMember, 0, len(in)) - for _, item := range in { - metadata := groupMetadata{} - reader := bufio.NewReader(bytes.NewReader(item.MemberMetadata)) - if remain, err := (&metadata).readFrom(reader, len(item.MemberMetadata)); err != nil || remain != 0 { - return nil, fmt.Errorf("unable to read metadata for member, %v: %v\n", item.MemberID, err) - } - - members = append(members, GroupMember{ - ID: item.MemberID, - Topics: metadata.Topics, - UserData: metadata.UserData, - }) - } - return members, nil -} - -// partitionReader is an internal interface used to simplify unit testing -type partitionReader interface { - // ReadPartitions mirrors Conn.ReadPartitions - ReadPartitions(topics ...string) (partitions []Partition, err error) -} - -// assignTopicPartitions uses the selected GroupBalancer to assign members to -// their various partitions -func (r *Reader) assignTopicPartitions(conn partitionReader, group joinGroupResponseV1) (GroupMemberAssignments, error) { - r.withLogger(func(l *log.Logger) { - 
l.Println("selected as leader for group,", r.config.GroupID) - }) - - balancer, ok := findGroupBalancer(group.GroupProtocol, r.config.GroupBalancers) - if !ok { - return nil, fmt.Errorf("unable to find selected balancer, %v, for group, %v", group.GroupProtocol, r.config.GroupID) - } - - members, err := r.makeMemberProtocolMetadata(group.Members) - if err != nil { - return nil, fmt.Errorf("unable to construct MemberProtocolMetadata: %v", err) - } - - topics := extractTopics(members) - partitions, err := conn.ReadPartitions(topics...) - if err != nil { - return nil, fmt.Errorf("unable to read partitions: %v", err) - } - - r.withLogger(func(l *log.Logger) { - l.Printf("using '%v' balancer to assign group, %v\n", group.GroupProtocol, r.config.GroupID) - for _, member := range members { - l.Printf("found member: %v/%#v", member.ID, member.UserData) - } - for _, partition := range partitions { - l.Printf("found topic/partition: %v/%v", partition.Topic, partition.ID) - } - }) - - return balancer.AssignGroups(members, partitions), nil -} - -func (r *Reader) leaveGroup(conn *Conn) error { - _, memberID := r.membership() - _, err := conn.leaveGroup(leaveGroupRequestV0{ - GroupID: r.config.GroupID, - MemberID: memberID, - }) - if err != nil { - return fmt.Errorf("leave group failed for group, %v, and member, %v: %v", r.config.GroupID, memberID, err) - } - - return nil -} - -// joinGroup attempts to join the reader to the consumer group. -// Returns GroupMemberAssignments is this Reader was selected as -// the leader. Otherwise, GroupMemberAssignments will be nil. -// -// Possible kafka error codes returned: -// * GroupLoadInProgress: -// * GroupCoordinatorNotAvailable: -// * NotCoordinatorForGroup: -// * InconsistentGroupProtocol: -// * InvalidSessionTimeout: -// * GroupAuthorizationFailed: -func (r *Reader) joinGroup() (GroupMemberAssignments, error) { - conn, err := r.coordinator() - if err != nil { - return nil, err - } - defer conn.Close() - - request, err := r.makejoinGroupRequestV1() - if err != nil { - return nil, err - } - - response, err := conn.joinGroup(request) - if err != nil { - switch err { - case UnknownMemberId: - r.mutex.Lock() - r.memberID = "" - r.mutex.Unlock() - return nil, fmt.Errorf("joinGroup failed: %v", err) - - default: - return nil, fmt.Errorf("joinGroup failed: %v", err) - } - } - - // Extract our membership and generationID from the response - r.mutex.Lock() - oldGenerationID := r.generationID - oldMemberID := r.memberID - r.generationID = response.GenerationID - r.memberID = response.MemberID - r.mutex.Unlock() - - if oldGenerationID != response.GenerationID || oldMemberID != response.MemberID { - r.withLogger(func(l *log.Logger) { - l.Printf("response membership changed. generationID: %v => %v, memberID: '%v' => '%v'\n", - oldGenerationID, - response.GenerationID, - oldMemberID, - response.MemberID, - ) - }) - } - - var assignments GroupMemberAssignments - if iAmLeader := response.MemberID == response.LeaderID; iAmLeader { - v, err := r.assignTopicPartitions(conn, response) - if err != nil { - _ = r.leaveGroup(conn) - return nil, err - } - assignments = v - - r.withLogger(func(l *log.Logger) { - for memberID, assignment := range assignments { - for topic, partitions := range assignment { - l.Printf("assigned member/topic/partitions %v/%v/%v\n", memberID, topic, partitions) - } - } - }) - } - - r.withLogger(func(l *log.Logger) { - l.Printf("joinGroup succeeded for response, %v. 
generationID=%v, memberID=%v\n", r.config.GroupID, response.GenerationID, response.MemberID) - }) - - return assignments, nil -} - -func (r *Reader) makeSyncGroupRequestV0(memberAssignments GroupMemberAssignments) syncGroupRequestV0 { - generationID, memberID := r.membership() - request := syncGroupRequestV0{ - GroupID: r.config.GroupID, - GenerationID: generationID, - MemberID: memberID, - } - - if memberAssignments != nil { - request.GroupAssignments = make([]syncGroupRequestGroupAssignmentV0, 0, 1) - - for memberID, topics := range memberAssignments { - topics32 := make(map[string][]int32) - for topic, partitions := range topics { - partitions32 := make([]int32, len(partitions)) - for i := range partitions { - partitions32[i] = int32(partitions[i]) - } - topics32[topic] = partitions32 - } - request.GroupAssignments = append(request.GroupAssignments, syncGroupRequestGroupAssignmentV0{ - MemberID: memberID, - MemberAssignments: groupAssignment{ - Version: 1, - Topics: topics32, - }.bytes(), - }) - } - - r.withErrorLogger(func(logger *log.Logger) { - logger.Printf("Syncing %d assignments for generation %d as member %s", len(request.GroupAssignments), generationID, memberID) - }) - } - - return request -} - -// syncGroup completes the consumer group handshake by accepting the -// memberAssignments (if this Reader is the leader) and returning this -// Readers subscriptions topic => partitions -// -// Possible kafka error codes returned: -// * GroupCoordinatorNotAvailable: -// * NotCoordinatorForGroup: -// * IllegalGeneration: -// * RebalanceInProgress: -// * GroupAuthorizationFailed: -func (r *Reader) syncGroup(memberAssignments GroupMemberAssignments) (map[string][]int32, error) { - conn, err := r.coordinator() - if err != nil { - return nil, err - } - defer conn.Close() - - request := r.makeSyncGroupRequestV0(memberAssignments) - response, err := conn.syncGroups(request) - if err != nil { - switch err { - case RebalanceInProgress: - // don't leave the group - return nil, fmt.Errorf("syncGroup failed: %v", err) - - case UnknownMemberId: - r.mutex.Lock() - r.memberID = "" - r.mutex.Unlock() - _ = r.leaveGroup(conn) - return nil, fmt.Errorf("syncGroup failed: %v", err) - - default: - _ = r.leaveGroup(conn) - return nil, fmt.Errorf("syncGroup failed: %v", err) - } - } - - assignments := groupAssignment{} - reader := bufio.NewReader(bytes.NewReader(response.MemberAssignments)) - if _, err := (&assignments).readFrom(reader, len(response.MemberAssignments)); err != nil { - _ = r.leaveGroup(conn) - return nil, fmt.Errorf("unable to read SyncGroup response for group, %v: %v\n", r.config.GroupID, err) - } - - if len(assignments.Topics) == 0 { - generation, memberID := r.membership() - return nil, fmt.Errorf("received empty assignments for group, %v as member %s for generation %d", r.config.GroupID, memberID, generation) - } - - r.withLogger(func(l *log.Logger) { - l.Printf("sync group finished for group, %v\n", r.config.GroupID) - }) - - return assignments.Topics, nil -} - -func (r *Reader) rebalance() (map[string][]int32, error) { - r.withLogger(func(l *log.Logger) { - l.Printf("rebalancing consumer group, %v", r.config.GroupID) - }) - - if err := r.refreshCoordinator(); err != nil { - return nil, err - } - - members, err := r.joinGroup() - if err != nil { - return nil, err - } - - assignments, err := r.syncGroup(members) - if err != nil { - return nil, err - } - - return assignments, nil -} - -func (r *Reader) unsubscribe() error { - r.cancel() - r.join.Wait() - return nil -} - -func (r *Reader) 
fetchOffsets(subs map[string][]int32) (map[int]int64, error) { - conn, err := r.coordinator() - if err != nil { - return nil, err - } - defer conn.Close() - - partitions := subs[r.config.Topic] - offsets, err := conn.offsetFetch(offsetFetchRequestV1{ - GroupID: r.config.GroupID, - Topics: []offsetFetchRequestV1Topic{ - { - Topic: r.config.Topic, - Partitions: partitions, - }, - }, - }) - if err != nil { - return nil, err - } - - offsetsByPartition := map[int]int64{} - for _, pr := range offsets.Responses[0].PartitionResponses { - for _, partition := range partitions { - if partition == pr.Partition { - offset := pr.Offset - if offset < 0 { - // No offset stored - offset = FirstOffset - } - offsetsByPartition[int(partition)] = offset - } - } - } - - return offsetsByPartition, nil -} - -func (r *Reader) subscribe(subs map[string][]int32) error { - if len(subs[r.config.Topic]) == 0 { - return nil - } - - offsetsByPartition, err := r.fetchOffsets(subs) - if err != nil { - if conn, err := r.coordinator(); err == nil { - // make an attempt at leaving the group - _ = r.leaveGroup(conn) - conn.Close() - } - - return err - } - - r.mutex.Lock() - r.start(offsetsByPartition) - r.mutex.Unlock() - - r.withLogger(func(l *log.Logger) { - l.Printf("subscribed to partitions: %+v", offsetsByPartition) - }) - - return nil -} - -// connect returns a connection to ANY broker -func (r *Reader) connect() (conn *Conn, err error) { - for _, broker := range r.config.Brokers { - if conn, err = r.config.Dialer.Dial("tcp", broker); err == nil { - return - } - } - return // err will be non-nil -} - -// coordinator returns a connection to the coordinator for this group -func (r *Reader) coordinator() (*Conn, error) { - r.mutex.Lock() - address := r.address - r.mutex.Unlock() - - conn, err := r.config.Dialer.DialContext(r.stctx, "tcp", address) - if err != nil { - return nil, fmt.Errorf("unable to connect to coordinator, %v", address) - } - - return conn, nil -} - -func (r *Reader) waitThrottleTime(throttleTimeMS int32) { - if throttleTimeMS == 0 { - return - } - - t := time.NewTimer(time.Duration(throttleTimeMS) * time.Millisecond) - defer t.Stop() - - select { - case <-r.stctx.Done(): - return - case <-t.C: - } -} - -// heartbeat sends heartbeat to coordinator at the interval defined by -// ReaderConfig.HeartbeatInterval -func (r *Reader) heartbeat(conn *Conn) error { - generationID, memberID := r.membership() - if generationID == 0 && memberID == "" { - return nil - } - - _, err := conn.heartbeat(heartbeatRequestV0{ - GroupID: r.config.GroupID, - GenerationID: generationID, - MemberID: memberID, - }) - if err != nil { - return fmt.Errorf("heartbeat failed: %v", err) - } - - return nil -} - -func (r *Reader) heartbeatLoop(conn *Conn) func(stop <-chan struct{}) { - return func(stop <-chan struct{}) { - r.withLogger(func(l *log.Logger) { - l.Printf("started heartbeat for group, %v [%v]", r.config.GroupID, r.config.HeartbeatInterval) - }) - defer r.withLogger(func(l *log.Logger) { - l.Println("stopped heartbeat for group,", r.config.GroupID) - }) - - ticker := time.NewTicker(r.config.HeartbeatInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if err := r.heartbeat(conn); err != nil { - return - } - - case <-stop: - return - } - } - } -} - -type offsetCommitter interface { - offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) -} - -func (r *Reader) commitOffsets(conn offsetCommitter, offsetStash offsetStash) error { - if len(offsetStash) == 0 { - return nil - } - - 
generationID, memberID := r.membership() - request := offsetCommitRequestV2{ - GroupID: r.config.GroupID, - GenerationID: generationID, - MemberID: memberID, - RetentionTime: int64(r.config.RetentionTime / time.Millisecond), - } - - for topic, partitions := range offsetStash { - t := offsetCommitRequestV2Topic{Topic: topic} - for partition, offset := range partitions { - t.Partitions = append(t.Partitions, offsetCommitRequestV2Partition{ - Partition: int32(partition), - Offset: offset, - }) - } - request.Topics = append(request.Topics, t) - } - - if _, err := conn.offsetCommit(request); err != nil { - return fmt.Errorf("unable to commit offsets for group, %v: %v", r.config.GroupID, err) - } - - r.withLogger(func(l *log.Logger) { - l.Printf("committed offsets: %v", offsetStash) - }) - - return nil -} - -// commitOffsetsWithRetry attempts to commit the specified offsets and retries -// up to the specified number of times -func (r *Reader) commitOffsetsWithRetry(conn offsetCommitter, offsetStash offsetStash, retries int) (err error) { - const ( - backoffDelayMin = 100 * time.Millisecond - backoffDelayMax = 5 * time.Second - ) - - for attempt := 0; attempt < retries; attempt++ { - if attempt != 0 { - if !sleep(r.stctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) { - return - } - } - - if err = r.commitOffsets(conn, offsetStash); err == nil { - return - } - } - - return // err will not be nil -} - -// offsetStash holds offsets by topic => partition => offset -type offsetStash map[string]map[int]int64 - -// merge updates the offsetStash with the offsets from the provided messages -func (o offsetStash) merge(commits []commit) { - for _, c := range commits { - offsetsByPartition, ok := o[c.topic] - if !ok { - offsetsByPartition = map[int]int64{} - o[c.topic] = offsetsByPartition - } - - if offset, ok := offsetsByPartition[c.partition]; !ok || c.offset > offset { - offsetsByPartition[c.partition] = c.offset - } - } -} - -// reset clears the contents of the offsetStash -func (o offsetStash) reset() { - for key := range o { - delete(o, key) - } -} - -// commitLoopImmediate handles each commit synchronously -func (r *Reader) commitLoopImmediate(conn offsetCommitter, stop <-chan struct{}) { - offsetsByTopicAndPartition := offsetStash{} - - for { - select { - case <-stop: - return - - case req := <-r.commits: - offsetsByTopicAndPartition.merge(req.commits) - req.errch <- r.commitOffsetsWithRetry(conn, offsetsByTopicAndPartition, defaultCommitRetries) - offsetsByTopicAndPartition.reset() - } - } -} - -// commitLoopInterval handles each commit asynchronously with a period defined -// by ReaderConfig.CommitInterval -func (r *Reader) commitLoopInterval(conn offsetCommitter, stop <-chan struct{}) { - ticker := time.NewTicker(r.config.HeartbeatInterval) - defer ticker.Stop() - - commit := func() { - if err := r.commitOffsetsWithRetry(conn, r.offsetStash, defaultCommitRetries); err != nil { - r.withErrorLogger(func(l *log.Logger) { l.Print(err) }) - } else { - r.offsetStash.reset() - } - } - - for { - select { - case <-stop: - commit() - return - - case <-ticker.C: - commit() - - case req := <-r.commits: - r.offsetStash.merge(req.commits) - } - } -} - -// commitLoop processes commits off the commit chan -func (r *Reader) commitLoop(conn *Conn) func(stop <-chan struct{}) { - return func(stop <-chan struct{}) { - r.withLogger(func(l *log.Logger) { - l.Println("started commit for group,", r.config.GroupID) - }) - defer r.withLogger(func(l *log.Logger) { - l.Println("stopped commit for group,", 
r.config.GroupID) - }) - - if r.config.CommitInterval == 0 { - r.commitLoopImmediate(conn, stop) - } else { - r.commitLoopInterval(conn, stop) - } - } -} - -// handshake performs the necessary incantations to join this Reader to the desired -// consumer group. handshake will be called whenever the group is disrupted -// (member join, member leave, coordinator changed, etc) -func (r *Reader) handshake() error { - // always clear prior to subscribe - r.unsubscribe() - - // rebalance and fetch assignments - assignments, err := r.rebalance() - if err != nil { - return fmt.Errorf("rebalance failed for consumer group, %v: %v", r.config.GroupID, err) - } - - conn, err := r.coordinator() - if err != nil { - return fmt.Errorf("heartbeat: unable to connect to coordinator: %v", err) - } - defer conn.Close() - - rg := &runGroup{} - rg = rg.WithContext(r.stctx) - rg.Go(r.heartbeatLoop(conn)) - rg.Go(r.commitLoop(conn)) - - // subscribe to assignments - if err := r.subscribe(assignments); err != nil { - rg.Stop() - return fmt.Errorf("subscribe failed for consumer group, %v: %v\n", r.config.GroupID, err) - } - - rg.Wait() - - return nil -} - -// run provides the main consumer group management loop. Each iteration performs the -// handshake to join the Reader to the consumer group. -func (r *Reader) run() { - defer close(r.done) - - if !r.useConsumerGroup() { - return - } - - r.withLogger(func(l *log.Logger) { - l.Printf("entering loop for consumer group, %v\n", r.config.GroupID) - }) - - for { - if err := r.handshake(); err != nil { - r.withErrorLogger(func(l *log.Logger) { - l.Println(err) - }) - } - - select { - case <-r.stctx.Done(): - return - default: - } - } -} - -// ReaderConfig is a configuration object used to create new instances of -// Reader. -type ReaderConfig struct { - // The list of broker addresses used to connect to the kafka cluster. - Brokers []string - - // GroupID holds the optional consumer group id. If GroupID is specified, then - // Partition should NOT be specified e.g. 0 - GroupID string - - // The topic to read messages from. - Topic string - - // Partition to read messages from. Either Partition or GroupID may - // be assigned, but not both - Partition int - - // An dialer used to open connections to the kafka server. This field is - // optional, if nil, the default dialer is used instead. - Dialer *Dialer - - // The capacity of the internal message queue, defaults to 100 if none is - // set. - QueueCapacity int - - // Min and max number of bytes to fetch from kafka in each request. - MinBytes int - MaxBytes int - - // Maximum amount of time to wait for new data to come when fetching batches - // of messages from kafka. - MaxWait time.Duration - - // ReadLagInterval sets the frequency at which the reader lag is updated. - // Setting this field to a negative value disables lag reporting. - ReadLagInterval time.Duration - - // GroupBalancers is the priority-ordered list of client-side consumer group - // balancing strategies that will be offered to the coordinator. The first - // strategy that all group members support will be chosen by the leader. - // - // Default: [Range, RoundRobin] - // - // Only used when GroupID is set - GroupBalancers []GroupBalancer - - // HeartbeatInterval sets the optional frequency at which the reader sends the consumer - // group heartbeat update. - // - // Default: 3s - // - // Only used when GroupID is set - HeartbeatInterval time.Duration - - // CommitInterval indicates the interval at which offsets are committed to - // the broker. 
If 0, commits will be handled synchronously. - // - // Defaults to 1s - // - // Only used when GroupID is set - CommitInterval time.Duration - - // SessionTimeout optionally sets the length of time that may pass without a heartbeat - // before the coordinator considers the consumer dead and initiates a rebalance. - // - // Default: 30s - // - // Only used when GroupID is set - SessionTimeout time.Duration - - // RebalanceTimeout optionally sets the length of time the coordinator will wait - // for members to join as part of a rebalance. For kafka servers under higher - // load, it may be useful to set this value higher. - // - // Default: 30s - // - // Only used when GroupID is set - RebalanceTimeout time.Duration - - // RetentionTime optionally sets the length of time the consumer group will be saved - // by the broker - // - // Default: 24h - // - // Only used when GroupID is set - RetentionTime time.Duration - - // If not nil, specifies a logger used to report internal changes within the - // reader. - Logger *log.Logger - - // ErrorLogger is the logger used to report errors. If nil, the reader falls - // back to using Logger instead. - ErrorLogger *log.Logger -} - -// ReaderStats is a data structure returned by a call to Reader.Stats that exposes -// details about the behavior of the reader. -type ReaderStats struct { - Dials int64 `metric:"kafka.reader.dial.count" type:"counter"` - Fetches int64 `metric:"kafka.reader.fetch.count" type:"counter"` - Messages int64 `metric:"kafka.reader.message.count" type:"counter"` - Bytes int64 `metric:"kafka.reader.message.bytes" type:"counter"` - Rebalances int64 `metric:"kafka.reader.rebalance.count" type:"counter"` - Timeouts int64 `metric:"kafka.reader.timeout.count" type:"counter"` - Errors int64 `metric:"kafka.reader.error.count" type:"counter"` - - DialTime DurationStats `metric:"kafka.reader.dial.seconds"` - ReadTime DurationStats `metric:"kafka.reader.read.seconds"` - WaitTime DurationStats `metric:"kafka.reader.wait.seconds"` - FetchSize SummaryStats `metric:"kafka.reader.fetch.size"` - FetchBytes SummaryStats `metric:"kafka.reader.fetch.bytes"` - - Offset int64 `metric:"kafka.reader.offset" type:"gauge"` - Lag int64 `metric:"kafka.reader.lag" type:"gauge"` - MinBytes int64 `metric:"kafka.reader.fetch_bytes.min" type:"gauge"` - MaxBytes int64 `metric:"kafka.reader.fetch_bytes.max" type:"gauge"` - MaxWait time.Duration `metric:"kafka.reader.fetch_wait.max" type:"gauge"` - QueueLength int64 `metric:"kafka.reader.queue.length" type:"gauge"` - QueueCapacity int64 `metric:"kafka.reader.queue.capacity" type:"gauge"` - - ClientID string `tag:"client_id"` - Topic string `tag:"topic"` - Partition string `tag:"partition"` - - // The original `Fetches` field had a typo where the metric name was called - // "kafak..." instead of "kafka...", in order to offer time to fix monitors - // that may be relying on this mistake we are temporarily introducing this - // field. - DeprecatedFetchesWithTypo int64 `metric:"kafak.reader.fetch.count" type:"counter"` -} - -// readerStats is a struct that contains statistics on a reader. -type readerStats struct { - dials counter - fetches counter - messages counter - bytes counter - rebalances counter - timeouts counter - errors counter - dialTime summary - readTime summary - waitTime summary - fetchSize summary - fetchBytes summary - offset gauge - lag gauge - partition string -} - -// NewReader creates and returns a new Reader configured with config. -// The offset is initialized to FirstOffset. 
-func NewReader(config ReaderConfig) *Reader { - if len(config.Brokers) == 0 { - panic("cannot create a new kafka reader with an empty list of broker addresses") - } - - if len(config.Topic) == 0 { - panic("cannot create a new kafka reader with an empty topic") - } - - if config.Partition < 0 || config.Partition >= math.MaxInt32 { - panic(fmt.Sprintf("partition number out of bounds: %d", config.Partition)) - } - - if config.MinBytes > config.MaxBytes { - panic(fmt.Sprintf("minimum batch size greater than the maximum (min = %d, max = %d)", config.MinBytes, config.MaxBytes)) - } - - if config.MinBytes < 0 { - panic(fmt.Sprintf("invalid negative minimum batch size (min = %d)", config.MinBytes)) - } - - if config.MaxBytes < 0 { - panic(fmt.Sprintf("invalid negative maximum batch size (max = %d)", config.MaxBytes)) - } - - if config.GroupID != "" && config.Partition != 0 { - panic("either Partition or GroupID may be specified, but not both") - } - - if config.GroupID != "" { - if len(config.GroupBalancers) == 0 { - config.GroupBalancers = []GroupBalancer{ - RangeGroupBalancer{}, - RoundRobinGroupBalancer{}, - } - } - - if config.HeartbeatInterval < 0 || (config.HeartbeatInterval/time.Millisecond) >= math.MaxInt32 { - panic(fmt.Sprintf("HeartbeatInterval out of bounds: %d", config.HeartbeatInterval)) - } - - if config.SessionTimeout < 0 || (config.SessionTimeout/time.Millisecond) >= math.MaxInt32 { - panic(fmt.Sprintf("SessionTimeout out of bounds: %d", config.SessionTimeout)) - } - - if config.RebalanceTimeout < 0 || (config.RebalanceTimeout/time.Millisecond) >= math.MaxInt32 { - panic(fmt.Sprintf("RebalanceTimeout out of bounds: %d", config.RebalanceTimeout)) - } - - if config.RetentionTime < 0 { - panic(fmt.Sprintf("RetentionTime out of bounds: %d", config.RetentionTime)) - } - - if config.CommitInterval < 0 || (config.CommitInterval/time.Millisecond) >= math.MaxInt32 { - panic(fmt.Sprintf("CommitInterval out of bounds: %d", config.CommitInterval)) - } - } - - if config.Dialer == nil { - config.Dialer = DefaultDialer - } - - if config.MaxBytes == 0 { - config.MaxBytes = 1e6 // 1 MB - } - - if config.MinBytes == 0 { - config.MinBytes = config.MaxBytes - } - - if config.MaxWait == 0 { - config.MaxWait = 10 * time.Second - } - - if config.ReadLagInterval == 0 { - config.ReadLagInterval = 1 * time.Minute - } - - if config.HeartbeatInterval == 0 { - config.HeartbeatInterval = defaultHeartbeatInterval - } - - if config.SessionTimeout == 0 { - config.SessionTimeout = defaultSessionTimeout - } - - if config.RebalanceTimeout == 0 { - config.RebalanceTimeout = defaultRebalanceTimeout - } - - if config.RetentionTime == 0 { - config.RetentionTime = defaultRetentionTime - } - - if config.QueueCapacity == 0 { - config.QueueCapacity = 100 - } - - // when configured as a consumer group; stats should report a partition of -1 - readerStatsPartition := config.Partition - if config.GroupID != "" { - readerStatsPartition = -1 - } - - // when configured as a consume group, start version as 1 to ensure that only - // the rebalance function will start readers - version := int64(0) - if config.GroupID != "" { - version = 1 - } - - stctx, stop := context.WithCancel(context.Background()) - r := &Reader{ - config: config, - msgs: make(chan readerMessage, config.QueueCapacity), - cancel: func() {}, - done: make(chan struct{}), - commits: make(chan commitRequest, config.QueueCapacity), - stop: stop, - offset: FirstOffset, - stctx: stctx, - stats: &readerStats{ - dialTime: makeSummary(), - readTime: makeSummary(), - 
waitTime: makeSummary(), - fetchSize: makeSummary(), - fetchBytes: makeSummary(), - // Generate the string representation of the partition number only - // once when the reader is created. - partition: strconv.Itoa(readerStatsPartition), - }, - version: version, - offsetStash: offsetStash{}, - } - - go r.run() - - return r -} - -// Config returns the reader's configuration. -func (r *Reader) Config() ReaderConfig { - return r.config -} - -// Close closes the stream, preventing the program from reading any more -// messages from it. -func (r *Reader) Close() error { - atomic.StoreUint32(&r.once, 1) - - r.mutex.Lock() - closed := r.closed - r.closed = true - r.mutex.Unlock() - - r.cancel() - r.stop() - r.join.Wait() - - if r.useConsumerGroup() { - // gracefully attempt to leave the consumer group on close - if generationID, membershipID := r.membership(); generationID > 0 && membershipID != "" { - if conn, err := r.coordinator(); err == nil { - _ = r.leaveGroup(conn) - } - } - } - - <-r.done - - if !closed { - close(r.msgs) - } - - return nil -} - -// ReadMessage reads and return the next message from the r. The method call -// blocks until a message becomes available, or an error occurs. The program -// may also specify a context to asynchronously cancel the blocking operation. -// -// The method returns io.EOF to indicate that the reader has been closed. -// -// If consumer groups are used, ReadMessage will automatically commit the -// offset when called. -func (r *Reader) ReadMessage(ctx context.Context) (Message, error) { - m, err := r.FetchMessage(ctx) - if err != nil { - return Message{}, err - } - - if r.useConsumerGroup() { - if err := r.CommitMessages(ctx, m); err != nil { - return Message{}, err - } - } - - return m, nil -} - -// FetchMessage reads and return the next message from the r. The method call -// blocks until a message becomes available, or an error occurs. The program -// may also specify a context to asynchronously cancel the blocking operation. -// -// The method returns io.EOF to indicate that the reader has been closed. -// -// FetchMessage does not commit offsets automatically when using consumer groups. -// Use CommitMessages to commit the offset. -func (r *Reader) FetchMessage(ctx context.Context) (Message, error) { - r.activateReadLag() - - for { - r.mutex.Lock() - - if !r.closed && r.version == 0 { - r.start(map[int]int64{r.config.Partition: r.offset}) - } - - version := r.version - r.mutex.Unlock() - - select { - case <-ctx.Done(): - return Message{}, ctx.Err() - - case m, ok := <-r.msgs: - if !ok { - return Message{}, io.EOF - } - - if m.version >= version { - r.mutex.Lock() - - switch { - case m.error != nil: - case version == r.version: - r.offset = m.message.Offset + 1 - r.lag = m.watermark - r.offset - } - - r.mutex.Unlock() - - switch m.error { - case nil: - case io.EOF: - // io.EOF is used as a marker to indicate that the stream - // has been closed, in case it was received from the inner - // reader we don't want to confuse the program and replace - // the error with io.ErrUnexpectedEOF. - m.error = io.ErrUnexpectedEOF - } - - return m.message, m.error - } - } - } -} - -// CommitMessages commits the list of messages passed as argument. The program -// may pass a context to asynchronously cancel the commit operation when it was -// configured to be blocking. 
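// A sketch of the explicit-commit pattern described above: FetchMessage hands a
// message out without committing its offset, and CommitMessages is called only
// after processing succeeds. The reader passed in is assumed to be configured
// with a GroupID, since both calls require consumer-group mode; the process
// callback is an assumption for illustration.
package example

import (
	"context"

	kafka "github.com/segmentio/kafka-go"
)

func consume(ctx context.Context, r *kafka.Reader, process func(kafka.Message) error) error {
	for {
		m, err := r.FetchMessage(ctx)
		if err != nil {
			return err // io.EOF once the reader has been closed
		}
		if err := process(m); err != nil {
			return err
		}
		// With CommitInterval == 0 this call is synchronous and surfaces the
		// broker's error, if any; otherwise the commit is queued and flushed
		// on the configured interval.
		if err := r.CommitMessages(ctx, m); err != nil {
			return err
		}
	}
}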
-func (r *Reader) CommitMessages(ctx context.Context, msgs ...Message) error { - if !r.useConsumerGroup() { - return errOnlyAvailableWithGroup - } - - var errch <-chan error - var sync = r.useSyncCommits() - var creq = commitRequest{ - commits: makeCommits(msgs...), - } - - if sync { - ch := make(chan error, 1) - errch, creq.errch = ch, ch - } - - select { - case r.commits <- creq: - case <-ctx.Done(): - return ctx.Err() - case <-r.stctx.Done(): - // This context is used to ensure we don't allow commits after the - // reader was closed. - return io.ErrClosedPipe - } - - if !sync { - return nil - } - - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-errch: - return err - } -} - -// ReadLag returns the current lag of the reader by fetching the last offset of -// the topic and partition and computing the difference between that value and -// the offset of the last message returned by ReadMessage. -// -// This method is intended to be used in cases where a program may be unable to -// call ReadMessage to update the value returned by Lag, but still needs to get -// an up to date estimation of how far behind the reader is. For example when -// the consumer is not ready to process the next message. -// -// The function returns a lag of zero when the reader's current offset is -// negative. -func (r *Reader) ReadLag(ctx context.Context) (lag int64, err error) { - if r.useConsumerGroup() { - return 0, errNotAvailableWithGroup - } - - type offsets struct { - first int64 - last int64 - } - - offch := make(chan offsets, 1) - errch := make(chan error, 1) - - go func() { - var off offsets - var err error - - for _, broker := range r.config.Brokers { - var conn *Conn - - if conn, err = r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition); err != nil { - continue - } - - deadline, _ := ctx.Deadline() - conn.SetDeadline(deadline) - - off.first, off.last, err = conn.ReadOffsets() - conn.Close() - - if err == nil { - break - } - } - - if err != nil { - errch <- err - } else { - offch <- off - } - }() - - select { - case off := <-offch: - switch cur := r.Offset(); { - case cur == FirstOffset: - lag = off.last - off.first - - case cur == LastOffset: - lag = 0 - - default: - lag = off.last - cur - } - case err = <-errch: - case <-ctx.Done(): - err = ctx.Err() - } - - return -} - -// Offset returns the current absolute offset of the reader, or -1 -// if r is backed by a consumer group. -func (r *Reader) Offset() int64 { - if r.useConsumerGroup() { - return -1 - } - - r.mutex.Lock() - offset := r.offset - r.mutex.Unlock() - r.withLogger(func(log *log.Logger) { - log.Printf("looking up offset of kafka reader for partition %d of %s: %d", r.config.Partition, r.config.Topic, offset) - }) - return offset -} - -// Lag returns the lag of the last message returned by ReadMessage, or -1 -// if r is backed by a consumer group. -func (r *Reader) Lag() int64 { - if r.useConsumerGroup() { - return -1 - } - - r.mutex.Lock() - lag := r.lag - r.mutex.Unlock() - return lag -} - -// SetOffset changes the offset from which the next batch of messages will be -// read. The method fails with io.ErrClosedPipe if the reader has already been closed. -// -// From version 0.2.0, FirstOffset and LastOffset can be used to indicate the first -// or last available offset in the partition. 
Please note while -1 and -2 were accepted -// to indicate the first or last offset in previous versions, the meanings of the numbers -// were swapped in 0.2.0 to match the meanings in other libraries and the Kafka protocol -// specification. -func (r *Reader) SetOffset(offset int64) error { - if r.useConsumerGroup() { - return errNotAvailableWithGroup - } - - var err error - r.mutex.Lock() - - if r.closed { - err = io.ErrClosedPipe - } else if offset != r.offset { - r.withLogger(func(log *log.Logger) { - log.Printf("setting the offset of the kafka reader for partition %d of %s from %d to %d", - r.config.Partition, r.config.Topic, r.offset, offset) - }) - r.offset = offset - - if r.version != 0 { - r.start(map[int]int64{r.config.Partition: r.offset}) - } - - r.activateReadLag() - } - - r.mutex.Unlock() - return err -} - -// Stats returns a snapshot of the reader stats since the last time the method -// was called, or since the reader was created if it is called for the first -// time. -// -// A typical use of this method is to spawn a goroutine that will periodically -// call Stats on a kafka reader and report the metrics to a stats collection -// system. -func (r *Reader) Stats() ReaderStats { - stats := ReaderStats{ - Dials: r.stats.dials.snapshot(), - Fetches: r.stats.fetches.snapshot(), - Messages: r.stats.messages.snapshot(), - Bytes: r.stats.bytes.snapshot(), - Rebalances: r.stats.rebalances.snapshot(), - Timeouts: r.stats.timeouts.snapshot(), - Errors: r.stats.errors.snapshot(), - DialTime: r.stats.dialTime.snapshotDuration(), - ReadTime: r.stats.readTime.snapshotDuration(), - WaitTime: r.stats.waitTime.snapshotDuration(), - FetchSize: r.stats.fetchSize.snapshot(), - FetchBytes: r.stats.fetchBytes.snapshot(), - Offset: r.stats.offset.snapshot(), - Lag: r.stats.lag.snapshot(), - MinBytes: int64(r.config.MinBytes), - MaxBytes: int64(r.config.MaxBytes), - MaxWait: r.config.MaxWait, - QueueLength: int64(len(r.msgs)), - QueueCapacity: int64(cap(r.msgs)), - ClientID: r.config.Dialer.ClientID, - Topic: r.config.Topic, - Partition: r.stats.partition, - } - // TODO: remove when we get rid of the deprecated field. 
- stats.DeprecatedFetchesWithTypo = stats.Fetches - return stats -} - -func (r *Reader) withLogger(do func(*log.Logger)) { - if r.config.Logger != nil { - do(r.config.Logger) - } -} - -func (r *Reader) withErrorLogger(do func(*log.Logger)) { - if r.config.ErrorLogger != nil { - do(r.config.ErrorLogger) - } else { - r.withLogger(do) - } -} - -func (r *Reader) activateReadLag() { - if r.config.ReadLagInterval > 0 && atomic.CompareAndSwapUint32(&r.once, 0, 1) { - // read lag will only be calculated when not using consumer groups - // todo discuss how capturing read lag should interact with rebalancing - if !r.useConsumerGroup() { - go r.readLag(r.stctx) - } - } -} - -func (r *Reader) readLag(ctx context.Context) { - ticker := time.NewTicker(r.config.ReadLagInterval) - defer ticker.Stop() - - for { - timeout, cancel := context.WithTimeout(ctx, r.config.ReadLagInterval/2) - lag, err := r.ReadLag(timeout) - cancel() - - if err != nil { - r.stats.errors.observe(1) - r.withErrorLogger(func(log *log.Logger) { - log.Printf("kafka reader failed to read lag of partition %d of %s", r.config.Partition, r.config.Topic) - }) - } else { - r.stats.lag.observe(lag) - } - - select { - case <-ticker.C: - case <-ctx.Done(): - return - } - } -} - -func (r *Reader) start(offsetsByPartition map[int]int64) { - if r.closed { - // don't start child reader if parent Reader is closed - return - } - - ctx, cancel := context.WithCancel(context.Background()) - - r.cancel() // always cancel the previous reader - r.cancel = cancel - r.version++ - - r.join.Add(len(offsetsByPartition)) - for partition, offset := range offsetsByPartition { - go func(ctx context.Context, partition int, offset int64, join *sync.WaitGroup) { - defer join.Done() - - (&reader{ - dialer: r.config.Dialer, - logger: r.config.Logger, - errorLogger: r.config.ErrorLogger, - brokers: r.config.Brokers, - topic: r.config.Topic, - partition: partition, - minBytes: r.config.MinBytes, - maxBytes: r.config.MaxBytes, - maxWait: r.config.MaxWait, - version: r.version, - msgs: r.msgs, - stats: r.stats, - }).run(ctx, offset) - }(ctx, partition, offset, &r.join) - } -} - -// A reader reads messages from kafka and produces them on its channels, it's -// used as an way to asynchronously fetch messages while the main program reads -// them using the high level reader API. -type reader struct { - dialer *Dialer - logger *log.Logger - errorLogger *log.Logger - brokers []string - topic string - partition int - minBytes int - maxBytes int - maxWait time.Duration - version int64 - msgs chan<- readerMessage - stats *readerStats -} - -type readerMessage struct { - version int64 - message Message - watermark int64 - error error -} - -func (r *reader) run(ctx context.Context, offset int64) { - const backoffDelayMin = 100 * time.Millisecond - const backoffDelayMax = 1 * time.Second - - // This is the reader's main loop, it only ends if the context is canceled - // and will keep attempting to reader messages otherwise. - // - // Retrying indefinitely has the nice side effect of preventing Read calls - // on the parent reader to block if connection to the kafka server fails, - // the reader keeps reporting errors on the error channel which will then - // be surfaced to the program. - // If the reader wasn't retrying then the program would block indefinitely - // on a Read call after reading the first error. 
- for attempt := 0; true; attempt++ { - if attempt != 0 { - if !sleep(ctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) { - return - } - } - - r.withLogger(func(log *log.Logger) { - log.Printf("initializing kafka reader for partition %d of %s starting at offset %d", r.partition, r.topic, offset) - }) - - conn, start, err := r.initialize(ctx, offset) - switch err { - case nil: - case OffsetOutOfRange: - // This would happen if the requested offset is passed the last - // offset on the partition leader. In that case we're just going - // to retry later hoping that enough data has been produced. - r.withErrorLogger(func(log *log.Logger) { - log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, OffsetOutOfRange) - }) - continue - default: - // Wait 4 attempts before reporting the first errors, this helps - // mitigate situations where the kafka server is temporarily - // unavailable. - if attempt >= 3 { - r.sendError(ctx, err) - } else { - r.stats.errors.observe(1) - r.withErrorLogger(func(log *log.Logger) { - log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, err) - }) - } - continue - } - - // Resetting the attempt counter ensures that if a failure occurs after - // a successful initialization we don't keep increasing the backoff - // timeout. - attempt = 0 - - // Now we're sure to have an absolute offset number, may anything happen - // to the connection we know we'll want to restart from this offset. - offset = start - - errcount := 0 - readLoop: - for { - if !sleep(ctx, backoff(errcount, backoffDelayMin, backoffDelayMax)) { - conn.Close() - return - } - - switch offset, err = r.read(ctx, offset, conn); err { - case nil: - errcount = 0 - - case NotLeaderForPartition: - r.withErrorLogger(func(log *log.Logger) { - log.Printf("failed to read from current broker for partition %d of %s at offset %d, not the leader", r.partition, r.topic, offset) - }) - - conn.Close() - - // The next call to .initialize will re-establish a connection to the proper - // partition leader. - r.stats.rebalances.observe(1) - break readLoop - - case RequestTimedOut: - // Timeout on the kafka side, this can be safely retried. - errcount = 0 - r.withErrorLogger(func(log *log.Logger) { - log.Printf("no messages received from kafka within the allocated time for partition %d of %s at offset %d", r.partition, r.topic, offset) - }) - r.stats.timeouts.observe(1) - continue - - case OffsetOutOfRange: - first, last, err := r.readOffsets(conn) - - if err != nil { - r.withErrorLogger(func(log *log.Logger) { - log.Printf("the kafka reader got an error while attempting to determine whether it was reading before the first offset or after the last offset of partition %d of %s: %s", r.partition, r.topic, err) - }) - conn.Close() - break readLoop - } - - switch { - case offset < first: - r.withErrorLogger(func(log *log.Logger) { - log.Printf("the kafka reader is reading before the first offset for partition %d of %s, skipping from offset %d to %d (%d messages)", r.partition, r.topic, offset, first, first-offset) - }) - offset, errcount = first, 0 - continue // retry immediately so we don't keep falling behind due to the backoff - - case offset < last: - errcount = 0 - continue // more messages have already become available, retry immediately - - default: - // We may be reading past the last offset, will retry later. 
- r.withErrorLogger(func(log *log.Logger) { - log.Printf("the kafka reader is reading passed the last offset for partition %d of %s at offset %d", r.partition, r.topic, offset) - }) - } - - case context.Canceled: - // Another reader has taken over, we can safely quit. - conn.Close() - return - - default: - if _, ok := err.(Error); ok { - r.sendError(ctx, err) - } else { - r.withErrorLogger(func(log *log.Logger) { - log.Printf("the kafka reader got an unknown error reading partition %d of %s at offset %d: %s", r.partition, r.topic, offset, err) - }) - r.stats.errors.observe(1) - conn.Close() - break readLoop - } - } - - errcount++ - } - } -} - -func (r *reader) initialize(ctx context.Context, offset int64) (conn *Conn, start int64, err error) { - for i := 0; i != len(r.brokers) && conn == nil; i++ { - var broker = r.brokers[i] - var first, last int64 - - t0 := time.Now() - conn, err = r.dialer.DialLeader(ctx, "tcp", broker, r.topic, r.partition) - t1 := time.Now() - r.stats.dials.observe(1) - r.stats.dialTime.observeDuration(t1.Sub(t0)) - - if err != nil { - continue - } - - if first, last, err = r.readOffsets(conn); err != nil { - conn.Close() - conn = nil - break - } - - switch { - case offset == FirstOffset: - offset = first - - case offset == LastOffset: - offset = last - - case offset < first: - offset = first - } - - r.withLogger(func(log *log.Logger) { - log.Printf("the kafka reader for partition %d of %s is seeking to offset %d", r.partition, r.topic, offset) - }) - - if start, err = conn.Seek(offset, SeekAbsolute); err != nil { - conn.Close() - conn = nil - break - } - - conn.SetDeadline(time.Time{}) - } - - return -} - -func (r *reader) read(ctx context.Context, offset int64, conn *Conn) (int64, error) { - r.stats.fetches.observe(1) - r.stats.offset.observe(offset) - - t0 := time.Now() - conn.SetReadDeadline(t0.Add(r.maxWait)) - - batch := conn.ReadBatch(r.minBytes, r.maxBytes) - highWaterMark := batch.HighWaterMark() - - t1 := time.Now() - r.stats.waitTime.observeDuration(t1.Sub(t0)) - - var msg Message - var err error - var size int64 - var bytes int64 - - const safetyTimeout = 10 * time.Second - deadline := time.Now().Add(safetyTimeout) - conn.SetReadDeadline(deadline) - - for { - if now := time.Now(); deadline.Sub(now) < (safetyTimeout / 2) { - deadline = now.Add(safetyTimeout) - conn.SetReadDeadline(deadline) - } - - if msg, err = batch.ReadMessage(); err != nil { - err = batch.Close() - break - } - - n := int64(len(msg.Key) + len(msg.Value)) - r.stats.messages.observe(1) - r.stats.bytes.observe(n) - - if err = r.sendMessage(ctx, msg, highWaterMark); err != nil { - err = batch.Close() - break - } - - offset = msg.Offset + 1 - r.stats.offset.observe(offset) - r.stats.lag.observe(highWaterMark - offset) - - size++ - bytes += n - } - - conn.SetReadDeadline(time.Time{}) - - t2 := time.Now() - r.stats.readTime.observeDuration(t2.Sub(t1)) - r.stats.fetchSize.observe(size) - r.stats.fetchBytes.observe(bytes) - return offset, err -} - -func (r *reader) readOffsets(conn *Conn) (first, last int64, err error) { - conn.SetDeadline(time.Now().Add(10 * time.Second)) - return conn.ReadOffsets() -} - -func (r *reader) sendMessage(ctx context.Context, msg Message, watermark int64) error { - select { - case r.msgs <- readerMessage{version: r.version, message: msg, watermark: watermark}: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (r *reader) sendError(ctx context.Context, err error) error { - select { - case r.msgs <- readerMessage{version: r.version, error: err}: - 
return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (r *reader) withLogger(do func(*log.Logger)) { - if r.logger != nil { - do(r.logger) - } -} - -func (r *reader) withErrorLogger(do func(*log.Logger)) { - if r.errorLogger != nil { - do(r.errorLogger) - } else { - r.withLogger(do) - } -} - -// extractTopics returns the unique list of topics represented by the set of -// provided members -func extractTopics(members []GroupMember) []string { - var visited = map[string]struct{}{} - var topics []string - - for _, member := range members { - for _, topic := range member.Topics { - if _, seen := visited[topic]; seen { - continue - } - - topics = append(topics, topic) - visited[topic] = struct{}{} - } - } - - sort.Strings(topics) - - return topics -} diff --git a/vendor/github.com/segmentio/kafka-go/rungroup.go b/vendor/github.com/segmentio/kafka-go/rungroup.go deleted file mode 100644 index b8cd704f0..000000000 --- a/vendor/github.com/segmentio/kafka-go/rungroup.go +++ /dev/null @@ -1,61 +0,0 @@ -package kafka - -import ( - "context" - "sync" -) - -// runGroup is a collection of goroutines working together. If any one goroutine -// stops, then all goroutines will be stopped. -// -// A zero runGroup is valid -type runGroup struct { - initOnce sync.Once - - ctx context.Context - cancel context.CancelFunc - - wg sync.WaitGroup -} - -func (r *runGroup) init() { - if r.cancel == nil { - r.ctx, r.cancel = context.WithCancel(context.Background()) - } -} - -func (r *runGroup) WithContext(ctx context.Context) *runGroup { - ctx, cancel := context.WithCancel(ctx) - return &runGroup{ - ctx: ctx, - cancel: cancel, - } -} - -// Wait blocks until all function calls have returned. -func (r *runGroup) Wait() { - r.wg.Wait() -} - -// Stop stops the goroutines and waits for them to complete -func (r *runGroup) Stop() { - r.initOnce.Do(r.init) - r.cancel() - r.Wait() -} - -// Go calls the given function in a new goroutine. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. 
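// The coordination idea behind runGroup, restated as a standalone sketch: every
// goroutine receives a stop channel derived from one shared context, the first
// goroutine to exit cancels that context, and Wait blocks until all of them have
// returned. The names here are illustrative and not part of the kafka package API.
package example

import (
	"context"
	"sync"
)

type group struct {
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

func newGroup(parent context.Context) *group {
	ctx, cancel := context.WithCancel(parent)
	return &group{ctx: ctx, cancel: cancel}
}

func (g *group) Go(f func(stop <-chan struct{})) {
	g.wg.Add(1)
	go func() {
		defer g.wg.Done()
		defer g.cancel() // any goroutine stopping stops the whole group
		f(g.ctx.Done())
	}()
}

func (g *group) Stop() { g.cancel(); g.wg.Wait() }
func (g *group) Wait() { g.wg.Wait() }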
-func (r *runGroup) Go(f func(stop <-chan struct{})) { - r.initOnce.Do(r.init) - - r.wg.Add(1) - go func() { - defer r.wg.Done() - defer r.cancel() - - f(r.ctx.Done()) - }() -} diff --git a/vendor/github.com/segmentio/kafka-go/sizeof.go b/vendor/github.com/segmentio/kafka-go/sizeof.go deleted file mode 100644 index 87feb1a14..000000000 --- a/vendor/github.com/segmentio/kafka-go/sizeof.go +++ /dev/null @@ -1,73 +0,0 @@ -package kafka - -import "fmt" - -type sizable interface { - size() int32 -} - -func sizeof(a interface{}) int32 { - switch v := a.(type) { - case int8: - return 1 - case int16: - return 2 - case int32: - return 4 - case int64: - return 8 - case string: - return sizeofString(v) - case bool: - return 1 - case []byte: - return sizeofBytes(v) - case sizable: - return v.size() - } - panic(fmt.Sprintf("unsupported type: %T", a)) -} - -func sizeofInt8(_ int8) int32 { - return 1 -} - -func sizeofInt16(_ int16) int32 { - return 2 -} - -func sizeofInt32(_ int32) int32 { - return 4 -} - -func sizeofInt64(_ int64) int32 { - return 8 -} - -func sizeofString(s string) int32 { - return 2 + int32(len(s)) -} - -func sizeofBool(_ bool) int32 { - return 1 -} - -func sizeofBytes(b []byte) int32 { - return 4 + int32(len(b)) -} - -func sizeofArray(n int, f func(int) int32) int32 { - s := int32(4) - for i := 0; i != n; i++ { - s += f(i) - } - return s -} - -func sizeofInt32Array(a []int32) int32 { - return 4 + (4 * int32(len(a))) -} - -func sizeofStringArray(a []string) int32 { - return sizeofArray(len(a), func(i int) int32 { return sizeofString(a[i]) }) -} diff --git a/vendor/github.com/segmentio/kafka-go/stats.go b/vendor/github.com/segmentio/kafka-go/stats.go deleted file mode 100644 index 9c69dc066..000000000 --- a/vendor/github.com/segmentio/kafka-go/stats.go +++ /dev/null @@ -1,186 +0,0 @@ -package kafka - -import ( - "sync/atomic" - "time" -) - -// SummaryStats is a data structure that carries a summary of observed values. -// The average, minimum, and maximum are reported. -type SummaryStats struct { - Avg int64 `metric:"avg" type:"gauge"` - Min int64 `metric:"min" type:"gauge"` - Max int64 `metric:"max" type:"gauge"` -} - -// DurationStats is a data structure that carries a summary of observed duration -// values. The average, minimum, and maximum are reported. -type DurationStats struct { - Avg time.Duration `metric:"avg" type:"gauge"` - Min time.Duration `metric:"min" type:"gauge"` - Max time.Duration `metric:"max" type:"gauge"` -} - -// counter is an atomic incrementing counter which gets reset on snapshot. -// -// Since atomic is used to mutate the statistic the value must be 64-bit aligned. -// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG -type counter int64 - -func (c *counter) ptr() *int64 { - return (*int64)(c) -} - -func (c *counter) observe(v int64) { - atomic.AddInt64(c.ptr(), v) -} - -func (c *counter) snapshot() int64 { - p := c.ptr() - v := atomic.LoadInt64(p) - atomic.AddInt64(p, -v) - return v -} - -// gauge is an atomic integer that may be set to any arbitrary value, the value -// does not change after a snapshot. -// -// Since atomic is used to mutate the statistic the value must be 64-bit aligned. 
-// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG -type gauge int64 - -func (g *gauge) ptr() *int64 { - return (*int64)(g) -} - -func (g *gauge) observe(v int64) { - atomic.StoreInt64(g.ptr(), v) -} - -func (g *gauge) snapshot() int64 { - return atomic.LoadInt64(g.ptr()) -} - -// minimum is an atomic integral type that keeps track of the minimum of all -// values that it observed between snapshots. -// -// Since atomic is used to mutate the statistic the value must be 64-bit aligned. -// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG -type minimum int64 - -func (m *minimum) ptr() *int64 { - return (*int64)(m) -} - -func (m *minimum) observe(v int64) { - for { - ptr := m.ptr() - min := atomic.LoadInt64(ptr) - - if min >= 0 && min <= v { - break - } - - if atomic.CompareAndSwapInt64(ptr, min, v) { - break - } - } -} - -func (m *minimum) snapshot() int64 { - p := m.ptr() - v := atomic.LoadInt64(p) - atomic.CompareAndSwapInt64(p, v, -1) - if v < 0 { - v = 0 - } - return v -} - -// maximum is an atomic integral type that keeps track of the maximum of all -// values that it observed between snapshots. -// -// Since atomic is used to mutate the statistic the value must be 64-bit aligned. -// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG -type maximum int64 - -func (m *maximum) ptr() *int64 { - return (*int64)(m) -} - -func (m *maximum) observe(v int64) { - for { - ptr := m.ptr() - max := atomic.LoadInt64(ptr) - - if max >= 0 && max >= v { - break - } - - if atomic.CompareAndSwapInt64(ptr, max, v) { - break - } - } -} - -func (m *maximum) snapshot() int64 { - p := m.ptr() - v := atomic.LoadInt64(p) - atomic.CompareAndSwapInt64(p, v, -1) - if v < 0 { - v = 0 - } - return v -} - -type summary struct { - min minimum - max maximum - sum counter - count counter -} - -func makeSummary() summary { - return summary{ - min: -1, - max: -1, - } -} - -func (s *summary) observe(v int64) { - s.min.observe(v) - s.max.observe(v) - s.sum.observe(v) - s.count.observe(1) -} - -func (s *summary) observeDuration(v time.Duration) { - s.observe(int64(v)) -} - -func (s *summary) snapshot() SummaryStats { - avg := int64(0) - min := s.min.snapshot() - max := s.max.snapshot() - sum := s.sum.snapshot() - count := s.count.snapshot() - - if count != 0 { - avg = int64(float64(sum) / float64(count)) - } - - return SummaryStats{ - Avg: avg, - Min: min, - Max: max, - } -} - -func (s *summary) snapshotDuration() DurationStats { - summary := s.snapshot() - return DurationStats{ - Avg: time.Duration(summary.Avg), - Min: time.Duration(summary.Min), - Max: time.Duration(summary.Max), - } -} diff --git a/vendor/github.com/segmentio/kafka-go/syncgroup.go b/vendor/github.com/segmentio/kafka-go/syncgroup.go deleted file mode 100644 index 668b507fd..000000000 --- a/vendor/github.com/segmentio/kafka-go/syncgroup.go +++ /dev/null @@ -1,141 +0,0 @@ -package kafka - -import ( - "bufio" - "bytes" -) - -type groupAssignment struct { - Version int16 - Topics map[string][]int32 - UserData []byte -} - -func (t groupAssignment) size() int32 { - sz := sizeofInt16(t.Version) + sizeofInt16(int16(len(t.Topics))) - - for topic, partitions := range t.Topics { - sz += sizeofString(topic) + sizeofInt32Array(partitions) - } - - return sz + sizeofBytes(t.UserData) -} - -func (t groupAssignment) writeTo(w *bufio.Writer) { - writeInt16(w, t.Version) - writeInt32(w, int32(len(t.Topics))) - - for topic, partitions := range t.Topics { - writeString(w, topic) - writeInt32Array(w, partitions) - } - - writeBytes(w, t.UserData) -} - -func (t 
*groupAssignment) readFrom(r *bufio.Reader, size int) (remain int, err error) { - // I came across this case when testing for compatibility with bsm/sarama-cluster. It - // appears in some cases, sarama-cluster can send a nil array entry. Admittedly, I - // didn't look too closely at it. - if size == 0 { - t.Topics = map[string][]int32{} - return 0, nil - } - - if remain, err = readInt16(r, size, &t.Version); err != nil { - return - } - if remain, err = readMapStringInt32(r, remain, &t.Topics); err != nil { - return - } - if remain, err = readBytes(r, remain, &t.UserData); err != nil { - return - } - - return -} - -func (t groupAssignment) bytes() []byte { - buf := bytes.NewBuffer(nil) - w := bufio.NewWriter(buf) - t.writeTo(w) - w.Flush() - return buf.Bytes() -} - -type syncGroupRequestGroupAssignmentV0 struct { - // MemberID assigned by the group coordinator - MemberID string - - // MemberAssignments holds client encoded assignments - // - // See consumer groups section of https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol - MemberAssignments []byte -} - -func (t syncGroupRequestGroupAssignmentV0) size() int32 { - return sizeofString(t.MemberID) + - sizeofBytes(t.MemberAssignments) -} - -func (t syncGroupRequestGroupAssignmentV0) writeTo(w *bufio.Writer) { - writeString(w, t.MemberID) - writeBytes(w, t.MemberAssignments) -} - -type syncGroupRequestV0 struct { - // GroupID holds the unique group identifier - GroupID string - - // GenerationID holds the generation of the group. - GenerationID int32 - - // MemberID assigned by the group coordinator - MemberID string - - GroupAssignments []syncGroupRequestGroupAssignmentV0 -} - -func (t syncGroupRequestV0) size() int32 { - return sizeofString(t.GroupID) + - sizeofInt32(t.GenerationID) + - sizeofString(t.MemberID) + - sizeofArray(len(t.GroupAssignments), func(i int) int32 { return t.GroupAssignments[i].size() }) -} - -func (t syncGroupRequestV0) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeInt32(w, t.GenerationID) - writeString(w, t.MemberID) - writeArray(w, len(t.GroupAssignments), func(i int) { t.GroupAssignments[i].writeTo(w) }) -} - -type syncGroupResponseV0 struct { - // ErrorCode holds response error code - ErrorCode int16 - - // MemberAssignments holds client encoded assignments - // - // See consumer groups section of https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol - MemberAssignments []byte -} - -func (t syncGroupResponseV0) size() int32 { - return sizeofInt16(t.ErrorCode) + - sizeofBytes(t.MemberAssignments) -} - -func (t syncGroupResponseV0) writeTo(w *bufio.Writer) { - writeInt16(w, t.ErrorCode) - writeBytes(w, t.MemberAssignments) -} - -func (t *syncGroupResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { - if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { - return - } - if remain, err = readBytes(r, remain, &t.MemberAssignments); err != nil { - return - } - return -} diff --git a/vendor/github.com/segmentio/kafka-go/time.go b/vendor/github.com/segmentio/kafka-go/time.go deleted file mode 100644 index 26f33afd0..000000000 --- a/vendor/github.com/segmentio/kafka-go/time.go +++ /dev/null @@ -1,55 +0,0 @@ -package kafka - -import ( - "math" - "time" -) - -const ( - maxTimeout = time.Duration(math.MaxInt32) * time.Millisecond - minTimeout = time.Duration(math.MinInt32) * time.Millisecond - defaultRTT = 1 * time.Second -) - -func timestamp(t time.Time) int64 { - if t.IsZero() { - return 0 - } - return t.UnixNano() / 
int64(time.Millisecond) -} - -func timestampToTime(t int64) time.Time { - return time.Unix(t/1000, (t%1000)*int64(time.Millisecond)) -} - -func duration(ms int32) time.Duration { - return time.Duration(ms) * time.Millisecond -} - -func milliseconds(d time.Duration) int32 { - switch { - case d > maxTimeout: - d = maxTimeout - case d < minTimeout: - d = minTimeout - } - return int32(d / time.Millisecond) -} - -func deadlineToTimeout(deadline time.Time, now time.Time) time.Duration { - if deadline.IsZero() { - return maxTimeout - } - return deadline.Sub(now) -} - -func adjustDeadlineForRTT(deadline time.Time, now time.Time, rtt time.Duration) time.Time { - if !deadline.IsZero() { - timeout := deadline.Sub(now) - if timeout < rtt { - rtt = timeout / 4 - } - deadline = deadline.Add(-rtt) - } - return deadline -} diff --git a/vendor/github.com/segmentio/kafka-go/write.go b/vendor/github.com/segmentio/kafka-go/write.go deleted file mode 100644 index 216da873f..000000000 --- a/vendor/github.com/segmentio/kafka-go/write.go +++ /dev/null @@ -1,288 +0,0 @@ -package kafka - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "time" -) - -type writable interface { - writeTo(*bufio.Writer) -} - -func writeInt8(w *bufio.Writer, i int8) { - w.WriteByte(byte(i)) -} - -func writeInt16(w *bufio.Writer, i int16) { - var b [2]byte - binary.BigEndian.PutUint16(b[:], uint16(i)) - w.WriteByte(b[0]) - w.WriteByte(b[1]) -} - -func writeInt32(w *bufio.Writer, i int32) { - var b [4]byte - binary.BigEndian.PutUint32(b[:], uint32(i)) - w.WriteByte(b[0]) - w.WriteByte(b[1]) - w.WriteByte(b[2]) - w.WriteByte(b[3]) -} - -func writeInt64(w *bufio.Writer, i int64) { - var b [8]byte - binary.BigEndian.PutUint64(b[:], uint64(i)) - w.WriteByte(b[0]) - w.WriteByte(b[1]) - w.WriteByte(b[2]) - w.WriteByte(b[3]) - w.WriteByte(b[4]) - w.WriteByte(b[5]) - w.WriteByte(b[6]) - w.WriteByte(b[7]) -} - -func writeString(w *bufio.Writer, s string) { - writeInt16(w, int16(len(s))) - w.WriteString(s) -} - -func writeBytes(w *bufio.Writer, b []byte) { - n := len(b) - if b == nil { - n = -1 - } - writeInt32(w, int32(n)) - w.Write(b) -} - -func writeBool(w *bufio.Writer, b bool) { - v := int8(0) - if b { - v = 1 - } - writeInt8(w, v) -} - -func writeArrayLen(w *bufio.Writer, n int) { - writeInt32(w, int32(n)) -} - -func writeArray(w *bufio.Writer, n int, f func(int)) { - writeArrayLen(w, n) - for i := 0; i != n; i++ { - f(i) - } -} - -func writeStringArray(w *bufio.Writer, a []string) { - writeArray(w, len(a), func(i int) { writeString(w, a[i]) }) -} - -func writeInt32Array(w *bufio.Writer, a []int32) { - writeArray(w, len(a), func(i int) { writeInt32(w, a[i]) }) -} - -func write(w *bufio.Writer, a interface{}) { - switch v := a.(type) { - case int8: - writeInt8(w, v) - case int16: - writeInt16(w, v) - case int32: - writeInt32(w, v) - case int64: - writeInt64(w, v) - case string: - writeString(w, v) - case []byte: - writeBytes(w, v) - case bool: - writeBool(w, v) - case writable: - v.writeTo(w) - default: - panic(fmt.Sprintf("unsupported type: %T", a)) - } -} - -// The functions bellow are used as optimizations to avoid dynamic memory -// allocations that occur when building the data structures representing the -// kafka protocol requests. 
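// A small worked example of the duration clamping performed by the milliseconds
// helper shown above before a Duration is written into a request field: values
// outside the int32-millisecond range are pinned to the protocol limits. The
// helper is restated here for illustration only; the printed values are assumed
// inputs, not data from this patch.
package main

import (
	"fmt"
	"math"
	"time"
)

const (
	maxTimeout = time.Duration(math.MaxInt32) * time.Millisecond
	minTimeout = time.Duration(math.MinInt32) * time.Millisecond
)

func milliseconds(d time.Duration) int32 {
	switch {
	case d > maxTimeout:
		d = maxTimeout
	case d < minTimeout:
		d = minTimeout
	}
	return int32(d / time.Millisecond)
}

func main() {
	fmt.Println(milliseconds(10 * time.Second))    // 10000
	fmt.Println(milliseconds(1000000 * time.Hour)) // clamped to math.MaxInt32
}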
- -func writeFetchRequestV2(w *bufio.Writer, correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration) error { - h := requestHeader{ - ApiKey: int16(fetchRequest), - ApiVersion: int16(v2), - CorrelationID: correlationID, - ClientID: clientID, - } - h.Size = (h.size() - 4) + - 4 + // replica ID - 4 + // max wait time - 4 + // min bytes - 4 + // topic array length - sizeofString(topic) + - 4 + // partition array length - 4 + // partition - 8 + // offset - 4 // max bytes - - h.writeTo(w) - writeInt32(w, -1) // replica ID - writeInt32(w, milliseconds(maxWait)) - writeInt32(w, int32(minBytes)) - - // topic array - writeArrayLen(w, 1) - writeString(w, topic) - - // partition array - writeArrayLen(w, 1) - writeInt32(w, partition) - writeInt64(w, offset) - writeInt32(w, int32(maxBytes)) - - return w.Flush() -} - -func writeListOffsetRequestV1(w *bufio.Writer, correlationID int32, clientID, topic string, partition int32, time int64) error { - h := requestHeader{ - ApiKey: int16(listOffsetRequest), - ApiVersion: int16(v1), - CorrelationID: correlationID, - ClientID: clientID, - } - h.Size = (h.size() - 4) + - 4 + // replica ID - 4 + // topic array length - sizeofString(topic) + // topic - 4 + // partition array length - 4 + // partition - 8 // time - - h.writeTo(w) - writeInt32(w, -1) // replica ID - - // topic array - writeArrayLen(w, 1) - writeString(w, topic) - - // partition array - writeArrayLen(w, 1) - writeInt32(w, partition) - writeInt64(w, time) - - return w.Flush() -} - -func writeProduceRequestV2(w *bufio.Writer, codec CompressionCodec, correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, msgs ...Message) error { - var size int32 - attributes := int8(CompressionNoneCode) - - // if compressing, replace the slice of messages with a single compressed - // message set. 
- if codec != nil { - var err error - if msgs, err = compress(codec, msgs...); err != nil { - return err - } - attributes = codec.Code() - } - - for _, msg := range msgs { - size += 8 + // offset - 4 + // message size - 4 + // crc - 1 + // magic byte - 1 + // attributes - 8 + // timestamp - sizeofBytes(msg.Key) + - sizeofBytes(msg.Value) - } - - h := requestHeader{ - ApiKey: int16(produceRequest), - ApiVersion: int16(v2), - CorrelationID: correlationID, - ClientID: clientID, - } - h.Size = (h.size() - 4) + - 2 + // required acks - 4 + // timeout - 4 + // topic array length - sizeofString(topic) + // topic - 4 + // partition array length - 4 + // partition - 4 + // message set size - size - - h.writeTo(w) - writeInt16(w, requiredAcks) // required acks - writeInt32(w, milliseconds(timeout)) - - // topic array - writeArrayLen(w, 1) - writeString(w, topic) - - // partition array - writeArrayLen(w, 1) - writeInt32(w, partition) - writeInt32(w, size) - - for _, msg := range msgs { - writeMessage(w, msg.Offset, attributes, msg.Time, msg.Key, msg.Value) - } - - return w.Flush() -} - -func compress(codec CompressionCodec, msgs ...Message) ([]Message, error) { - estimatedLen := 0 - for _, msg := range msgs { - estimatedLen += int(msgSize(msg.Key, msg.Value)) - } - buf := &bytes.Buffer{} - buf.Grow(estimatedLen) - bufWriter := bufio.NewWriter(buf) - for offset, msg := range msgs { - writeMessage(bufWriter, int64(offset), CompressionNoneCode, msg.Time, msg.Key, msg.Value) - } - bufWriter.Flush() - - compressed, err := codec.Encode(buf.Bytes()) - if err != nil { - return nil, err - } - - return []Message{{Value: compressed}}, nil -} - -const magicByte = 1 // compatible with kafka 0.10.0.0+ - -func writeMessage(w *bufio.Writer, offset int64, attributes int8, time time.Time, key, value []byte) { - timestamp := timestamp(time) - crc32 := crc32OfMessage(magicByte, attributes, timestamp, key, value) - size := msgSize(key, value) - - writeInt64(w, offset) - writeInt32(w, size) - writeInt32(w, int32(crc32)) - writeInt8(w, magicByte) - writeInt8(w, attributes) - writeInt64(w, timestamp) - writeBytes(w, key) - writeBytes(w, value) -} - -func msgSize(key, value []byte) int32 { - return 4 + // crc - 1 + // magic byte - 1 + // attributes - 8 + // timestamp - sizeofBytes(key) + - sizeofBytes(value) -} diff --git a/vendor/github.com/segmentio/kafka-go/writer.go b/vendor/github.com/segmentio/kafka-go/writer.go deleted file mode 100644 index e183e636f..000000000 --- a/vendor/github.com/segmentio/kafka-go/writer.go +++ /dev/null @@ -1,734 +0,0 @@ -package kafka - -import ( - "context" - "fmt" - "io" - "log" - "math/rand" - "sort" - "sync" - "time" -) - -// The Writer type provides the implementation of a producer of kafka messages -// that automatically distributes messages across partitions of a single topic -// using a configurable balancing policy. -// -// Instances of Writer are safe to use concurrently from multiple goroutines. -type Writer struct { - config WriterConfig - - mutex sync.RWMutex - closed bool - - join sync.WaitGroup - msgs chan writerMessage - done chan struct{} - - // writer stats are all made of atomic values, no need for synchronization. - // Use a pointer to ensure 64-bit alignment of the values. - stats *writerStats -} - -// WriterConfig is a configuration type used to create new instances of Writer. -type WriterConfig struct { - // The list of brokers used to discover the partitions available on the - // kafka cluster. 
- // - // This field is required, attempting to create a writer with an empty list - // of brokers will panic. - Brokers []string - - // The topic that the writer will produce messages to. - // - // This field is required, attempting to create a writer with an empty topic - // will panic. - Topic string - - // The dialer used by the writer to establish connections to the kafka - // cluster. - // - // If nil, the default dialer is used instead. - Dialer *Dialer - - // The balancer used to distribute messages across partitions. - // - // The default is to use a round-robin distribution. - Balancer Balancer - - // Limit on how many attempts will be made to deliver a message. - // - // The default is to try at most 10 times. - MaxAttempts int - - // A hint on the capacity of the writer's internal message queue. - // - // The default is to use a queue capacity of 100 messages. - QueueCapacity int - - // Limit on how many messages will be buffered before being sent to a - // partition. - // - // The default is to use a target batch size of 100 messages. - BatchSize int - - // Time limit on how often incomplete message batches will be flushed to - // kafka. - // - // The default is to flush at least every second. - BatchTimeout time.Duration - - // Timeout for read operations performed by the Writer. - // - // Defaults to 10 seconds. - ReadTimeout time.Duration - - // Timeout for write operation performed by the Writer. - // - // Defaults to 10 seconds. - WriteTimeout time.Duration - - // This interval defines how often the list of partitions is refreshed from - // kafka. It allows the writer to automatically handle when new partitions - // are added to a topic. - // - // The default is to refresh partitions every 15 seconds. - RebalanceInterval time.Duration - - // Number of acknowledges from partition replicas required before receiving - // a response to a produce request (default to -1, which means to wait for - // all replicas). - RequiredAcks int - - // Setting this flag to true causes the WriteMessages method to never block. - // It also means that errors are ignored since the caller will not receive - // the returned value. Use this only if you don't care about guarantees of - // whether the messages were written to kafka. - Async bool - - // CompressionCodec set the codec to be used to compress Kafka messages. - // Note that messages are allowed to overwrite the compression codec individually. - CompressionCodec - - // If not nil, specifies a logger used to report internal changes within the - // writer. - Logger *log.Logger - - // ErrorLogger is the logger used to report errors. If nil, the writer falls - // back to using Logger instead. - ErrorLogger *log.Logger - - newPartitionWriter func(partition int, config WriterConfig, stats *writerStats) partitionWriter -} - -// WriterStats is a data structure returned by a call to Writer.Stats that -// exposes details about the behavior of the writer. 
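// Sketch of the "spawn a goroutine and report periodically" usage suggested for
// Stats: the counters in WriterStats are deltas since the previous snapshot, so
// each tick reports what happened during that interval. The ticker period, the
// done channel, and the log sink are illustrative choices.
package example

import (
	"log"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func reportWriterStats(w *kafka.Writer, every time.Duration, done <-chan struct{}) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			s := w.Stats()
			log.Printf("kafka writer: writes=%d messages=%d bytes=%d errors=%d",
				s.Writes, s.Messages, s.Bytes, s.Errors)
		case <-done:
			return
		}
	}
}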
-type WriterStats struct { - Dials int64 `metric:"kafka.writer.dial.count" type:"counter"` - Writes int64 `metric:"kafka.writer.write.count" type:"counter"` - Messages int64 `metric:"kafka.writer.message.count" type:"counter"` - Bytes int64 `metric:"kafka.writer.message.bytes" type:"counter"` - Rebalances int64 `metric:"kafka.writer.rebalance.count" type:"counter"` - Errors int64 `metric:"kafka.writer.error.count" type:"counter"` - - DialTime DurationStats `metric:"kafka.writer.dial.seconds"` - WriteTime DurationStats `metric:"kafka.writer.write.seconds"` - WaitTime DurationStats `metric:"kafka.writer.wait.seconds"` - Retries SummaryStats `metric:"kafka.writer.retries.count"` - BatchSize SummaryStats `metric:"kafka.writer.batch.size"` - - MaxAttempts int64 `metric:"kafka.writer.attempts.max" type:"gauge"` - MaxBatchSize int64 `metric:"kafka.writer.batch.max" type:"gauge"` - BatchTimeout time.Duration `metric:"kafka.writer.batch.timeout" type:"gauge"` - ReadTimeout time.Duration `metric:"kafka.writer.read.timeout" type:"gauge"` - WriteTimeout time.Duration `metric:"kafka.writer.write.timeout" type:"gauge"` - RebalanceInterval time.Duration `metric:"kafka.writer.rebalance.interval" type:"gauge"` - RequiredAcks int64 `metric:"kafka.writer.acks.required" type:"gauge"` - Async bool `metric:"kafka.writer.async" type:"gauge"` - QueueLength int64 `metric:"kafka.writer.queue.length" type:"gauge"` - QueueCapacity int64 `metric:"kafka.writer.queue.capacity" type:"gauge"` - - ClientID string `tag:"client_id"` - Topic string `tag:"topic"` -} - -// writerStats is a struct that contains statistics on a writer. -// -// Since atomic is used to mutate the statistics the values must be 64-bit aligned. -// This is easily accomplished by always allocating this struct directly, (i.e. using a pointer to the struct). -// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG -type writerStats struct { - dials counter - writes counter - messages counter - bytes counter - rebalances counter - errors counter - dialTime summary - writeTime summary - waitTime summary - retries summary - batchSize summary -} - -// NewWriter creates and returns a new Writer configured with config. 
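// Sketch of the producer side documented above: NewWriter with the required
// Brokers and Topic, then a blocking WriteMessages call that distributes the
// batch across partitions (round-robin by default). Broker address, topic name,
// and payloads are placeholders.
package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	w := kafka.NewWriter(kafka.WriterConfig{
		Brokers: []string{"localhost:9092"}, // assumed broker address
		Topic:   "example-topic",
		Async:   false, // keep WriteMessages blocking so errors are reported
	})
	defer w.Close()

	err := w.WriteMessages(context.Background(),
		kafka.Message{Key: []byte("k1"), Value: []byte("hello")},
		kafka.Message{Key: []byte("k2"), Value: []byte("world")},
	)
	if err != nil {
		log.Fatal(err)
	}
}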
-func NewWriter(config WriterConfig) *Writer { - if len(config.Brokers) == 0 { - panic("cannot create a kafka writer with an empty list of brokers") - } - - if len(config.Topic) == 0 { - panic("cannot create a kafka writer with an empty topic") - } - - if config.Dialer == nil { - config.Dialer = DefaultDialer - } - - if config.Balancer == nil { - config.Balancer = &RoundRobin{} - } - - if config.newPartitionWriter == nil { - config.newPartitionWriter = func(partition int, config WriterConfig, stats *writerStats) partitionWriter { - return newWriter(partition, config, stats) - } - } - - if config.MaxAttempts == 0 { - config.MaxAttempts = 10 - } - - if config.QueueCapacity == 0 { - config.QueueCapacity = 100 - } - - if config.BatchSize == 0 { - config.BatchSize = 100 - } - - if config.BatchTimeout == 0 { - config.BatchTimeout = 1 * time.Second - } - - if config.ReadTimeout == 0 { - config.ReadTimeout = 10 * time.Second - } - - if config.WriteTimeout == 0 { - config.WriteTimeout = 10 * time.Second - } - - if config.RebalanceInterval == 0 { - config.RebalanceInterval = 15 * time.Second - } - - w := &Writer{ - config: config, - msgs: make(chan writerMessage, config.QueueCapacity), - done: make(chan struct{}), - stats: &writerStats{ - dialTime: makeSummary(), - writeTime: makeSummary(), - waitTime: makeSummary(), - retries: makeSummary(), - }, - } - - w.join.Add(1) - go w.run() - return w -} - -// WriteMessages writes a batch of messages to the kafka topic configured on this -// writer. -// -// Unless the writer was configured to write messages asynchronously, the method -// blocks until all messages have been written, or until the maximum number of -// attempts was reached. -// -// When the method returns an error, there's no way to know yet which messages -// have succeeded of failed. -// -// The context passed as first argument may also be used to asynchronously -// cancel the operation. Note that in this case there are no guarantees made on -// whether messages were written to kafka. The program should assume that the -// whole batch failed and re-write the messages later (which could then cause -// duplicates). -func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error { - if len(msgs) == 0 { - return nil - } - - var res = make(chan error, len(msgs)) - var err error - - t0 := time.Now() - - for attempt := 0; attempt < w.config.MaxAttempts; attempt++ { - w.mutex.RLock() - - if w.closed { - w.mutex.RUnlock() - return io.ErrClosedPipe - } - - for _, msg := range msgs { - select { - case w.msgs <- writerMessage{ - msg: msg, - res: res, - }: - case <-ctx.Done(): - w.mutex.RUnlock() - return ctx.Err() - } - } - - w.mutex.RUnlock() - - if w.config.Async { - break - } - - var retry []Message - - for i := 0; i != len(msgs); i++ { - select { - case e := <-res: - if e != nil { - if we, ok := e.(*writerError); ok { - w.stats.retries.observe(1) - retry, err = append(retry, we.msg), we.err - } else { - err = e - } - } - case <-ctx.Done(): - return ctx.Err() - } - } - - if msgs = retry; len(msgs) == 0 { - break - } - - timer := time.NewTimer(backoff(attempt+1, 100*time.Millisecond, 1*time.Second)) - select { - case <-timer.C: - // Only clear the error (so we retry the loop) if we have more retries, otherwise - // we risk silencing the error. 
- if attempt < w.config.MaxAttempts-1 { - err = nil - } - case <-ctx.Done(): - err = ctx.Err() - case <-w.done: - err = io.ErrClosedPipe - } - timer.Stop() - - if err != nil { - break - } - } - - t1 := time.Now() - w.stats.writeTime.observeDuration(t1.Sub(t0)) - - return err -} - -// Stats returns a snapshot of the writer stats since the last time the method -// was called, or since the writer was created if it is called for the first -// time. -// -// A typical use of this method is to spawn a goroutine that will periodically -// call Stats on a kafka writer and report the metrics to a stats collection -// system. -func (w *Writer) Stats() WriterStats { - return WriterStats{ - Dials: w.stats.dials.snapshot(), - Writes: w.stats.writes.snapshot(), - Messages: w.stats.messages.snapshot(), - Bytes: w.stats.bytes.snapshot(), - Rebalances: w.stats.rebalances.snapshot(), - Errors: w.stats.errors.snapshot(), - DialTime: w.stats.dialTime.snapshotDuration(), - WriteTime: w.stats.writeTime.snapshotDuration(), - WaitTime: w.stats.waitTime.snapshotDuration(), - Retries: w.stats.retries.snapshot(), - BatchSize: w.stats.batchSize.snapshot(), - MaxAttempts: int64(w.config.MaxAttempts), - MaxBatchSize: int64(w.config.BatchSize), - BatchTimeout: w.config.BatchTimeout, - ReadTimeout: w.config.ReadTimeout, - WriteTimeout: w.config.WriteTimeout, - RebalanceInterval: w.config.RebalanceInterval, - RequiredAcks: int64(w.config.RequiredAcks), - Async: w.config.Async, - QueueLength: int64(len(w.msgs)), - QueueCapacity: int64(cap(w.msgs)), - ClientID: w.config.Dialer.ClientID, - Topic: w.config.Topic, - } -} - -// Close flushes all buffered messages and closes the writer. The call to Close -// aborts any concurrent calls to WriteMessages, which then return with the -// io.ErrClosedPipe error. -func (w *Writer) Close() (err error) { - w.mutex.Lock() - - if !w.closed { - w.closed = true - close(w.msgs) - close(w.done) - } - - w.mutex.Unlock() - w.join.Wait() - return -} - -func (w *Writer) run() { - defer w.join.Done() - - ticker := time.NewTicker(w.config.RebalanceInterval) - defer ticker.Stop() - - var rebalance = true - var writers = make(map[int]partitionWriter) - var partitions []int - var err error - - for { - if rebalance { - w.stats.rebalances.observe(1) - rebalance = false - - var newPartitions []int - var oldPartitions = partitions - - if newPartitions, err = w.partitions(); err == nil { - for _, partition := range diffp(oldPartitions, newPartitions) { - w.close(writers[partition]) - delete(writers, partition) - } - - for _, partition := range diffp(newPartitions, oldPartitions) { - writers[partition] = w.open(partition) - } - - partitions = newPartitions - } - } - - select { - case wm, ok := <-w.msgs: - if !ok { - for _, writer := range writers { - w.close(writer) - } - return - } - - if len(partitions) != 0 { - selectedPartition := w.config.Balancer.Balance(wm.msg, partitions...) - writers[selectedPartition].messages() <- wm - } else { - // No partitions were found because the topic doesn't exist. 
- if err == nil { - err = fmt.Errorf("failed to find any partitions for topic %s", w.config.Topic) - } - - wm.res <- &writerError{msg: wm.msg, err: err} - } - - case <-ticker.C: - rebalance = true - } - } -} - -func (w *Writer) partitions() (partitions []int, err error) { - for _, broker := range shuffledStrings(w.config.Brokers) { - var conn *Conn - var plist []Partition - - if conn, err = w.config.Dialer.Dial("tcp", broker); err != nil { - continue - } - - conn.SetReadDeadline(time.Now().Add(w.config.ReadTimeout)) - plist, err = conn.ReadPartitions(w.config.Topic) - conn.Close() - - if err == nil { - partitions = make([]int, len(plist)) - for i, p := range plist { - partitions[i] = p.ID - } - break - } - } - - sort.Ints(partitions) - return -} - -func (w *Writer) open(partition int) partitionWriter { - return w.config.newPartitionWriter(partition, w.config, w.stats) -} - -func (w *Writer) close(writer partitionWriter) { - w.join.Add(1) - go func() { - writer.close() - w.join.Done() - }() -} - -func diffp(new []int, old []int) (diff []int) { - for _, p := range new { - if i := sort.SearchInts(old, p); i == len(old) || old[i] != p { - diff = append(diff, p) - } - } - return -} - -type partitionWriter interface { - messages() chan<- writerMessage - close() -} - -type writer struct { - brokers []string - topic string - partition int - requiredAcks int - batchSize int - batchTimeout time.Duration - writeTimeout time.Duration - dialer *Dialer - msgs chan writerMessage - join sync.WaitGroup - stats *writerStats - codec CompressionCodec - logger *log.Logger - errorLogger *log.Logger -} - -func newWriter(partition int, config WriterConfig, stats *writerStats) *writer { - w := &writer{ - brokers: config.Brokers, - topic: config.Topic, - partition: partition, - requiredAcks: config.RequiredAcks, - batchSize: config.BatchSize, - batchTimeout: config.BatchTimeout, - writeTimeout: config.WriteTimeout, - dialer: config.Dialer, - msgs: make(chan writerMessage, config.QueueCapacity), - stats: stats, - codec: config.CompressionCodec, - logger: config.Logger, - errorLogger: config.ErrorLogger, - } - w.join.Add(1) - go w.run() - return w -} - -func (w *writer) close() { - close(w.msgs) - w.join.Wait() -} - -func (w *writer) messages() chan<- writerMessage { - return w.msgs -} - -func (w *writer) withLogger(do func(*log.Logger)) { - if w.logger != nil { - do(w.logger) - } -} - -func (w *writer) withErrorLogger(do func(*log.Logger)) { - if w.errorLogger != nil { - do(w.errorLogger) - } else { - w.withLogger(do) - } -} - -func (w *writer) run() { - defer w.join.Done() - - ticker := time.NewTicker(w.batchTimeout / 10) - defer ticker.Stop() - - var conn *Conn - var done bool - var batch = make([]Message, 0, w.batchSize) - var resch = make([](chan<- error), 0, w.batchSize) - var lastFlushAt = time.Now() - - defer func() { - if conn != nil { - conn.Close() - } - }() - - for !done { - var mustFlush bool - - select { - case wm, ok := <-w.msgs: - if !ok { - done, mustFlush = true, true - } else { - batch = append(batch, wm.msg) - resch = append(resch, wm.res) - mustFlush = len(batch) >= w.batchSize - } - - case now := <-ticker.C: - mustFlush = now.Sub(lastFlushAt) > w.batchTimeout - } - - if mustFlush { - lastFlushAt = time.Now() - - if len(batch) == 0 { - continue - } - - var err error - if conn, err = w.write(conn, batch, resch); err != nil { - if conn != nil { - conn.Close() - conn = nil - } - } - - for i := range batch { - batch[i] = Message{} - } - - for i := range resch { - resch[i] = nil - } - - batch = 
batch[:0] - resch = resch[:0] - } - } -} - -func (w *writer) dial() (conn *Conn, err error) { - for _, broker := range shuffledStrings(w.brokers) { - t0 := time.Now() - if conn, err = w.dialer.DialLeader(context.Background(), "tcp", broker, w.topic, w.partition); err == nil { - t1 := time.Now() - w.stats.dials.observe(1) - w.stats.dialTime.observeDuration(t1.Sub(t0)) - conn.SetRequiredAcks(w.requiredAcks) - break - } - } - return -} - -func (w *writer) write(conn *Conn, batch []Message, resch [](chan<- error)) (ret *Conn, err error) { - w.stats.writes.observe(1) - if conn == nil { - if conn, err = w.dial(); err != nil { - w.stats.errors.observe(1) - w.withErrorLogger(func(logger *log.Logger) { - logger.Printf("error dialing kafka brokers for topic %s (partition %d): %s", w.topic, w.partition, err) - }) - for i, res := range resch { - res <- &writerError{msg: batch[i], err: err} - } - return - } - } - - t0 := time.Now() - conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) - - if _, err = conn.WriteCompressedMessages(w.codec, batch...); err != nil { - w.stats.errors.observe(1) - w.withErrorLogger(func(logger *log.Logger) { - logger.Printf("error writing messages to %s (partition %d): %s", w.topic, w.partition, err) - }) - for i, res := range resch { - res <- &writerError{msg: batch[i], err: err} - } - } else { - for _, m := range batch { - w.stats.messages.observe(1) - w.stats.bytes.observe(int64(len(m.Key) + len(m.Value))) - } - for _, res := range resch { - res <- nil - } - } - - t1 := time.Now() - w.stats.waitTime.observeDuration(t1.Sub(t0)) - w.stats.batchSize.observe(int64(len(batch))) - - ret = conn - return -} - -type writerMessage struct { - msg Message - res chan<- error -} - -type writerError struct { - msg Message - err error -} - -func (e *writerError) Cause() error { - return e.err -} - -func (e *writerError) Error() string { - return e.err.Error() -} - -func (e *writerError) Temporary() bool { - return isTemporary(e.err) -} - -func (e *writerError) Timeout() bool { - return isTimeout(e.err) -} - -func shuffledStrings(list []string) []string { - shuffledList := make([]string, len(list)) - copy(shuffledList, list) - - shufflerMutex.Lock() - - for i := range shuffledList { - j := shuffler.Intn(i + 1) - shuffledList[i], shuffledList[j] = shuffledList[j], shuffledList[i] - } - - shufflerMutex.Unlock() - return shuffledList -} - -var ( - shufflerMutex = sync.Mutex{} - shuffler = rand.New(rand.NewSource(time.Now().Unix())) -) diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore deleted file mode 100644 index 6b7d7d1e8..000000000 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -logrus -vendor diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml deleted file mode 100644 index 848938a6d..000000000 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: go -go_import_path: github.com/sirupsen/logrus -git: - depth: 1 -env: - - GO111MODULE=on - - GO111MODULE=off -go: [ 1.11.x, 1.12.x ] -os: [ linux, osx ] -matrix: - exclude: - - go: 1.12.x - env: GO111MODULE=off - - go: 1.11.x - os: osx -install: - - ./travis/install.sh - - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi - - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi -script: - - ./travis/cross_build.sh - - export GOMAXPROCS=4 - - export 
GORACE=halt_on_error=1 - - go test -race -v ./... - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index 51a7ab0ca..000000000 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,200 +0,0 @@ -# 1.4.2 - * Fixes build break for plan9, nacl, solaris -# 1.4.1 -This new release introduces: - * Enhance TextFormatter to not print caller information when they are empty (#944) - * Remove dependency on golang.org/x/crypto (#932, #943) - -Fixes: - * Fix Entry.WithContext method to return a copy of the initial entry (#941) - -# 1.4.0 -This new release introduces: - * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). - * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911) - * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). - -Fixes: - * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). - * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) - * Fix infinite recursion on unknown `Level.String()` (#907) - * Fix race condition in `getCaller` (#916). - - -# 1.3.0 -This new release introduces: - * Log, Logf, Logln functions for Logger and Entry that take a Level - -Fixes: - * Building prometheus node_exporter on AIX (#840) - * Race condition in TextFormatter (#468) - * Travis CI import path (#868) - * Remove coloured output on Windows (#862) - * Pointer to func as field in JSONFormatter (#870) - * Properly marshal Levels (#873) - -# 1.2.0 -This new release introduces: - * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued - * A new trace level named `Trace` whose level is below `Debug` - * A configurable exit function to be called upon a Fatal trace - * The `Level` object now implements `encoding.TextUnmarshaler` interface - -# 1.1.1 -This is a bug fix release. 
- * fix the build break on Solaris - * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized - -# 1.1.0 -This new release introduces: - * several fixes: - * a fix for a race condition on entry formatting - * proper cleanup of previously used entries before putting them back in the pool - * the extra new line at the end of message in text formatter has been removed - * a new global public API to check if a level is activated: IsLevelEnabled - * the following methods have been added to the Logger object - * IsLevelEnabled - * SetFormatter - * SetOutput - * ReplaceHooks - * introduction of go module - * an indent configuration for the json formatter - * output colour support for windows - * the field sort function is now configurable for text formatter - * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater - -# 1.0.6 - -This new release introduces: - * a new api WithTime which allows to easily force the time of the log entry - which is mostly useful for logger wrapper - * a fix reverting the immutability of the entry given as parameter to the hooks - a new configuration field of the json formatter in order to put all the fields - in a nested dictionnary - * a new SetOutput method in the Logger - * a new configuration of the textformatter to configure the name of the default keys - * a new configuration of the text formatter to disable the level truncation - -# 1.0.5 - -* Fix hooks race (#707) -* Fix panic deadlock (#695) - -# 1.0.4 - -* Fix race when adding hooks (#612) -* Fix terminal check in AppEngine (#635) - -# 1.0.3 - -* Replace example files with testable examples - -# 1.0.2 - -* bug: quote non-string values in text formatter (#583) -* Make (*Logger) SetLevel a public method - -# 1.0.1 - -* bug: fix escaping in text formatter (#575) - -# 1.0.0 - -* Officially changed name to lower-case -* bug: colors on Windows 10 (#541) -* bug: fix race in accessing level (#512) - -# 0.11.5 - -* feature: add writer and writerlevel to entry (#372) - -# 0.11.4 - -* bug: fix undefined variable on solaris (#493) - -# 0.11.3 - -* formatter: configure quoting of empty values (#484) -* formatter: configure quoting character (default is `"`) (#484) -* bug: fix not importing io correctly in non-linux environments (#481) - -# 0.11.2 - -* bug: fix windows terminal detection (#476) - -# 0.11.1 - -* bug: fix tty detection with custom out (#471) - -# 0.11.0 - -* performance: Use bufferpool to allocate (#370) -* terminal: terminal detection for app-engine (#343) -* feature: exit handler (#375) - -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 
0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f..000000000 --- a/vendor/github.com/sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md deleted file mode 100644 index a4796eb07..000000000 --- a/vendor/github.com/sirupsen/logrus/README.md +++ /dev/null @@ -1,495 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. - -**Seeing weird case-sensitive problems?** It's in the past been possible to -import Logrus as both upper- and lower-case. Due to the Go package environment, -this caused issues in the community and we needed a standard. Some environments -experienced problems with the upper-case variant, so the lower-case was decided. -Everything using `logrus` will need to use the lower-case: -`github.com/sirupsen/logrus`. Any package that isn't, should be changed. - -To fix Glide, see [these -comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). -For an in-depth explanation of the casing issue, see [this -comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). - -**Are you interested in assisting in maintaining Logrus?** Currently I have a -lot of obligations, and I am unable to provide Logrus with the maintainership it -needs. 
If you'd like to help, please reach out to me at `simon at author's -username dot com`. - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -``` -To ensure this behaviour even if a TTY is attached, set your formatter as follows: - -```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) -``` - -#### Logging Method Name - -If you wish to add the calling method as a field, instruct the logger via: -```go -log.SetReportCaller(true) -``` -This adds the caller as 'method' like so: - -```json -{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", -"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} -``` - -```text -time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin -``` -Note that this does add measurable overhead - the cost will depend on the version of Go, but is -between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: -``` -go test -bench=.*CallerTracing -``` - - -#### Case-sensitivity - -The organization's name was changed to lower-case--and this will not be changed -back. If you are getting import conflicts due to case sensitivity, please use -the lower-case import: `github.com/sirupsen/logrus`. 
- -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stdout instead of the default stderr - // Can be any io.Writer, see below for File example - log.SetOutput(os.Stdout) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - "github.com/sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout - - // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) - // if err == nil { - // log.Out = file - // } else { - // log.Info("Failed to log to file, using default stderr") - // } - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Default Fields - -Often it's helpful to have fields _always_ attached to log statements in an -application or parts of one. 
For example, you may want to always log the -`request_id` and `user_ip` in the context of a request. Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on -every line, you can create a `logrus.Entry` to pass around instead: - -```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") # will log request_id and user_ip -requestLogger.Warn("something not great happened") -``` - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) - - -#### Level logging - -Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Trace("Something very low level.") -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. 
For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true`. For Windows, see - [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). - * When colors are enabled, levels are truncated to 4 characters by default. To disable - truncation set the `DisableLevelTruncation` field to `true`. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). -* `logrus.JSONFormatter`. Logs fields as JSON. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). - -Third party logging formatters: - -* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. -* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). -* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. -* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. 
- ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -This means that we can override the standard library logger easily: - -```go -logger := logrus.New() -logger.Formatter = &logrus.JSONFormatter{} - -// Use logrus for standard log output -// Note that `log` here references stdlib's log -// Not logrus imported under the name `log`. -log.SetOutput(logger.Writer()) -``` - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| -|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -import( - "github.com/sirupsen/logrus" - "github.com/sirupsen/logrus/hooks/test" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestSomething(t*testing.T){ - logger, hook := test.NewNullLogger() - logger.Error("Helloerror") - - assert.Equal(t, 1, len(hook.Entries)) - assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal(t, "Helloerror", hook.LastEntry().Message) - - hook.Reset() - assert.Nil(t, hook.LastEntry()) -} -``` - -#### Fatal handlers - -Logrus can register one or more functions that will be called when any `fatal` -level message is logged. The registered handlers will be executed before -logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need -to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. - -``` -... -handler := func() { - // gracefully shutdown something... -} -logrus.RegisterExitHandler(handler) -... -``` - -#### Thread safety - -By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. -If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. - -Situation when locking is not needed includes: - -* You have no hooks registered, or hooks calling is already thread-safe. - -* Writing to logger.Out is already thread-safe, for example: - - 1) logger.Out is protected by locks. - - 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. 
(This allow multi-thread/multi-process writing) - - (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go deleted file mode 100644 index 8fd189e1c..000000000 --- a/vendor/github.com/sirupsen/logrus/alt_exit.go +++ /dev/null @@ -1,76 +0,0 @@ -package logrus - -// The following code was sourced and modified from the -// https://github.com/tebeka/atexit package governed by the following license: -// -// Copyright (c) 2012 Miki Tebeka . -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -import ( - "fmt" - "os" -) - -var handlers = []func(){} - -func runHandler(handler func()) { - defer func() { - if err := recover(); err != nil { - fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) - } - }() - - handler() -} - -func runHandlers() { - for _, handler := range handlers { - runHandler(handler) - } -} - -// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) -func Exit(code int) { - runHandlers() - os.Exit(code) -} - -// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, -// call logrus.Exit to invoke all handlers. The handlers will also be invoked when -// any Fatal log entry is made. -// -// This method is useful when a caller wishes to use logrus to log a fatal -// message but also needs to gracefully shutdown. An example usecase could be -// closing database connections, or sending a alert that the application is -// closing. -func RegisterExitHandler(handler func()) { - handlers = append(handlers, handler) -} - -// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, -// call logrus.Exit to invoke all handlers. The handlers will also be invoked when -// any Fatal log entry is made. -// -// This method is useful when a caller wishes to use logrus to log a fatal -// message but also needs to gracefully shutdown. An example usecase could be -// closing database connections, or sending a alert that the application is -// closing. -func DeferExitHandler(handler func()) { - handlers = append([]func(){handler}, handlers...) 
-} diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml deleted file mode 100644 index 96c2ce15f..000000000 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: "{build}" -platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath -branches: - only: - - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version -build_script: - - go get -t - - go test diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go deleted file mode 100644 index da67aba06..000000000 --- a/vendor/github.com/sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger. - - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/sirupsen/logrus -*/ -package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go deleted file mode 100644 index 63e25583c..000000000 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ /dev/null @@ -1,407 +0,0 @@ -package logrus - -import ( - "bytes" - "context" - "fmt" - "os" - "reflect" - "runtime" - "strings" - "sync" - "time" -) - -var ( - bufferPool *sync.Pool - - // qualified package name, cached at first use - logrusPackage string - - // Positions in the call stack when tracing to report the calling method - minimumCallerDepth int - - // Used for caller information initialisation - callerInitOnce sync.Once -) - -const ( - maximumCallerDepth int = 25 - knownLogrusFrames int = 4 -) - -func init() { - bufferPool = &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - } - - // start at the bottom of the stack before the package-name cache is primed - minimumCallerDepth = 1 -} - -// Defines the key when adding errors using WithError. -var ErrorKey = "error" - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, -// Info, Warn, Error, Fatal or Panic is called on it. These objects can be -// reused and passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic - // This field will be set on entry firing and the value will be equal to the one in Logger struct field. - Level Level - - // Calling method, with package name - Caller *runtime.Frame - - // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic - Message string - - // When formatter is called in entry.log(), a Buffer may be set to entry - Buffer *bytes.Buffer - - // Contains the context set by the user. Useful for hook processing etc. 
- Context context.Context - - // err may contain a field formatting error - err string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, plus one optional. Give a little extra room. - Data: make(Fields, 6), - } -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - if err != nil { - return "", err - } - str := string(serialized) - return str, nil -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. -func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a context to the Entry. -func (entry *Entry) WithContext(ctx context.Context) *Entry { - return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx} -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - fieldErr := entry.err - for k, v := range fields { - isErrField := false - if t := reflect.TypeOf(v); t != nil { - switch t.Kind() { - case reflect.Func: - isErrField = true - case reflect.Ptr: - isErrField = t.Elem().Kind() == reflect.Func - } - } - if isErrField { - tmp := fmt.Sprintf("can not add field %q", k) - if fieldErr != "" { - fieldErr = entry.err + ", " + tmp - } else { - fieldErr = tmp - } - } else { - data[k] = v - } - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} -} - -// Overrides the time of the Entry. -func (entry *Entry) WithTime(t time.Time) *Entry { - return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context} -} - -// getPackageName reduces a fully qualified function name to the package name -// There really ought to be to be a better way... 
-func getPackageName(f string) string { - for { - lastPeriod := strings.LastIndex(f, ".") - lastSlash := strings.LastIndex(f, "/") - if lastPeriod > lastSlash { - f = f[:lastPeriod] - } else { - break - } - } - - return f -} - -// getCaller retrieves the name of the first non-logrus calling function -func getCaller() *runtime.Frame { - - // cache this package's fully-qualified name - callerInitOnce.Do(func() { - pcs := make([]uintptr, 2) - _ = runtime.Callers(0, pcs) - logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name()) - - // now that we have the cache, we can skip a minimum count of known-logrus functions - // XXX this is dubious, the number of frames may vary - minimumCallerDepth = knownLogrusFrames - }) - - // Restrict the lookback frames to avoid runaway lookups - pcs := make([]uintptr, maximumCallerDepth) - depth := runtime.Callers(minimumCallerDepth, pcs) - frames := runtime.CallersFrames(pcs[:depth]) - - for f, again := frames.Next(); again; f, again = frames.Next() { - pkg := getPackageName(f.Function) - - // If the caller isn't part of this package, we're done - if pkg != logrusPackage { - return &f - } - } - - // if we got here, we failed to find the caller's context - return nil -} - -func (entry Entry) HasCaller() (has bool) { - return entry.Logger != nil && - entry.Logger.ReportCaller && - entry.Caller != nil -} - -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { - var buffer *bytes.Buffer - - // Default to now, but allow users to override if they want. - // - // We don't have to worry about polluting future calls to Entry#log() - // with this assignment because this function is declared with a - // non-pointer receiver. - if entry.Time.IsZero() { - entry.Time = time.Now() - } - - entry.Level = level - entry.Message = msg - if entry.Logger.ReportCaller { - entry.Caller = getCaller() - } - - entry.fireHooks() - - buffer = bufferPool.Get().(*bytes.Buffer) - buffer.Reset() - defer bufferPool.Put(buffer) - entry.Buffer = buffer - - entry.write() - - entry.Buffer = nil - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(&entry) - } -} - -func (entry *Entry) fireHooks() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - err := entry.Logger.Hooks.Fire(entry.Level, entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - } -} - -func (entry *Entry) write() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - serialized, err := entry.Logger.Formatter.Format(entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - } else { - _, err = entry.Logger.Out.Write(serialized) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - } -} - -func (entry *Entry) Log(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.log(level, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Trace(args ...interface{}) { - entry.Log(TraceLevel, args...) -} - -func (entry *Entry) Debug(args ...interface{}) { - entry.Log(DebugLevel, args...) -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - entry.Log(InfoLevel, args...) 
-} - -func (entry *Entry) Warn(args ...interface{}) { - entry.Log(WarnLevel, args...) -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - entry.Log(ErrorLevel, args...) -} - -func (entry *Entry) Fatal(args ...interface{}) { - entry.Log(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - entry.Log(PanicLevel, args...) - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Logf(level Level, format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Tracef(format string, args ...interface{}) { - entry.Logf(TraceLevel, format, args...) -} - -func (entry *Entry) Debugf(format string, args ...interface{}) { - entry.Logf(DebugLevel, format, args...) -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - entry.Logf(InfoLevel, format, args...) -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - entry.Logf(WarnLevel, format, args...) -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - entry.Logf(ErrorLevel, format, args...) -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - entry.Logf(FatalLevel, format, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - entry.Logf(PanicLevel, format, args...) -} - -// Entry Println family functions - -func (entry *Entry) Logln(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Traceln(args ...interface{}) { - entry.Logln(TraceLevel, args...) -} - -func (entry *Entry) Debugln(args ...interface{}) { - entry.Logln(DebugLevel, args...) -} - -func (entry *Entry) Infoln(args ...interface{}) { - entry.Logln(InfoLevel, args...) -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - entry.Logln(WarnLevel, args...) -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - entry.Logln(ErrorLevel, args...) -} - -func (entry *Entry) Fatalln(args ...interface{}) { - entry.Logln(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - entry.Logln(PanicLevel, args...) -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go deleted file mode 100644 index 62fc2f219..000000000 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ /dev/null @@ -1,225 +0,0 @@ -package logrus - -import ( - "context" - "io" - "time" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.SetOutput(out) -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.SetFormatter(formatter) -} - -// SetReportCaller sets whether the standard logger will include the calling -// method as a field. -func SetReportCaller(include bool) { - std.SetReportCaller(include) -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.SetLevel(level) -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - return std.GetLevel() -} - -// IsLevelEnabled checks if the log level of the standard logger is greater than the level param -func IsLevelEnabled(level Level) bool { - return std.IsLevelEnabled(level) -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.AddHook(hook) -} - -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *Entry { - return std.WithField(ErrorKey, err) -} - -// WithContext creates an entry from the standard logger and adds a context to it. -func WithContext(ctx context.Context) *Entry { - return std.WithContext(ctx) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// WithTime creats an entry from the standard logger and overrides the time of -// logs generated with it. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithTime(t time.Time) *Entry { - return std.WithTime(t) -} - -// Trace logs a message at level Trace on the standard logger. -func Trace(args ...interface{}) { - std.Trace(args...) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) 
-} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Tracef logs a message at level Trace on the standard logger. -func Tracef(format string, args ...interface{}) { - std.Tracef(format, args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Traceln logs a message at level Trace on the standard logger. -func Traceln(args ...interface{}) { - std.Traceln(args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) 
-} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go deleted file mode 100644 index 408883773..000000000 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ /dev/null @@ -1,78 +0,0 @@ -package logrus - -import "time" - -// Default key names for the default fields -const ( - defaultTimestampFormat = time.RFC3339 - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" - FieldKeyLogrusError = "logrus_error" - FieldKeyFunc = "func" - FieldKeyFile = "file" -) - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { - timeKey := fieldMap.resolve(FieldKeyTime) - if t, ok := data[timeKey]; ok { - data["fields."+timeKey] = t - delete(data, timeKey) - } - - msgKey := fieldMap.resolve(FieldKeyMsg) - if m, ok := data[msgKey]; ok { - data["fields."+msgKey] = m - delete(data, msgKey) - } - - levelKey := fieldMap.resolve(FieldKeyLevel) - if l, ok := data[levelKey]; ok { - data["fields."+levelKey] = l - delete(data, levelKey) - } - - logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) - if l, ok := data[logrusErrKey]; ok { - data["fields."+logrusErrKey] = l - delete(data, logrusErrKey) - } - - // If reportCaller is not set, 'func' will not conflict. 
- if reportCaller { - funcKey := fieldMap.resolve(FieldKeyFunc) - if l, ok := data[funcKey]; ok { - data["fields."+funcKey] = l - } - fileKey := fieldMap.resolve(FieldKeyFile) - if l, ok := data[fileKey]; ok { - data["fields."+fileKey] = l - } - } -} diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod deleted file mode 100644 index 12fdf9898..000000000 --- a/vendor/github.com/sirupsen/logrus/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/sirupsen/logrus - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/konsorten/go-windows-terminal-sequences v1.0.1 - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.1.1 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/sys v0.0.0-20190422165155-953cdadca894 -) diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum deleted file mode 100644 index 596c318b9..000000000 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ /dev/null @@ -1,16 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc3..000000000 --- a/vendor/github.com/sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. 
This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go deleted file mode 100644 index 098a21a06..000000000 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,121 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "fmt" - "runtime" -) - -type fieldKey string - -// FieldMap allows customization of the key names for default fields. -type FieldMap map[fieldKey]string - -func (f FieldMap) resolve(key fieldKey) string { - if k, ok := f[key]; ok { - return k - } - - return string(key) -} - -// JSONFormatter formats logs into parsable json -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - TimestampFormat string - - // DisableTimestamp allows disabling automatic timestamps in output - DisableTimestamp bool - - // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. - DataKey string - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &JSONFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", - // FieldKeyFunc: "@caller", - // }, - // } - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the json data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from json fields. 
- CallerPrettyfier func(*runtime.Frame) (function string, file string) - - // PrettyPrint will indent all json logs - PrettyPrint bool -} - -// Format renders a single log entry -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+4) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - - if f.DataKey != "" { - newData := make(Fields, 4) - newData[f.DataKey] = data - data = newData - } - - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - - if entry.err != "" { - data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err - } - if !f.DisableTimestamp { - data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) - } - data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message - data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() - if entry.HasCaller() { - funcVal := entry.Caller.Function - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - if funcVal != "" { - data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal - } - if fileVal != "" { - data[f.FieldMap.resolve(FieldKeyFile)] = fileVal - } - } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - encoder := json.NewEncoder(b) - if f.PrettyPrint { - encoder.SetIndent("", " ") - } - if err := encoder.Encode(data); err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) - } - - return b.Bytes(), nil -} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go deleted file mode 100644 index c0c0b1e55..000000000 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ /dev/null @@ -1,351 +0,0 @@ -package logrus - -import ( - "context" - "io" - "os" - "sync" - "sync/atomic" - "time" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventurous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - - // Flag for whether to log caller info (off by default) - ReportCaller bool - - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. - Level Level - // Used to sync writing to the log. 
Locking is enabled by Default - mu MutexWrap - // Reusable empty entry - entryPool sync.Pool - // Function to exit the application, defaults to `os.Exit()` - ExitFunc exitFunc -} - -type exitFunc func(int) - -type MutexWrap struct { - lock sync.Mutex - disabled bool -} - -func (mw *MutexWrap) Lock() { - if !mw.disabled { - mw.lock.Lock() - } -} - -func (mw *MutexWrap) Unlock() { - if !mw.disabled { - mw.lock.Unlock() - } -} - -func (mw *MutexWrap) Disable() { - mw.disabled = true -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - ExitFunc: os.Exit, - ReportCaller: false, - } -} - -func (logger *Logger) newEntry() *Entry { - entry, ok := logger.entryPool.Get().(*Entry) - if ok { - return entry - } - return NewEntry(logger) -} - -func (logger *Logger) releaseEntry(entry *Entry) { - entry.Data = map[string]interface{}{} - logger.entryPool.Put(entry) -} - -// Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithFields(fields) -} - -// Add an error as single field to the log entry. All it does is call -// `WithError` for the given `error`. -func (logger *Logger) WithError(err error) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithError(err) -} - -// Add a context to the log entry. -func (logger *Logger) WithContext(ctx context.Context) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithContext(ctx) -} - -// Overrides the time of the log entry. -func (logger *Logger) WithTime(t time.Time) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithTime(t) -} - -func (logger *Logger) Logf(level Level, format string, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logf(level, format, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Tracef(format string, args ...interface{}) { - logger.Logf(TraceLevel, format, args...) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - logger.Logf(DebugLevel, format, args...) -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - logger.Logf(InfoLevel, format, args...) -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - entry := logger.newEntry() - entry.Printf(format, args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - logger.Logf(WarnLevel, format, args...) 
-} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - logger.Warnf(format, args...) -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - logger.Logf(ErrorLevel, format, args...) -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - logger.Logf(FatalLevel, format, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - logger.Logf(PanicLevel, format, args...) -} - -func (logger *Logger) Log(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Log(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Trace(args ...interface{}) { - logger.Log(TraceLevel, args...) -} - -func (logger *Logger) Debug(args ...interface{}) { - logger.Log(DebugLevel, args...) -} - -func (logger *Logger) Info(args ...interface{}) { - logger.Log(InfoLevel, args...) -} - -func (logger *Logger) Print(args ...interface{}) { - entry := logger.newEntry() - entry.Print(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warn(args ...interface{}) { - logger.Log(WarnLevel, args...) -} - -func (logger *Logger) Warning(args ...interface{}) { - logger.Warn(args...) -} - -func (logger *Logger) Error(args ...interface{}) { - logger.Log(ErrorLevel, args...) -} - -func (logger *Logger) Fatal(args ...interface{}) { - logger.Log(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - logger.Log(PanicLevel, args...) -} - -func (logger *Logger) Logln(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logln(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Traceln(args ...interface{}) { - logger.Logln(TraceLevel, args...) -} - -func (logger *Logger) Debugln(args ...interface{}) { - logger.Logln(DebugLevel, args...) -} - -func (logger *Logger) Infoln(args ...interface{}) { - logger.Logln(InfoLevel, args...) -} - -func (logger *Logger) Println(args ...interface{}) { - entry := logger.newEntry() - entry.Println(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnln(args ...interface{}) { - logger.Logln(WarnLevel, args...) -} - -func (logger *Logger) Warningln(args ...interface{}) { - logger.Warnln(args...) -} - -func (logger *Logger) Errorln(args ...interface{}) { - logger.Logln(ErrorLevel, args...) -} - -func (logger *Logger) Fatalln(args ...interface{}) { - logger.Logln(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - logger.Logln(PanicLevel, args...) -} - -func (logger *Logger) Exit(code int) { - runHandlers() - if logger.ExitFunc == nil { - logger.ExitFunc = os.Exit - } - logger.ExitFunc(code) -} - -//When file is opened with appending mode, it's safe to -//write concurrently to a file (within 4k message on Linux). -//In these cases user can choose to disable the lock. -func (logger *Logger) SetNoLock() { - logger.mu.Disable() -} - -func (logger *Logger) level() Level { - return Level(atomic.LoadUint32((*uint32)(&logger.Level))) -} - -// SetLevel sets the logger level. -func (logger *Logger) SetLevel(level Level) { - atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) -} - -// GetLevel returns the logger level. -func (logger *Logger) GetLevel() Level { - return logger.level() -} - -// AddHook adds a hook to the logger hooks. 
-func (logger *Logger) AddHook(hook Hook) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Hooks.Add(hook) -} - -// IsLevelEnabled checks if the log level of the logger is greater than the level param -func (logger *Logger) IsLevelEnabled(level Level) bool { - return logger.level() >= level -} - -// SetFormatter sets the logger formatter. -func (logger *Logger) SetFormatter(formatter Formatter) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Formatter = formatter -} - -// SetOutput sets the logger output. -func (logger *Logger) SetOutput(output io.Writer) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Out = output -} - -func (logger *Logger) SetReportCaller(reportCaller bool) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.ReportCaller = reportCaller -} - -// ReplaceHooks replaces the logger hooks and returns the old ones -func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { - logger.mu.Lock() - oldHooks := logger.Hooks - logger.Hooks = hooks - logger.mu.Unlock() - return oldHooks -} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go deleted file mode 100644 index 8644761f7..000000000 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ /dev/null @@ -1,186 +0,0 @@ -package logrus - -import ( - "fmt" - "log" - "strings" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint32 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - if b, err := level.MarshalText(); err == nil { - return string(b) - } else { - return "unknown" - } -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch strings.ToLower(lvl) { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - case "trace": - return TraceLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (level *Level) UnmarshalText(text []byte) error { - l, err := ParseLevel(string(text)) - if err != nil { - return err - } - - *level = Level(l) - - return nil -} - -func (level Level) MarshalText() ([]byte, error) { - switch level { - case TraceLevel: - return []byte("trace"), nil - case DebugLevel: - return []byte("debug"), nil - case InfoLevel: - return []byte("info"), nil - case WarnLevel: - return []byte("warning"), nil - case ErrorLevel: - return []byte("error"), nil - case FatalLevel: - return []byte("fatal"), nil - case PanicLevel: - return []byte("panic"), nil - } - - return nil, fmt.Errorf("not a valid logrus level %d", level) -} - -// A constant exposing all logging levels -var AllLevels = []Level{ - PanicLevel, - FatalLevel, - ErrorLevel, - WarnLevel, - InfoLevel, - DebugLevel, - TraceLevel, -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the - // logging level is set to Panic. 
- FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel - // TraceLevel level. Designates finer-grained informational events than the Debug. - TraceLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var ( - _ StdLogger = &log.Logger{} - _ StdLogger = &Entry{} - _ StdLogger = &Logger{} -) - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. -type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} - -// The FieldLogger interface generalizes the Entry and Logger types -type FieldLogger interface { - WithField(key string, value interface{}) *Entry - WithFields(fields Fields) *Entry - WithError(err error) *Entry - - Debugf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Printf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Warningf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - - Debug(args ...interface{}) - Info(args ...interface{}) - Print(args ...interface{}) - Warn(args ...interface{}) - Warning(args ...interface{}) - Error(args ...interface{}) - Fatal(args ...interface{}) - Panic(args ...interface{}) - - Debugln(args ...interface{}) - Infoln(args ...interface{}) - Println(args ...interface{}) - Warnln(args ...interface{}) - Warningln(args ...interface{}) - Errorln(args ...interface{}) - Fatalln(args ...interface{}) - Panicln(args ...interface{}) - - // IsDebugEnabled() bool - // IsInfoEnabled() bool - // IsWarnEnabled() bool - // IsErrorEnabled() bool - // IsFatalEnabled() bool - // IsPanicEnabled() bool -} - -// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is -// here for consistancy. Do not use. Use Logger or Entry instead. 
-type Ext1FieldLogger interface { - FieldLogger - Tracef(format string, args ...interface{}) - Trace(args ...interface{}) - Traceln(args ...interface{}) -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go deleted file mode 100644 index 2403de981..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return true -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go deleted file mode 100644 index 3c4f43f91..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build darwin dragonfly freebsd netbsd openbsd - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} - diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go deleted file mode 100644 index 97af92c68..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build js nacl plan9 - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go deleted file mode 100644 index 3293fb3ca..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !appengine,!js,!windows,!nacl,!plan9 - -package logrus - -import ( - "io" - "os" -) - -func checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - return isTerminal(int(v.Fd())) - default: - return false - } -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go deleted file mode 100644 index f6710b3bd..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package logrus - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermio(fd, unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go deleted file mode 100644 index 355dc966f..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build linux aix - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} - diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go deleted file mode 100644 index 572889db2..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build !appengine,!js,windows - -package logrus - -import ( - "io" - "os" - "syscall" - - sequences "github.com/konsorten/go-windows-terminal-sequences" -) - -func initTerminal(w io.Writer) { - switch v := w.(type) { - case *os.File: - sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) - } -} - -func checkIfTerminal(w io.Writer) bool { - var ret bool - switch v := w.(type) { - case *os.File: - var mode uint32 - err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) - ret = (err == nil) - default: - ret = false - } - if ret { - initTerminal(w) - } - return ret -} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go deleted file mode 100644 index e01587c43..000000000 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,295 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "os" - "runtime" - "sort" - "strings" - "sync" - "time" -) - -const ( - red = 31 - yellow = 33 - blue = 36 - gray = 37 -) - -var baseTimestamp time.Time - -func init() { - baseTimestamp = time.Now() -} - -// TextFormatter formats logs into text -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ - EnvironmentOverrideColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. - DisableSorting bool - - // The keys sorting function, when uninitialized it uses sort.Strings. - SortingFunc func([]string) - - // Disables the truncation of the level text to 4 characters. - DisableLevelTruncation bool - - // QuoteEmptyFields will wrap empty fields in quotes if true - QuoteEmptyFields bool - - // Whether the logger's out is to a terminal - isTerminal bool - - // FieldMap allows users to customize the names of keys for default fields. 
- // As an example: - // formatter := &TextFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message"}} - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from fields. - CallerPrettyfier func(*runtime.Frame) (function string, file string) - - terminalInitOnce sync.Once -} - -func (f *TextFormatter) init(entry *Entry) { - if entry.Logger != nil { - f.isTerminal = checkIfTerminal(entry.Logger.Out) - } -} - -func (f *TextFormatter) isColored() bool { - isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) - - if f.EnvironmentOverrideColors { - if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { - isColored = true - } else if ok && force == "0" { - isColored = false - } else if os.Getenv("CLICOLOR") == "0" { - isColored = false - } - } - - return isColored && !f.DisableColors -} - -// Format renders a single log entry -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields) - for k, v := range entry.Data { - data[k] = v - } - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - - var funcVal, fileVal string - - fixedKeys := make([]string, 0, 4+len(data)) - if !f.DisableTimestamp { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) - } - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) - if entry.Message != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) - } - if entry.err != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) - } - if entry.HasCaller() { - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } else { - funcVal = entry.Caller.Function - fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - } - - if funcVal != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) - } - if fileVal != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) - } - } - - if !f.DisableSorting { - if f.SortingFunc == nil { - sort.Strings(keys) - fixedKeys = append(fixedKeys, keys...) - } else { - if !f.isColored() { - fixedKeys = append(fixedKeys, keys...) - f.SortingFunc(fixedKeys) - } else { - f.SortingFunc(keys) - } - } - } else { - fixedKeys = append(fixedKeys, keys...) 
- } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - f.terminalInitOnce.Do(func() { f.init(entry) }) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - if f.isColored() { - f.printColored(b, entry, keys, data, timestampFormat) - } else { - - for _, key := range fixedKeys { - var value interface{} - switch { - case key == f.FieldMap.resolve(FieldKeyTime): - value = entry.Time.Format(timestampFormat) - case key == f.FieldMap.resolve(FieldKeyLevel): - value = entry.Level.String() - case key == f.FieldMap.resolve(FieldKeyMsg): - value = entry.Message - case key == f.FieldMap.resolve(FieldKeyLogrusError): - value = entry.err - case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): - value = funcVal - case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): - value = fileVal - default: - value = data[key] - } - f.appendKeyValue(b, key, value) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel, TraceLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String()) - if !f.DisableLevelTruncation { - levelText = levelText[0:4] - } - - // Remove a single newline if it already exists in the message to keep - // the behavior of logrus text_formatter the same as the stdlib log package - entry.Message = strings.TrimSuffix(entry.Message, "\n") - - caller := "" - if entry.HasCaller() { - funcVal := fmt.Sprintf("%s()", entry.Caller.Function) - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - - if fileVal == "" { - caller = funcVal - } else if funcVal == "" { - caller = fileVal - } else { - caller = fileVal + " " + funcVal - } - } - - if f.DisableTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) - } else if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) - } - for _, k := range keys { - v := data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) - f.appendValue(b, v) - } -} - -func (f *TextFormatter) needsQuoting(text string) bool { - if f.QuoteEmptyFields && len(text) == 0 { - return true - } - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - if b.Len() > 0 { - b.WriteByte(' ') - } - b.WriteString(key) - b.WriteByte('=') - f.appendValue(b, value) -} - -func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { - stringVal, ok := value.(string) - if !ok { - stringVal = fmt.Sprint(value) - } - - if !f.needsQuoting(stringVal) { - b.WriteString(stringVal) - } else { - b.WriteString(fmt.Sprintf("%q", stringVal)) - } -} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go deleted file mode 100644 index 9e1f75135..000000000 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ /dev/null @@ -1,64 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - return logger.WriterLevel(InfoLevel) -} - -func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { - return NewEntry(logger).WriterLevel(level) -} - -func (entry *Entry) Writer() *io.PipeWriter { - return entry.WriterLevel(InfoLevel) -} - -func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { - reader, writer := io.Pipe() - - var printFunc func(args ...interface{}) - - switch level { - case TraceLevel: - printFunc = entry.Trace - case DebugLevel: - printFunc = entry.Debug - case InfoLevel: - printFunc = entry.Info - case WarnLevel: - printFunc = entry.Warn - case ErrorLevel: - printFunc = entry.Error - case FatalLevel: - printFunc = entry.Fatal - case PanicLevel: - printFunc = entry.Panic - default: - printFunc = entry.Print - } - - go entry.writerScanner(reader, printFunc) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - printFunc(scanner.Text()) - } - if err := scanner.Err(); err != nil { - entry.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index 1b8c7c261..000000000 --- a/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,36 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -*.exe - -cobra.test diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap deleted file mode 100644 index 94ec53068..000000000 --- a/vendor/github.com/spf13/cobra/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -Steve Francia -Bjørn Erik Pedersen -Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index 5afcb2096..000000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go - -matrix: - include: - - go: 1.9.4 - - go: 1.10.0 - - go: tip - allow_failures: - - go: tip - 
-before_install: - - mkdir -p bin - - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck - - chmod +x bin/shellcheck -script: - - PATH=$PATH:$PWD/bin go test -v ./... - - go build - - diff -u <(echo -n) <(gofmt -d -s .) - - if [ -z $NOVET ]; then - diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); - fi diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e266..000000000 --- a/vendor/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index 851fcc087..000000000 --- a/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,736 +0,0 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) - -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. - -Many of the most widely used Go projects are built using Cobra including: - -* [Kubernetes](http://kubernetes.io/) -* [Hugo](http://gohugo.io) -* [rkt](https://github.com/coreos/rkt) -* [etcd](https://github.com/coreos/etcd) -* [Moby (former Docker)](https://github.com/moby/moby) -* [Docker (distribution)](https://github.com/docker/distribution) -* [OpenShift](https://www.openshift.com/) -* [Delve](https://github.com/derekparker/delve) -* [GopherJS](http://www.gopherjs.org/) -* [CockroachDB](http://www.cockroachlabs.com/) -* [Bleve](http://www.blevesearch.com/) -* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -* [GiantSwarm's swarm](https://github.com/giantswarm/cli) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -* [rclone](http://rclone.org/) -* [nehm](https://github.com/bogem/nehm) -* [Pouch](https://github.com/alibaba/pouch) - -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) - -# Table of Contents - -- [Overview](#overview) -- [Concepts](#concepts) - * [Commands](#commands) - * [Flags](#flags) -- [Installing](#installing) -- [Getting Started](#getting-started) - * [Using the Cobra Generator](#using-the-cobra-generator) - * [Using the Cobra Library](#using-the-cobra-library) - * [Working with Flags](#working-with-flags) - * [Positional and Custom Arguments](#positional-and-custom-arguments) - * [Example](#example) - * [Help Command](#help-command) - * [Usage Message](#usage-message) - * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](#generating-documentation-for-your-command) - * [Generating bash completions](#generating-bash-completions) -- [Contributing](#contributing) -- [License](#license) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. - -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` -* Intelligent suggestions (`app srver`... did you mean `app server`?) -* Automatic help generation for commands and flags -* Automatic help flag recognition of `-h`, `--help`, etc. 
-* Automatically generated bash autocomplete for your application -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibility to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. - -The best applications will read like sentences when used. Users will know how -to use the application because they will natively understand how to use it. - -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE.` - or -`APPNAME COMMAND ARG --FLAG` - -A few good real world examples may better illustrate this point. - -In the following example, 'server' is a command, and 'port' is a flag: - - hugo server --port=1313 - -In this command we are telling Git to clone the url bare. - - git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above, 'server' is the command. - -[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) - -## Flags - -A flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/spf13/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executable -along with the library and its dependencies: - - go get -u github.com/spf13/cobra/cobra - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "fmt" - "os" - - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. 
- -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -import ( - "fmt" - "os" - - homedir "github.com/mitchellh/go-homedir" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func init() { - cobra.OnInitialize(initConfig) - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") - rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") -} - -func initConfig() { - // Don't forget to read config either from cfgFile or from home directory! - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := homedir.Dir() - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigName(".cobra") - } - - if err := viper.ReadInConfig(); err != nil { - fmt.Println("Can't read config:", err) - os.Exit(1) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import ( - "fmt" - "os" - - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -## Working with Flags - -Flags provide modifiers to control how the action command operates. 
- -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - -```go -rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default Cobra only parses local flags on the target command, any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example the persistent flag `author` is bound with `viper`. -**Note**, that the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. 
- -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires at least one arg") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. 
- - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
in the following formats: - -- [Markdown](doc/md_docs.md) -- [ReStructured Text](doc/rest_docs.md) -- [Man Page](doc/man_docs.md) - -## Generating bash completions - -Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). - -# Contributing - -1. Fork it -2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) -3. Create your feature branch (`git checkout -b my-new-feature`) -4. Make changes and add them (`git add .`) -5. Commit your changes (`git commit -m 'Add some feature'`) -6. Push to the branch (`git push origin my-new-feature`) -7. Create new pull request - -# License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go deleted file mode 100644 index a5d8a9273..000000000 --- a/vendor/github.com/spf13/cobra/args.go +++ /dev/null @@ -1,89 +0,0 @@ -package cobra - -import ( - "fmt" -) - -type PositionalArgs func(cmd *Command, args []string) error - -// Legacy arg validation has the following behaviour: -// - root commands with no subcommands can take arbitrary arguments -// - root commands with subcommands will do subcommand validity checking -// - subcommands will always accept arbitrary arguments -func legacyArgs(cmd *Command, args []string) error { - // no subcommand, always take args - if !cmd.HasSubCommands() { - return nil - } - - // root command with subcommands, do subcommand checking. - if !cmd.HasParent() && len(args) > 0 { - return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - return nil -} - -// NoArgs returns an error if any args are included. -func NoArgs(cmd *Command, args []string) error { - if len(args) > 0 { - return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) - } - return nil -} - -// OnlyValidArgs returns an error if any args are not in the list of ValidArgs. -func OnlyValidArgs(cmd *Command, args []string) error { - if len(cmd.ValidArgs) > 0 { - for _, v := range args { - if !stringInSlice(v, cmd.ValidArgs) { - return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - } - } - return nil -} - -// ArbitraryArgs never returns an error. -func ArbitraryArgs(cmd *Command, args []string) error { - return nil -} - -// MinimumNArgs returns an error if there is not at least N args. -func MinimumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < n { - return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) - } - return nil - } -} - -// MaximumNArgs returns an error if there are more than N args. -func MaximumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) > n { - return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// ExactArgs returns an error if there are not exactly n args. -func ExactArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) != n { - return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// RangeArgs returns an error if the number of args is not within the expected range. 
-func RangeArgs(min int, max int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < min || len(args) > max { - return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) - } - return nil - } -} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go deleted file mode 100644 index 8fa8f486f..000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ /dev/null @@ -1,584 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "sort" - "strings" - - "github.com/spf13/pflag" -) - -// Annotations for Bash completion. -const ( - BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" - BashCompCustom = "cobra_annotation_bash_completion_custom" - BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" - BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" -) - -func writePreamble(buf *bytes.Buffer, name string) { - buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(fmt.Sprintf(` -__%[1]s_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. -__%[1]s_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -__%[1]s_index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__%[1]s_contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__%[1]s_handle_reply() -{ - __%[1]s_debug "${FUNCNAME[0]}" - case $cur in - -*) - if [[ $(type -t compopt) = "builtin" ]]; then - compopt -o nospace - fi - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) - if [[ $(type -t compopt) = "builtin" ]]; then - [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace - fi - - # complete after --flag=abc - if [[ $cur == *=* ]]; then - if [[ $(type -t compopt) = "builtin" ]]; then - compopt +o nospace - fi - - local index flag - flag="${cur%%=*}" - __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" - COMPREPLY=() - if [[ ${index} -ge 0 ]]; then - PREFIX="" - cur="${cur#*=}" - ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then - # zsh completion needs --flag= prefix - eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" - fi - fi - fi - return 0; - ;; - esac - - # check if we are handling a flag with special work handling - local index - __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local completions - completions=("${commands[@]}") - if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") - fi - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions+=("${must_have_one_flag[@]}") - fi - COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) - - if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && 
${#must_have_one_noun[@]} -ne 0 ]]; then - COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) - fi - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - declare -F __custom_func >/dev/null && __custom_func - fi - - # available in bash-completion >= 2, not always present on macOS - if declare -F __ltrim_colon_completions >/dev/null; then - __ltrim_colon_completions "$cur" - fi - - # If there is only 1 completion and it is a flag with an = it will be completed - # but we don't want a space after the = - if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then - compopt -o nospace - fi -} - -# The arguments should be in the form "ext1|ext2|extn" -__%[1]s_handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__%[1]s_handle_subdirs_in_dir_flag() -{ - local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 -} - -__%[1]s_handle_flag() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - local flagvalue - # if the word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagvalue=${flagname#*=} # take in as flagvalue after the = - flagname=${flagname%%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" - if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # if you set a flag which only applies to this command, don't show subcommands - if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then - commands=() - fi - - # keep flag value with flagname as flaghash - # flaghash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag - fi - fi - - # skip the argument to a two word flag - if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - c=$((c+1)) - -} - -__%[1]s_handle_noun() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__%[1]s_handle_command() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]//:/__}" - else - if [[ $c -eq 0 ]]; then - next_command="_%[1]s_root_command" - else - next_command="_${words[c]//:/__}" - fi - fi - c=$((c+1)) - __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F "$next_command" >/dev/null && $next_command -} - -__%[1]s_handle_word() -{ - if [[ $c -ge $cword ]]; then - __%[1]s_handle_reply - return - fi - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __%[1]s_handle_flag - elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then - __%[1]s_handle_command - elif [[ $c -eq 0 ]]; then - __%[1]s_handle_command - elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then - # aliashash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then - words[c]=${aliashash[${words[c]}]} - __%[1]s_handle_command - else - __%[1]s_handle_noun - fi - else - __%[1]s_handle_noun - fi - __%[1]s_handle_word -} - -`, name)) -} - -func writePostscript(buf *bytes.Buffer, name string) { - name = strings.Replace(name, ":", "__", -1) - buf.WriteString(fmt.Sprintf("__start_%s()\n", name)) - buf.WriteString(fmt.Sprintf(`{ - local cur prev words cword - declare -A flaghash 2>/dev/null || : - declare -A aliashash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __%[1]s_init_completion -n "=" || return - fi - - local c=0 - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("%[1]s") - local must_have_one_flag=() - local must_have_one_noun=() - local last_command - local nouns=() - - __%[1]s_handle_word -} - -`, name)) - buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%s %s -else - complete -o default -o nospace -F __start_%s %s -fi - -`, name, name, name, name)) - buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n") -} - -func writeCommands(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" commands=()\n") - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { - continue - } - buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) - writeCmdAliases(buf, c) - } - buf.WriteString("\n") -} - -func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) { - for key, value := range annotations { - switch key { - case BashCompFilenameExt: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) > 0 { - ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") - } else { - ext = "_filedir" - } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - case BashCompCustom: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - if len(value) > 0 { - handlers := strings.Join(value, "; ") - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) - } else { - buf.WriteString(" flags_completion+=(:)\n") - } - case BashCompSubdirsInDir: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) == 1 { - ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] - } else { - ext = "_filedir -d" - } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - } - } -} - -func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { - name := flag.Shorthand - format := " " - if len(flag.NoOptDefVal) == 0 { - format += "two_word_" - } - format += "flags+=(\"-%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) - writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) -} - -func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { - name := flag.Name - format := " flags+=(\"--%s" - if len(flag.NoOptDefVal) == 0 { - format += "=" - } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, name)) - writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) -} - -func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { - name := flag.Name - format := " local_nonpersistent_flags+=(\"--%s" - if len(flag.NoOptDefVal) == 0 { - format += "=" - } - format += 
"\")\n" - buf.WriteString(fmt.Sprintf(format, name)) -} - -func writeFlags(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(` flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - -`) - localNonPersistentFlags := cmd.LocalNonPersistentFlags() - cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag, cmd) - } - if localNonPersistentFlags.Lookup(flag.Name) != nil { - writeLocalNonPersistentFlag(buf, flag) - } - }) - cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag, cmd) - } - }) - - buf.WriteString("\n") -} - -func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_flag=()\n") - flags := cmd.NonInheritedFlags() - flags.VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)) - } - } - } - }) -} - -func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_noun=()\n") - sort.Sort(sort.StringSlice(cmd.ValidArgs)) - for _, value := range cmd.ValidArgs { - buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) - } -} - -func writeCmdAliases(buf *bytes.Buffer, cmd *Command) { - if len(cmd.Aliases) == 0 { - return - } - - sort.Sort(sort.StringSlice(cmd.Aliases)) - - buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) - for _, value := range cmd.Aliases { - buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value)) - buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) - } - buf.WriteString(` fi`) - buf.WriteString("\n") -} -func writeArgAliases(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" noun_aliases=()\n") - sort.Sort(sort.StringSlice(cmd.ArgAliases)) - for _, value := range cmd.ArgAliases { - buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value)) - } -} - -func gen(buf *bytes.Buffer, cmd *Command) { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { - continue - } - gen(buf, c) - } - commandName := cmd.CommandPath() - commandName = strings.Replace(commandName, " ", "_", -1) - commandName = strings.Replace(commandName, ":", "__", -1) - - if cmd.Root() == cmd { - buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName)) - } else { - buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) - } - - buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) - buf.WriteString("\n") - buf.WriteString(" command_aliases=()\n") - buf.WriteString("\n") - - writeCommands(buf, cmd) - writeFlags(buf, cmd) - writeRequiredFlag(buf, cmd) - writeRequiredNouns(buf, cmd) - writeArgAliases(buf, cmd) - buf.WriteString("}\n\n") -} - -// GenBashCompletion generates bash completion file and writes to the passed writer. 
-func (c *Command) GenBashCompletion(w io.Writer) error { - buf := new(bytes.Buffer) - writePreamble(buf, c.Name()) - if len(c.BashCompletionFunction) > 0 { - buf.WriteString(c.BashCompletionFunction + "\n") - } - gen(buf, c) - writePostscript(buf, c.Name()) - - _, err := buf.WriteTo(w) - return err -} - -func nonCompletableFlag(flag *pflag.Flag) bool { - return flag.Hidden || len(flag.Deprecated) > 0 -} - -// GenBashCompletionFile generates bash completion file. -func (c *Command) GenBashCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenBashCompletion(outFile) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(c.Flags(), name) -} - -// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(c.PersistentFlags(), name) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (c *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.Flags(), name, extensions...) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func (c *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(c.Flags(), name, f) -} - -// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. 
-func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index e79d4769d..000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,221 +0,0 @@ -# Generating Bash Completions For Your Own cobra.Command - -Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: - -```go -package main - -import ( - "io/ioutil" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/util" -) - -func main() { - kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") -} -``` - -`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. - -## Creating your own custom functions - -Some more actual code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -## Have the completions code complete your 'nouns' - -In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. 
Simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like - -```bash -# kubectl get [tab][tab] -node pod replicationcontroller service -``` - -## Plural form and shortcuts for nouns - -If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -# kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns -in this example again instead of the replication controllers. - -## Mark flags as required - -Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. - -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. 
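The same `BashCompFilenameExt` annotation can also be attached to a flag registered the usual way, via the `MarkFlagFilename` helper shown in `bash_completions.go` above. A minimal sketch, assuming a hypothetical `create` command with a `--filename` flag:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical command used only to demonstrate the annotation.
	createCmd := &cobra.Command{
		Use:   "create",
		Short: "Create a resource from a file",
		Run: func(cmd *cobra.Command, args []string) {
			file, _ := cmd.Flags().GetString("filename")
			fmt.Println("would create a resource from:", file)
		},
	}
	createCmd.Flags().StringP("filename", "f", "", "file to use to create the resource")

	// MarkFlagFilename sets cobra.BashCompFilenameExt on the flag, so the
	// generated completion script offers files with the given extensions
	// (and directories) when completing --filename / -f.
	if err := createCmd.MarkFlagFilename("filename", "json", "yaml", "yml"); err != nil {
		log.Fatal(err)
	}

	// Write the completion script; sourcing it enables the behavior above.
	if err := createCmd.GenBashCompletionFile("create_completion.sh"); err != nil {
		log.Fatal(err)
	}
}
```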
- -# Specify custom flag completion - -Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify -a custom flag completion function with cobra.BashCompCustom: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` -# Using bash aliases for commands - -You can also configure the `bash aliases` for the commands and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$) aliasname -completion firstcommand secondcommand -``` diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go deleted file mode 100644 index 7010fd15b..000000000 --- a/vendor/github.com/spf13/cobra/cobra.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Commands similar to git, go tools and other modern CLI tools -// inspired by go, go-Commander, gh and subcommand - -package cobra - -import ( - "fmt" - "io" - "reflect" - "strconv" - "strings" - "text/template" - "unicode" -) - -var templateFuncs = template.FuncMap{ - "trim": strings.TrimSpace, - "trimRightSpace": trimRightSpace, - "trimTrailingWhitespaces": trimRightSpace, - "appendIfNotPresent": appendIfNotPresent, - "rpad": rpad, - "gt": Gt, - "eq": Eq, -} - -var initializers []func() - -// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing -// to automatically enable in CLI tools. -// Set this to true to enable it. -var EnablePrefixMatching = false - -// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. -// To disable sorting, set it to false. -var EnableCommandSorting = true - -// MousetrapHelpText enables an information splash screen on Windows -// if the CLI is started from explorer.exe. -// To disable the mousetrap, just set this variable to blank string (""). -// Works only on Microsoft Windows. -var MousetrapHelpText string = `This is a command line tool. - -You need to open cmd.exe and run it from there. -` - -// AddTemplateFunc adds a template function that's available to Usage and Help -// template generation. 
-func AddTemplateFunc(name string, tmplFunc interface{}) { - templateFuncs[name] = tmplFunc -} - -// AddTemplateFuncs adds multiple template functions that are available to Usage and -// Help template generation. -func AddTemplateFuncs(tmplFuncs template.FuncMap) { - for k, v := range tmplFuncs { - templateFuncs[k] = v - } -} - -// OnInitialize sets the passed functions to be run when each command's -// Execute method is called. -func OnInitialize(y ...func()) { - initializers = append(initializers, y...) -} - -// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as -// ints and then compared. -func Gt(a interface{}, b interface{}) bool { - var left, right int64 - av := reflect.ValueOf(a) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - left = int64(av.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - left = av.Int() - case reflect.String: - left, _ = strconv.ParseInt(av.String(), 10, 64) - } - - bv := reflect.ValueOf(b) - - switch bv.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - right = int64(bv.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - right = bv.Int() - case reflect.String: - right, _ = strconv.ParseInt(bv.String(), 10, 64) - } - - return left > right -} - -// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. -func Eq(a interface{}, b interface{}) bool { - av := reflect.ValueOf(a) - bv := reflect.ValueOf(b) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - panic("Eq called on unsupported type") - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return av.Int() == bv.Int() - case reflect.String: - return av.String() == bv.String() - } - return false -} - -func trimRightSpace(s string) string { - return strings.TrimRightFunc(s, unicode.IsSpace) -} - -// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. -func appendIfNotPresent(s, stringToAppend string) string { - if strings.Contains(s, stringToAppend) { - return s - } - return s + " " + stringToAppend -} - -// rpad adds padding to the right of a string. -func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) -} - -// tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) -} - -// ld compares two strings and returns the levenshtein distance between them. 
-func ld(s, t string, ignoreCase bool) int { - if ignoreCase { - s = strings.ToLower(s) - t = strings.ToLower(t) - } - d := make([][]int, len(s)+1) - for i := range d { - d[i] = make([]int, len(t)+1) - } - for i := range d { - d[i][0] = i - } - for j := range d[0] { - d[0][j] = j - } - for j := 1; j <= len(t); j++ { - for i := 1; i <= len(s); i++ { - if s[i-1] == t[j-1] { - d[i][j] = d[i-1][j-1] - } else { - min := d[i-1][j] - if d[i][j-1] < min { - min = d[i][j-1] - } - if d[i-1][j-1] < min { - min = d[i-1][j-1] - } - d[i][j] = min + 1 - } - } - - } - return d[len(s)][len(t)] -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go deleted file mode 100644 index 34d1bf367..000000000 --- a/vendor/github.com/spf13/cobra/command.go +++ /dev/null @@ -1,1517 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. -// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - flag "github.com/spf13/pflag" -) - -// FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist - -// Command is just that, a command for your application. -// E.g. 'go run ...' - 'run' is the command. Cobra requires -// you to define the usage and description as part of your command -// definition to ensure usability. -type Command struct { - // Use is the one-line usage message. - Use string - - // Aliases is an array of aliases that can be used instead of the first word in Use. - Aliases []string - - // SuggestFor is an array of command names for which this command will be suggested - - // similar to aliases but only suggests. - SuggestFor []string - - // Short is the short description shown in the 'help' output. - Short string - - // Long is the long message shown in the 'help ' output. - Long string - - // Example is examples of how to use the command. - Example string - - // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions - ValidArgs []string - - // Expected arguments - Args PositionalArgs - - // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the bash completion, - // but accepted if entered manually. - ArgAliases []string - - // BashCompletionFunction is custom functions used by the bash autocompletion generator. - BashCompletionFunction string - - // Deprecated defines, if this command is deprecated and should print this string when used. - Deprecated string - - // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. 
- Hidden bool - - // Annotations are key/value pairs that can be used by applications to identify or - // group commands. - Annotations map[string]string - - // Version defines the version for this command. If this value is non-empty and the command does not - // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, - // will print content of the "Version" variable. - Version string - - // The *Run functions are executed in the following order: - // * PersistentPreRun() - // * PreRun() - // * Run() - // * PostRun() - // * PersistentPostRun() - // All functions get the same args, the arguments after the command name. - // - // PersistentPreRun: children of this command will inherit and execute. - PersistentPreRun func(cmd *Command, args []string) - // PersistentPreRunE: PersistentPreRun but returns an error. - PersistentPreRunE func(cmd *Command, args []string) error - // PreRun: children of this command will not inherit. - PreRun func(cmd *Command, args []string) - // PreRunE: PreRun but returns an error. - PreRunE func(cmd *Command, args []string) error - // Run: Typically the actual work function. Most commands will only implement this. - Run func(cmd *Command, args []string) - // RunE: Run but returns an error. - RunE func(cmd *Command, args []string) error - // PostRun: run after the Run command. - PostRun func(cmd *Command, args []string) - // PostRunE: PostRun but returns an error. - PostRunE func(cmd *Command, args []string) error - // PersistentPostRun: children of this command will inherit and execute after PostRun. - PersistentPostRun func(cmd *Command, args []string) - // PersistentPostRunE: PersistentPostRun but returns an error. - PersistentPostRunE func(cmd *Command, args []string) error - - // SilenceErrors is an option to quiet errors down stream. - SilenceErrors bool - - // SilenceUsage is an option to silence usage when an error occurs. - SilenceUsage bool - - // DisableFlagParsing disables the flag parsing. - // If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool - - // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - // will be printed by generating docs for this command. - DisableAutoGenTag bool - - // DisableFlagsInUseLine will disable the addition of [flags] to the usage - // line of a command when printing help or generating docs - DisableFlagsInUseLine bool - - // DisableSuggestions disables the suggestions based on Levenshtein distance - // that go along with 'unknown command' messages. - DisableSuggestions bool - // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - // Must be > 0. - SuggestionsMinimumDistance int - - // TraverseChildren parses flags on all parents before executing child command. - TraverseChildren bool - - //FParseErrWhitelist flag parse errors to be ignored - FParseErrWhitelist FParseErrWhitelist - - // commands is the list of commands supported by this program. - commands []*Command - // parent is a parent command for this command. - parent *Command - // Max lengths of commands' string lengths for use in padding. - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - // commandsAreSorted defines, if command slice are sorted or not. - commandsAreSorted bool - // commandCalledAs is the name or alias value used to call this command. - commandCalledAs struct { - name string - called bool - } - - // args is actual args parsed from flags. 
- args []string - // flagErrorBuf contains all error messages from pflag. - flagErrorBuf *bytes.Buffer - // flags is full set of flags. - flags *flag.FlagSet - // pflags contains persistent flags. - pflags *flag.FlagSet - // lflags contains local flags. - lflags *flag.FlagSet - // iflags contains inherited flags. - iflags *flag.FlagSet - // parentsPflags is all persistent flags of cmd's parents. - parentsPflags *flag.FlagSet - // globNormFunc is the global normalization function - // that we can use on every pflag set and children commands - globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - - // output is an output writer defined by user. - output io.Writer - // usageFunc is usage func defined by user. - usageFunc func(*Command) error - // usageTemplate is usage template defined by user. - usageTemplate string - // flagErrorFunc is func defined by user and it's called when the parsing of - // flags returns an error. - flagErrorFunc func(*Command, error) error - // helpTemplate is help template defined by user. - helpTemplate string - // helpFunc is help func defined by user. - helpFunc func(*Command, []string) - // helpCommand is command with usage 'help'. If it's not defined by user, - // cobra uses default help command. - helpCommand *Command - // versionTemplate is the version template defined by user. - versionTemplate string -} - -// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden -// particularly useful when testing. -func (c *Command) SetArgs(a []string) { - c.args = a -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (c *Command) SetOutput(output io.Writer) { - c.output = output -} - -// SetUsageFunc sets usage function. Usage can be defined by application. -func (c *Command) SetUsageFunc(f func(*Command) error) { - c.usageFunc = f -} - -// SetUsageTemplate sets usage template. Can be defined by Application. -func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s -} - -// SetFlagErrorFunc sets a function to generate an error when flag parsing -// fails. -func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { - c.flagErrorFunc = f -} - -// SetHelpFunc sets help function. Can be defined by Application. -func (c *Command) SetHelpFunc(f func(*Command, []string)) { - c.helpFunc = f -} - -// SetHelpCommand sets help command. -func (c *Command) SetHelpCommand(cmd *Command) { - c.helpCommand = cmd -} - -// SetHelpTemplate sets help template to be used. Application can use it to set custom template. -func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s -} - -// SetVersionTemplate sets version template to be used. Application can use it to set custom template. -func (c *Command) SetVersionTemplate(s string) { - c.versionTemplate = s -} - -// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. -// The user should not have a cyclic dependency on commands. -func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { - c.Flags().SetNormalizeFunc(n) - c.PersistentFlags().SetNormalizeFunc(n) - c.globNormFunc = n - - for _, command := range c.commands { - command.SetGlobalNormalizationFunc(n) - } -} - -// OutOrStdout returns output to stdout. 
-func (c *Command) OutOrStdout() io.Writer { - return c.getOut(os.Stdout) -} - -// OutOrStderr returns output to stderr -func (c *Command) OutOrStderr() io.Writer { - return c.getOut(os.Stderr) -} - -func (c *Command) getOut(def io.Writer) io.Writer { - if c.output != nil { - return c.output - } - if c.HasParent() { - return c.parent.getOut(def) - } - return def -} - -// UsageFunc returns either the function set by SetUsageFunc for this command -// or a parent, or it returns a default usage function. -func (c *Command) UsageFunc() (f func(*Command) error) { - if c.usageFunc != nil { - return c.usageFunc - } - if c.HasParent() { - return c.Parent().UsageFunc() - } - return func(c *Command) error { - c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) - if err != nil { - c.Println(err) - } - return err - } -} - -// Usage puts out the usage for the command. -// Used when a user provides invalid input. -// Can be defined by user by overriding UsageFunc. -func (c *Command) Usage() error { - return c.UsageFunc()(c) -} - -// HelpFunc returns either the function set by SetHelpFunc for this command -// or a parent, or it returns a function with default help behavior. -func (c *Command) HelpFunc() func(*Command, []string) { - if c.helpFunc != nil { - return c.helpFunc - } - if c.HasParent() { - return c.Parent().HelpFunc() - } - return func(c *Command, a []string) { - c.mergePersistentFlags() - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) - if err != nil { - c.Println(err) - } - } -} - -// Help puts out the help for the command. -// Used when a user calls help [command]. -// Can be defined by user by overriding HelpFunc. -func (c *Command) Help() error { - c.HelpFunc()(c, []string{}) - return nil -} - -// UsageString return usage string. -func (c *Command) UsageString() string { - tmpOutput := c.output - bb := new(bytes.Buffer) - c.SetOutput(bb) - c.Usage() - c.output = tmpOutput - return bb.String() -} - -// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this -// command or a parent, or it returns a function which returns the original -// error. -func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { - if c.flagErrorFunc != nil { - return c.flagErrorFunc - } - - if c.HasParent() { - return c.parent.FlagErrorFunc() - } - return func(c *Command, err error) error { - return err - } -} - -var minUsagePadding = 25 - -// UsagePadding return padding for the usage. -func (c *Command) UsagePadding() int { - if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { - return minUsagePadding - } - return c.parent.commandsMaxUseLen -} - -var minCommandPathPadding = 11 - -// CommandPathPadding return padding for the command path. -func (c *Command) CommandPathPadding() int { - if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { - return minCommandPathPadding - } - return c.parent.commandsMaxCommandPathLen -} - -var minNamePadding = 11 - -// NamePadding returns padding for the name. -func (c *Command) NamePadding() int { - if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { - return minNamePadding - } - return c.parent.commandsMaxNameLen -} - -// UsageTemplate returns usage template for the command. 
-func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate - } - - if c.HasParent() { - return c.parent.UsageTemplate() - } - return `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}} - -Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` -} - -// HelpTemplate return help template for the command. -func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate - } - - if c.HasParent() { - return c.parent.HelpTemplate() - } - return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` -} - -// VersionTemplate return version template for the command. -func (c *Command) VersionTemplate() string { - if c.versionTemplate != "" { - return c.versionTemplate - } - - if c.HasParent() { - return c.parent.VersionTemplate() - } - return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} -` -} - -func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { - flag := fs.Lookup(name) - if flag == nil { - return false - } - return flag.NoOptDefVal != "" -} - -func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { - if len(name) == 0 { - return false - } - - flag := fs.ShorthandLookup(name[:1]) - if flag == nil { - return false - } - return flag.NoOptDefVal != "" -} - -func stripFlags(args []string, c *Command) []string { - if len(args) == 0 { - return args - } - c.mergePersistentFlags() - - commands := []string{} - flags := c.Flags() - -Loop: - for len(args) > 0 { - s := args[0] - args = args[1:] - switch { - case s == "--": - // "--" terminates the flags - break Loop - case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): - // If '--flag arg' then - // delete arg from args. - fallthrough // (do the same as below) - case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): - // If '-f arg' then - // delete 'arg' from args or break the loop if len(args) <= 1. - if len(args) <= 1 { - break Loop - } else { - args = args[1:] - continue - } - case s != "" && !strings.HasPrefix(s, "-"): - commands = append(commands, s) - } - } - - return commands -} - -// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like -// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) 
- return ret - } - } - return args -} - -func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || - (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) -} - -// Find the target command given the args and command tree -// Meant to be run on the highest node. Only searches down. -func (c *Command) Find(args []string) (*Command, []string, error) { - var innerfind func(*Command, []string) (*Command, []string) - - innerfind = func(c *Command, innerArgs []string) (*Command, []string) { - argsWOflags := stripFlags(innerArgs, c) - if len(argsWOflags) == 0 { - return c, innerArgs - } - nextSubCmd := argsWOflags[0] - - cmd := c.findNext(nextSubCmd) - if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) - } - return c, innerArgs - } - - commandFound, a := innerfind(c, args) - if commandFound.Args == nil { - return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) - } - return commandFound, a, nil -} - -func (c *Command) findSuggestions(arg string) string { - if c.DisableSuggestions { - return "" - } - if c.SuggestionsMinimumDistance <= 0 { - c.SuggestionsMinimumDistance = 2 - } - suggestionsString := "" - if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" - for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) - } - } - return suggestionsString -} - -func (c *Command) findNext(next string) *Command { - matches := make([]*Command, 0) - for _, cmd := range c.commands { - if cmd.Name() == next || cmd.HasAlias(next) { - cmd.commandCalledAs.name = next - return cmd - } - if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { - matches = append(matches, cmd) - } - } - - if len(matches) == 1 { - return matches[0] - } - - return nil -} - -// Traverse the command tree to find the command, and parse args for -// each parent. -func (c *Command) Traverse(args []string) (*Command, []string, error) { - flags := []string{} - inFlag := false - - for i, arg := range args { - switch { - // A long flag with a space separated value - case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): - // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' - inFlag = !hasNoOptDefVal(arg[2:], c.Flags()) - flags = append(flags, arg) - continue - // A short flag with a space separated value - case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()): - inFlag = true - flags = append(flags, arg) - continue - // The value for a flag - case inFlag: - inFlag = false - flags = append(flags, arg) - continue - // A flag without a value, or with an `=` separated value - case isFlagArg(arg): - flags = append(flags, arg) - continue - } - - cmd := c.findNext(arg) - if cmd == nil { - return c, args, nil - } - - if err := c.ParseFlags(flags); err != nil { - return nil, args, err - } - return cmd.Traverse(args[i+1:]) - } - return c, args, nil -} - -// SuggestionsFor provides suggestions for the typedName. 
-func (c *Command) SuggestionsFor(typedName string) []string { - suggestions := []string{} - for _, cmd := range c.commands { - if cmd.IsAvailableCommand() { - levenshteinDistance := ld(typedName, cmd.Name(), true) - suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance - suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) - if suggestByLevenshtein || suggestByPrefix { - suggestions = append(suggestions, cmd.Name()) - } - for _, explicitSuggestion := range cmd.SuggestFor { - if strings.EqualFold(typedName, explicitSuggestion) { - suggestions = append(suggestions, cmd.Name()) - } - } - } - } - return suggestions -} - -// VisitParents visits all parents of the command and invokes fn on each parent. -func (c *Command) VisitParents(fn func(*Command)) { - if c.HasParent() { - fn(c.Parent()) - c.Parent().VisitParents(fn) - } -} - -// Root finds root command. -func (c *Command) Root() *Command { - if c.HasParent() { - return c.Parent().Root() - } - return c -} - -// ArgsLenAtDash will return the length of c.Flags().Args at the moment -// when a -- was found during args parsing. -func (c *Command) ArgsLenAtDash() int { - return c.Flags().ArgsLenAtDash() -} - -func (c *Command) execute(a []string) (err error) { - if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") - } - - if len(c.Deprecated) > 0 { - c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) - } - - // initialize help and version flag at the last point possible to allow for user - // overriding - c.InitDefaultHelpFlag() - c.InitDefaultVersionFlag() - - err = c.ParseFlags(a) - if err != nil { - return c.FlagErrorFunc()(c, err) - } - - // If help is called, regardless of other flags, return we want help. - // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") - if err != nil { - // should be impossible to get here as we always declare a help - // flag in InitDefaultHelpFlag() - c.Println("\"help\" flag declared as non-bool. Please correct your code") - return err - } - - if helpVal { - return flag.ErrHelp - } - - // for back-compat, only add version flag behavior if version is defined - if c.Version != "" { - versionVal, err := c.Flags().GetBool("version") - if err != nil { - c.Println("\"version\" flag declared as non-bool. 
Please correct your code") - return err - } - if versionVal { - err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) - if err != nil { - c.Println(err) - } - return err - } - } - - if !c.Runnable() { - return flag.ErrHelp - } - - c.preRun() - - argWoFlags := c.Flags().Args() - if c.DisableFlagParsing { - argWoFlags = a - } - - if err := c.ValidateArgs(argWoFlags); err != nil { - return err - } - - for p := c; p != nil; p = p.Parent() { - if p.PersistentPreRunE != nil { - if err := p.PersistentPreRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPreRun != nil { - p.PersistentPreRun(c, argWoFlags) - break - } - } - if c.PreRunE != nil { - if err := c.PreRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PreRun != nil { - c.PreRun(c, argWoFlags) - } - - if err := c.validateRequiredFlags(); err != nil { - return err - } - if c.RunE != nil { - if err := c.RunE(c, argWoFlags); err != nil { - return err - } - } else { - c.Run(c, argWoFlags) - } - if c.PostRunE != nil { - if err := c.PostRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PostRun != nil { - c.PostRun(c, argWoFlags) - } - for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRunE != nil { - if err := p.PersistentPostRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPostRun != nil { - p.PersistentPostRun(c, argWoFlags) - break - } - } - - return nil -} - -func (c *Command) preRun() { - for _, x := range initializers { - x() - } -} - -// Execute uses the args (os.Args[1:] by default) -// and run through the command tree finding appropriate matches -// for commands and then corresponding flags. -func (c *Command) Execute() error { - _, err := c.ExecuteC() - return err -} - -// ExecuteC executes the command. 
-func (c *Command) ExecuteC() (cmd *Command, err error) { - // Regardless of what command execute is called on, run on Root only - if c.HasParent() { - return c.Root().ExecuteC() - } - - // windows hook - if preExecHookFn != nil { - preExecHookFn(c) - } - - // initialize help as the last point possible to allow for user - // overriding - c.InitDefaultHelpCmd() - - var args []string - - // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 - if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { - args = os.Args[1:] - } else { - args = c.args - } - - var flags []string - if c.TraverseChildren { - cmd, flags, err = c.Traverse(args) - } else { - cmd, flags, err = c.Find(args) - } - if err != nil { - // If found parse to a subcommand and then failed, talk about the subcommand - if cmd != nil { - c = cmd - } - if !c.SilenceErrors { - c.Println("Error:", err.Error()) - c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) - } - return c, err - } - - cmd.commandCalledAs.called = true - if cmd.commandCalledAs.name == "" { - cmd.commandCalledAs.name = cmd.Name() - } - - err = cmd.execute(flags) - if err != nil { - // Always show help if requested, even if SilenceErrors is in - // effect - if err == flag.ErrHelp { - cmd.HelpFunc()(cmd, args) - return cmd, nil - } - - // If root command has SilentErrors flagged, - // all subcommands should respect it - if !cmd.SilenceErrors && !c.SilenceErrors { - c.Println("Error:", err.Error()) - } - - // If root command has SilentUsage flagged, - // all subcommands should respect it - if !cmd.SilenceUsage && !c.SilenceUsage { - c.Println(cmd.UsageString()) - } - } - return cmd, err -} - -func (c *Command) ValidateArgs(args []string) error { - if c.Args == nil { - return nil - } - return c.Args(c, args) -} - -func (c *Command) validateRequiredFlags() error { - flags := c.Flags() - missingFlagNames := []string{} - flags.VisitAll(func(pflag *flag.Flag) { - requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag] - if !found { - return - } - if (requiredAnnotation[0] == "true") && !pflag.Changed { - missingFlagNames = append(missingFlagNames, pflag.Name) - } - }) - - if len(missingFlagNames) > 0 { - return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) - } - return nil -} - -// InitDefaultHelpFlag adds default help flag to c. -// It is called automatically by executing the c or by calling help and usage. -// If c already has help flag, it will do nothing. -func (c *Command) InitDefaultHelpFlag() { - c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { - usage := "help for " - if c.Name() == "" { - usage += "this command" - } else { - usage += c.Name() - } - c.Flags().BoolP("help", "h", false, usage) - } -} - -// InitDefaultVersionFlag adds default version flag to c. -// It is called automatically by executing the c. -// If c already has a version flag, it will do nothing. -// If c.Version is empty, it will do nothing. -func (c *Command) InitDefaultVersionFlag() { - if c.Version == "" { - return - } - - c.mergePersistentFlags() - if c.Flags().Lookup("version") == nil { - usage := "version for " - if c.Name() == "" { - usage += "this command" - } else { - usage += c.Name() - } - c.Flags().Bool("version", false, usage) - } -} - -// InitDefaultHelpCmd adds default help command to c. -// It is called automatically by executing the c or by calling help and usage. -// If c already has help command or c has no subcommands, it will do nothing. 
-func (c *Command) InitDefaultHelpCmd() { - if !c.HasSubCommands() { - return - } - - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, - - Run: func(c *Command, args []string) { - cmd, _, e := c.Root().Find(args) - if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q\n", args) - c.Root().Usage() - } else { - cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown - cmd.Help() - } - }, - } - } - c.RemoveCommand(c.helpCommand) - c.AddCommand(c.helpCommand) -} - -// ResetCommands delete parent, subcommand and help command from c. -func (c *Command) ResetCommands() { - c.parent = nil - c.commands = nil - c.helpCommand = nil - c.parentsPflags = nil -} - -// Sorts commands by their names. -type commandSorterByName []*Command - -func (c commandSorterByName) Len() int { return len(c) } -func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } - -// Commands returns a sorted slice of child commands. -func (c *Command) Commands() []*Command { - // do not sort commands if it already sorted or sorting was disabled - if EnableCommandSorting && !c.commandsAreSorted { - sort.Sort(commandSorterByName(c.commands)) - c.commandsAreSorted = true - } - return c.commands -} - -// AddCommand adds one or more commands to this parent command. -func (c *Command) AddCommand(cmds ...*Command) { - for i, x := range cmds { - if cmds[i] == c { - panic("Command can't be a child of itself") - } - cmds[i].parent = c - // update max lengths - usageLen := len(x.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(x.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(x.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - // If global normalization function exists, update all children - if c.globNormFunc != nil { - x.SetGlobalNormalizationFunc(c.globNormFunc) - } - c.commands = append(c.commands, x) - c.commandsAreSorted = false - } -} - -// RemoveCommand removes one or more commands from a parent command. -func (c *Command) RemoveCommand(cmds ...*Command) { - commands := []*Command{} -main: - for _, command := range c.commands { - for _, cmd := range cmds { - if command == cmd { - command.parent = nil - continue main - } - } - commands = append(commands, command) - } - c.commands = commands - // recompute all lengths - c.commandsMaxUseLen = 0 - c.commandsMaxCommandPathLen = 0 - c.commandsMaxNameLen = 0 - for _, command := range c.commands { - usageLen := len(command.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(command.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(command.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - } -} - -// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. -func (c *Command) Print(i ...interface{}) { - fmt.Fprint(c.OutOrStderr(), i...) -} - -// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. 
-func (c *Command) Println(i ...interface{}) { - c.Print(fmt.Sprintln(i...)) -} - -// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. -func (c *Command) Printf(format string, i ...interface{}) { - c.Print(fmt.Sprintf(format, i...)) -} - -// CommandPath returns the full path to this command. -func (c *Command) CommandPath() string { - if c.HasParent() { - return c.Parent().CommandPath() + " " + c.Name() - } - return c.Name() -} - -// UseLine puts out the full usage for a given command (including parents). -func (c *Command) UseLine() string { - var useline string - if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use - } else { - useline = c.Use - } - if c.DisableFlagsInUseLine { - return useline - } - if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { - useline += " [flags]" - } - return useline -} - -// DebugFlags used to determine which flags have been assigned to which commands -// and which persist. -func (c *Command) DebugFlags() { - c.Println("DebugFlags called on", c.Name()) - var debugflags func(*Command) - - debugflags = func(x *Command) { - if x.HasFlags() || x.HasPersistentFlags() { - c.Println(x.Name()) - } - if x.HasFlags() { - x.flags.VisitAll(func(f *flag.Flag) { - if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } - }) - } - if x.HasPersistentFlags() { - x.pflags.VisitAll(func(f *flag.Flag) { - if x.HasFlags() { - if x.flags.Lookup(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - }) - } - c.Println(x.flagErrorBuf) - if x.HasSubCommands() { - for _, y := range x.commands { - debugflags(y) - } - } - } - - debugflags(c) -} - -// Name returns the command's name: the first word in the use line. -func (c *Command) Name() string { - name := c.Use - i := strings.Index(name, " ") - if i >= 0 { - name = name[:i] - } - return name -} - -// HasAlias determines if a given string is an alias of the command. -func (c *Command) HasAlias(s string) bool { - for _, a := range c.Aliases { - if a == s { - return true - } - } - return false -} - -// CalledAs returns the command name or alias that was used to invoke -// this command or an empty string if the command has not been called. -func (c *Command) CalledAs() string { - if c.commandCalledAs.called { - return c.commandCalledAs.name - } - return "" -} - -// hasNameOrAliasPrefix returns true if the Name or any of aliases start -// with prefix -func (c *Command) hasNameOrAliasPrefix(prefix string) bool { - if strings.HasPrefix(c.Name(), prefix) { - c.commandCalledAs.name = c.Name() - return true - } - for _, alias := range c.Aliases { - if strings.HasPrefix(alias, prefix) { - c.commandCalledAs.name = alias - return true - } - } - return false -} - -// NameAndAliases returns a list of the command name and all aliases -func (c *Command) NameAndAliases() string { - return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") -} - -// HasExample determines if the command has example. -func (c *Command) HasExample() bool { - return len(c.Example) > 0 -} - -// Runnable determines if the command is itself runnable. 
-func (c *Command) Runnable() bool { - return c.Run != nil || c.RunE != nil -} - -// HasSubCommands determines if the command has children commands. -func (c *Command) HasSubCommands() bool { - return len(c.commands) > 0 -} - -// IsAvailableCommand determines if a command is available as a non-help command -// (this includes all non deprecated/hidden commands). -func (c *Command) IsAvailableCommand() bool { - if len(c.Deprecated) != 0 || c.Hidden { - return false - } - - if c.HasParent() && c.Parent().helpCommand == c { - return false - } - - if c.Runnable() || c.HasAvailableSubCommands() { - return true - } - - return false -} - -// IsAdditionalHelpTopicCommand determines if a command is an additional -// help topic command; additional help topic command is determined by the -// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that -// are runnable/hidden/deprecated. -// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. -func (c *Command) IsAdditionalHelpTopicCommand() bool { - // if a command is runnable, deprecated, or hidden it is not a 'help' command - if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { - return false - } - - // if any non-help sub commands are found, the command is not a 'help' command - for _, sub := range c.commands { - if !sub.IsAdditionalHelpTopicCommand() { - return false - } - } - - // the command either has no sub commands, or no non-help sub commands - return true -} - -// HasHelpSubCommands determines if a command has any available 'help' sub commands -// that need to be shown in the usage/help default template under 'additional help -// topics'. -func (c *Command) HasHelpSubCommands() bool { - // return true on the first found available 'help' sub command - for _, sub := range c.commands { - if sub.IsAdditionalHelpTopicCommand() { - return true - } - } - - // the command either has no sub commands, or no available 'help' sub commands - return false -} - -// HasAvailableSubCommands determines if a command has available sub commands that -// need to be shown in the usage/help default template under 'available commands'. -func (c *Command) HasAvailableSubCommands() bool { - // return true on the first found available (non deprecated/help/hidden) - // sub command - for _, sub := range c.commands { - if sub.IsAvailableCommand() { - return true - } - } - - // the command either has no sub commands, or no available (non deprecated/help/hidden) - // sub commands - return false -} - -// HasParent determines if the command is a child command. -func (c *Command) HasParent() bool { - return c.parent != nil -} - -// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. -func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { - return c.globNormFunc -} - -// Flags returns the complete FlagSet that applies -// to this command (local and persistent declared here and by all parents). -func (c *Command) Flags() *flag.FlagSet { - if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.flags.SetOutput(c.flagErrorBuf) - } - - return c.flags -} - -// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. 
-func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { - persistentFlags := c.PersistentFlags() - - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.LocalFlags().VisitAll(func(f *flag.Flag) { - if persistentFlags.Lookup(f.Name) == nil { - out.AddFlag(f) - } - }) - return out -} - -// LocalFlags returns the local FlagSet specifically set in the current command. -func (c *Command) LocalFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.lflags.SetOutput(c.flagErrorBuf) - } - c.lflags.SortFlags = c.Flags().SortFlags - if c.globNormFunc != nil { - c.lflags.SetNormalizeFunc(c.globNormFunc) - } - - addToLocal := func(f *flag.Flag) { - if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil { - c.lflags.AddFlag(f) - } - } - c.Flags().VisitAll(addToLocal) - c.PersistentFlags().VisitAll(addToLocal) - return c.lflags -} - -// InheritedFlags returns all flags which were inherited from parents commands. -func (c *Command) InheritedFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.iflags.SetOutput(c.flagErrorBuf) - } - - local := c.LocalFlags() - if c.globNormFunc != nil { - c.iflags.SetNormalizeFunc(c.globNormFunc) - } - - c.parentsPflags.VisitAll(func(f *flag.Flag) { - if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { - c.iflags.AddFlag(f) - } - }) - return c.iflags -} - -// NonInheritedFlags returns all flags which were not inherited from parent commands. -func (c *Command) NonInheritedFlags() *flag.FlagSet { - return c.LocalFlags() -} - -// PersistentFlags returns the persistent FlagSet specifically set in the current command. -func (c *Command) PersistentFlags() *flag.FlagSet { - if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.pflags.SetOutput(c.flagErrorBuf) - } - return c.pflags -} - -// ResetFlags deletes all flags from command. -func (c *Command) ResetFlags() { - c.flagErrorBuf = new(bytes.Buffer) - c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.pflags.SetOutput(c.flagErrorBuf) - - c.lflags = nil - c.iflags = nil - c.parentsPflags = nil -} - -// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). -func (c *Command) HasFlags() bool { - return c.Flags().HasFlags() -} - -// HasPersistentFlags checks if the command contains persistent flags. -func (c *Command) HasPersistentFlags() bool { - return c.PersistentFlags().HasFlags() -} - -// HasLocalFlags checks if the command has flags specifically declared locally. -func (c *Command) HasLocalFlags() bool { - return c.LocalFlags().HasFlags() -} - -// HasInheritedFlags checks if the command has flags inherited from its parent command. -func (c *Command) HasInheritedFlags() bool { - return c.InheritedFlags().HasFlags() -} - -// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire -// structure) which are not hidden or deprecated. 
-func (c *Command) HasAvailableFlags() bool { - return c.Flags().HasAvailableFlags() -} - -// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. -func (c *Command) HasAvailablePersistentFlags() bool { - return c.PersistentFlags().HasAvailableFlags() -} - -// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden -// or deprecated. -func (c *Command) HasAvailableLocalFlags() bool { - return c.LocalFlags().HasAvailableFlags() -} - -// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are -// not hidden or deprecated. -func (c *Command) HasAvailableInheritedFlags() bool { - return c.InheritedFlags().HasAvailableFlags() -} - -// Flag climbs up the command tree looking for matching flag. -func (c *Command) Flag(name string) (flag *flag.Flag) { - flag = c.Flags().Lookup(name) - - if flag == nil { - flag = c.persistentFlag(name) - } - - return -} - -// Recursively find matching persistent flag. -func (c *Command) persistentFlag(name string) (flag *flag.Flag) { - if c.HasPersistentFlags() { - flag = c.PersistentFlags().Lookup(name) - } - - if flag == nil { - c.updateParentsPflags() - flag = c.parentsPflags.Lookup(name) - } - return -} - -// ParseFlags parses persistent flag tree and local flags. -func (c *Command) ParseFlags(args []string) error { - if c.DisableFlagParsing { - return nil - } - - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - beforeErrorBufLen := c.flagErrorBuf.Len() - c.mergePersistentFlags() - - //do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) - - err := c.Flags().Parse(args) - // Print warnings if they occurred (e.g. deprecated flag messages). - if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { - c.Print(c.flagErrorBuf.String()) - } - - return err -} - -// Parent returns a commands parent command. -func (c *Command) Parent() *Command { - return c.parent -} - -// mergePersistentFlags merges c.PersistentFlags() to c.Flags() -// and adds missing persistent flags of all parents. -func (c *Command) mergePersistentFlags() { - c.updateParentsPflags() - c.Flags().AddFlagSet(c.PersistentFlags()) - c.Flags().AddFlagSet(c.parentsPflags) -} - -// updateParentsPflags updates c.parentsPflags by adding -// new persistent flags of all parents. -// If c.parentsPflags == nil, it makes new. 
-func (c *Command) updateParentsPflags() { - if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.parentsPflags.SetOutput(c.flagErrorBuf) - c.parentsPflags.SortFlags = false - } - - if c.globNormFunc != nil { - c.parentsPflags.SetNormalizeFunc(c.globNormFunc) - } - - c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) - - c.VisitParents(func(parent *Command) { - c.parentsPflags.AddFlagSet(parent.PersistentFlags()) - }) -} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go deleted file mode 100644 index 6159c1cc1..000000000 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows - -package cobra - -var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go deleted file mode 100644 index edec728e4..000000000 --- a/vendor/github.com/spf13/cobra/command_win.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build windows - -package cobra - -import ( - "os" - "time" - - "github.com/inconshreveable/mousetrap" -) - -var preExecHookFn = preExecHook - -func preExecHook(c *Command) { - if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { - c.Print(MousetrapHelpText) - time.Sleep(5 * time.Second) - os.Exit(1) - } -} diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go deleted file mode 100644 index 889c22e27..000000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ /dev/null @@ -1,126 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "strings" -) - -// GenZshCompletionFile generates zsh completion file. -func (c *Command) GenZshCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenZshCompletion(outFile) -} - -// GenZshCompletion generates a zsh completion file and writes to the passed writer. 
-func (c *Command) GenZshCompletion(w io.Writer) error { - buf := new(bytes.Buffer) - - writeHeader(buf, c) - maxDepth := maxDepth(c) - writeLevelMapping(buf, maxDepth) - writeLevelCases(buf, maxDepth, c) - - _, err := buf.WriteTo(w) - return err -} - -func writeHeader(w io.Writer, cmd *Command) { - fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name()) -} - -func maxDepth(c *Command) int { - if len(c.Commands()) == 0 { - return 0 - } - maxDepthSub := 0 - for _, s := range c.Commands() { - subDepth := maxDepth(s) - if subDepth > maxDepthSub { - maxDepthSub = subDepth - } - } - return 1 + maxDepthSub -} - -func writeLevelMapping(w io.Writer, numLevels int) { - fmt.Fprintln(w, `_arguments \`) - for i := 1; i <= numLevels; i++ { - fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i) - fmt.Fprintln(w) - } - fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files") - fmt.Fprintln(w) -} - -func writeLevelCases(w io.Writer, maxDepth int, root *Command) { - fmt.Fprintln(w, "case $state in") - defer fmt.Fprintln(w, "esac") - - for i := 1; i <= maxDepth; i++ { - fmt.Fprintf(w, " level%d)\n", i) - writeLevel(w, root, i) - fmt.Fprintln(w, " ;;") - } - fmt.Fprintln(w, " *)") - fmt.Fprintln(w, " _arguments '*: :_files'") - fmt.Fprintln(w, " ;;") -} - -func writeLevel(w io.Writer, root *Command, i int) { - fmt.Fprintf(w, " case $words[%d] in\n", i) - defer fmt.Fprintln(w, " esac") - - commands := filterByLevel(root, i) - byParent := groupByParent(commands) - - for p, c := range byParent { - names := names(c) - fmt.Fprintf(w, " %s)\n", p) - fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " ")) - fmt.Fprintln(w, " ;;") - } - fmt.Fprintln(w, " *)") - fmt.Fprintln(w, " _arguments '*: :_files'") - fmt.Fprintln(w, " ;;") - -} - -func filterByLevel(c *Command, l int) []*Command { - cs := make([]*Command, 0) - if l == 0 { - cs = append(cs, c) - return cs - } - for _, s := range c.Commands() { - cs = append(cs, filterByLevel(s, l-1)...) - } - return cs -} - -func groupByParent(commands []*Command) map[string][]*Command { - m := make(map[string][]*Command) - for _, c := range commands { - parent := c.Parent() - if parent == nil { - continue - } - m[parent.Name()] = append(m[parent.Name()], c) - } - return m -} - -func names(commands []*Command) []string { - ns := make([]string, len(commands)) - for i, c := range commands { - ns[i] = c.Name() - } - return ns -} diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore deleted file mode 100644 index c3da29013..000000000 --- a/vendor/github.com/spf13/pflag/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.idea/* - diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index 00d04cb9b..000000000 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -sudo: false - -language: go - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get golang.org/x/lint/golint - - export PATH=$GOPATH/bin:$PATH - - go install ./... - -script: - - verify/all.sh -v - - go test ./... diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfea..000000000 --- a/vendor/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index 7eacc5bdb..000000000 --- a/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,296 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) -[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. 
- -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helper functions available to get the value stored in a Flag if you have a FlagSet but find -it difficult to keep up with all of the pointers in your code. -If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. 
All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. -```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". -```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Disable sorting of flags -`pflag` allows you to disable sorting of flags for help and usage message. 
- -**Example**: -```go -flags.BoolP("verbose", "v", false, "verbose output") -flags.String("coolflag", "yeaah", "it's really cool flag") -flags.Int("usefulflag", 777, "sometimes it's very useful") -flags.SortFlags = false -flags.PrintDefaults() -``` -**Output**: -``` - -v, --verbose verbose output - --coolflag string it's really cool flag (default "yeaah") - --usefulflag int sometimes it's very useful (default 777) -``` - - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). - -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/spf13/pflag][2] after -installation. - -[2]: http://localhost:6060/pkg/github.com/spf13/pflag -[3]: http://godoc.org/github.com/spf13/pflag diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go deleted file mode 100644 index c4c5c0bfd..000000000 --- a/vendor/github.com/spf13/pflag/bool.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import "strconv" - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Type() string { - return "bool" -} - -func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -func boolConv(sval string) (interface{}, error) { - return strconv.ParseBool(sval) -} - -// GetBool return the bool value of a flag with the given name -func (f *FlagSet) GetBool(name string) (bool, error) { - val, err := f.getFlagType(name, "bool", boolConv) - if err != nil { - return false, err - } - return val.(bool), nil -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { - f.BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. 
-func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(name string, value bool, usage string) *bool { - return f.BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, shorthand, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(name string, value bool, usage string) *bool { - return BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. -func BoolP(name, shorthand string, value bool, usage string) *bool { - b := CommandLine.BoolP(name, shorthand, value, usage) - return b -} diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go deleted file mode 100644 index 3731370d6..000000000 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ /dev/null @@ -1,185 +0,0 @@ -package pflag - -import ( - "io" - "strconv" - "strings" -) - -// -- boolSlice Value -type boolSliceValue struct { - value *[]bool - changed bool -} - -func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { - bsv := new(boolSliceValue) - bsv.value = p - *bsv.value = val - return bsv -} - -// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. -// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. -func (s *boolSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse boolean values into slice - out := make([]bool, 0, len(boolStrSlice)) - for _, boolStr := range boolStrSlice { - b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) - if err != nil { - return err - } - out = append(out, b) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *boolSliceValue) Type() string { - return "boolSlice" -} - -// String defines a "native" format for this boolean slice flag value. 
-func (s *boolSliceValue) String() string { - - boolStrSlice := make([]string, len(*s.value)) - for i, b := range *s.value { - boolStrSlice[i] = strconv.FormatBool(b) - } - - out, _ := writeAsCSV(boolStrSlice) - - return "[" + out + "]" -} - -func (s *boolSliceValue) fromString(val string) (bool, error) { - return strconv.ParseBool(val) -} - -func (s *boolSliceValue) toString(val bool) string { - return strconv.FormatBool(val) -} - -func (s *boolSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *boolSliceValue) Replace(val []string) error { - out := make([]bool, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *boolSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func boolSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []bool{}, nil - } - ss := strings.Split(val, ",") - out := make([]bool, len(ss)) - for i, t := range ss { - var err error - out[i], err = strconv.ParseBool(t) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetBoolSlice returns the []bool value of a flag with the given name. -func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { - val, err := f.getFlagType(name, "boolSlice", boolSliceConv) - if err != nil { - return []bool{}, err - } - return val.([]bool), nil -} - -// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, "", value, usage) - return &p -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func BoolSlice(name string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, "", value, usage) -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go deleted file mode 100644 index 67d530457..000000000 --- a/vendor/github.com/spf13/pflag/bytes.go +++ /dev/null @@ -1,209 +0,0 @@ -package pflag - -import ( - "encoding/base64" - "encoding/hex" - "fmt" - "strings" -) - -// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded -type bytesHexValue []byte - -// String implements pflag.Value.String. -func (bytesHex bytesHexValue) String() string { - return fmt.Sprintf("%X", []byte(bytesHex)) -} - -// Set implements pflag.Value.Set. -func (bytesHex *bytesHexValue) Set(value string) error { - bin, err := hex.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesHex = bin - - return nil -} - -// Type implements pflag.Value.Type. -func (*bytesHexValue) Type() string { - return "bytesHex" -} - -func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue { - *p = val - return (*bytesHexValue)(p) -} - -func bytesHexConv(sval string) (interface{}, error) { - - bin, err := hex.DecodeString(sval) - - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesHex return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesHex(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesHex", bytesHexConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func BytesHexVar(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. 
-// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, "", value, usage) - return p -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, shorthand, value, usage) - return p -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesHex(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, "", value, usage) -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, shorthand, value, usage) -} - -// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded -type bytesBase64Value []byte - -// String implements pflag.Value.String. -func (bytesBase64 bytesBase64Value) String() string { - return base64.StdEncoding.EncodeToString([]byte(bytesBase64)) -} - -// Set implements pflag.Value.Set. -func (bytesBase64 *bytesBase64Value) Set(value string) error { - bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesBase64 = bin - - return nil -} - -// Type implements pflag.Value.Type. -func (*bytesBase64Value) Type() string { - return "bytesBase64" -} - -func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value { - *p = val - return (*bytesBase64Value)(p) -} - -func bytesBase64ValueConv(sval string) (interface{}, error) { - - bin, err := base64.StdEncoding.DecodeString(sval) - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesBase64 return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. 
-func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, "", value, usage) - return p -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, shorthand, value, usage) - return p -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesBase64(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, "", value, usage) -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. -func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go deleted file mode 100644 index a0b2679f7..000000000 --- a/vendor/github.com/spf13/pflag/count.go +++ /dev/null @@ -1,96 +0,0 @@ -package pflag - -import "strconv" - -// -- count Value -type countValue int - -func newCountValue(val int, p *int) *countValue { - *p = val - return (*countValue)(p) -} - -func (i *countValue) Set(s string) error { - // "+1" means that no specific value was passed, so increment - if s == "+1" { - *i = countValue(*i + 1) - return nil - } - v, err := strconv.ParseInt(s, 0, 0) - *i = countValue(v) - return err -} - -func (i *countValue) Type() string { - return "count" -} - -func (i *countValue) String() string { return strconv.Itoa(int(*i)) } - -func countConv(sval string) (interface{}, error) { - i, err := strconv.Atoi(sval) - if err != nil { - return nil, err - } - return i, nil -} - -// GetCount return the int value of a flag with the given name -func (f *FlagSet) GetCount(name string) (int, error) { - val, err := f.getFlagType(name, "count", countConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// CountVar defines a count flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) CountVar(p *int, name string, usage string) { - f.CountVarP(p, name, "", usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. -func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { - flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "+1" -} - -// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set -func CountVar(p *int, name string, usage string) { - CommandLine.CountVar(p, name, usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. 
-func CountVarP(p *int, name, shorthand string, usage string) { - CommandLine.CountVarP(p, name, shorthand, usage) -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) Count(name string, usage string) *int { - p := new(int) - f.CountVarP(p, name, "", usage) - return p -} - -// CountP is like Count only takes a shorthand for the flag name. -func (f *FlagSet) CountP(name, shorthand string, usage string) *int { - p := new(int) - f.CountVarP(p, name, shorthand, usage) - return p -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line -func Count(name string, usage string) *int { - return CommandLine.CountP(name, "", usage) -} - -// CountP is like Count only takes a shorthand for the flag name. -func CountP(name, shorthand string, usage string) *int { - return CommandLine.CountP(name, shorthand, usage) -} diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go deleted file mode 100644 index e9debef88..000000000 --- a/vendor/github.com/spf13/pflag/duration.go +++ /dev/null @@ -1,86 +0,0 @@ -package pflag - -import ( - "time" -) - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Type() string { - return "duration" -} - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -func durationConv(sval string) (interface{}, error) { - return time.ParseDuration(sval) -} - -// GetDuration return the duration value of a flag with the given name -func (f *FlagSet) GetDuration(name string) (time.Duration, error) { - val, err := f.getFlagType(name, "duration", durationConv) - if err != nil { - return 0, err - } - return val.(time.Duration), nil -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. 
-func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, "", value, usage) - return p -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, shorthand, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(name string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, "", value, usage) -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go deleted file mode 100644 index badadda53..000000000 --- a/vendor/github.com/spf13/pflag/duration_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strings" - "time" -) - -// -- durationSlice Value -type durationSliceValue struct { - value *[]time.Duration - changed bool -} - -func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue { - dsv := new(durationSliceValue) - dsv.value = p - *dsv.value = val - return dsv -} - -func (s *durationSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *durationSliceValue) Type() string { - return "durationSlice" -} - -func (s *durationSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%s", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *durationSliceValue) fromString(val string) (time.Duration, error) { - return time.ParseDuration(val) -} - -func (s *durationSliceValue) toString(val time.Duration) string { - return fmt.Sprintf("%s", val) -} - -func (s *durationSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *durationSliceValue) Replace(val []string) error { - out := make([]time.Duration, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *durationSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func durationSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []time.Duration{}, nil - } - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetDurationSlice returns the []time.Duration value of a flag with the given name -func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) { - val, err := f.getFlagType(name, "durationSlice", durationSliceConv) - if err != nil { - return []time.Duration{}, err - } - return val.([]time.Duration), nil -} - -// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string. -// The argument p points to a []time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string. -// The argument p points to a duration[] variable in which to store the value of the flag. -func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. 
-func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, "", value, usage) - return &p -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. -func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, "", value, usage) -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. -func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go deleted file mode 100644 index 24a5036e9..000000000 --- a/vendor/github.com/spf13/pflag/flag.go +++ /dev/null @@ -1,1239 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the GNU extensions to the POSIX recommendations -for command-line options. See -http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -Usage: - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - - import flag "github.com/spf13/pflag" - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - var ip = flag.Int("flagname", 1234, "help message for flagname") -If you like, you can bind the flag to a variable using the Var() functions. - var flagvar int - func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") - } -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, "name", "help message for flagname") -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - flag.Parse() -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. 
- -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - var ip = flag.IntP("flagname", "f", 1234, "help message") - var flagvar bool - func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") - } - flag.VarP(&flagval, "varname", "v", "help message") -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -Command line flag syntax: - --flag // boolean flags only - --flag=x - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags. - // boolean flags - -f - -abc - // non-boolean flags - -n 1234 - -Ifile - // mixed - -abcs "hello" - -abcn1234 - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. -*/ -package pflag - -import ( - "bytes" - "errors" - goflag "flag" - "fmt" - "io" - "os" - "sort" - "strings" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("pflag: help requested") - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -const ( - // ContinueOnError will return an err from Parse() if an error is found - ContinueOnError ErrorHandling = iota - // ExitOnError will call os.Exit(2) if an error is found when parsing - ExitOnError - // PanicOnError will panic() if an error is found when parsing flags - PanicOnError -) - -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { - // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags - UnknownFlags bool -} - -// NormalizedName is a flag name that has been normalized according to rules -// for the FlagSet (e.g. making '-' and '_' equivalent). -type NormalizedName string - -// A FlagSet represents a set of defined flags. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - - // SortFlags is used to indicate, if user wants to have sorted flags in - // help/usage messages. 
- SortFlags bool - - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist - - name string - parsed bool - actual map[NormalizedName]*Flag - orderedActual []*Flag - sortedActual []*Flag - formal map[NormalizedName]*Flag - orderedFormal []*Flag - sortedFormal []*Flag - shorthands map[byte]*Flag - args []string // arguments after flags - argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- - errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor - interspersed bool // allow interspersed option/non-option args - normalizeNameFunc func(f *FlagSet, name string) NormalizedName - - addedGoFlagSets []*goflag.FlagSet -} - -// A Flag represents the state of a flag. -type Flag struct { - Name string // name as it appears on command line - Shorthand string // one-letter abbreviated flag - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message - Changed bool // If the user set the value (or if left to default) - NoOptDefVal string // default value (as text); if the flag is on the command line without any options - Deprecated string // If this flag is deprecated, this string is the new or now thing to use - Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text - ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use - Annotations map[string][]string // used by cobra.Command bash autocomple code -} - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -type Value interface { - String() string - Set(string) error - Type() string -} - -// SliceValue is a secondary interface to all flags which hold a list -// of values. This allows full control over the value of list flags, -// and avoids complicated marshalling and unmarshalling to csv. -type SliceValue interface { - // Append adds the specified value to the end of the flag value list. - Append(string) error - // Replace will fully overwrite any data currently in the flag value list. - Replace([]string) error - // GetSlice returns the flag value list as an array of strings. - GetSlice() []string -} - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[NormalizedName]*Flag) []*Flag { - list := make(sort.StringSlice, len(flags)) - i := 0 - for k := range flags { - list[i] = string(k) - i++ - } - list.Sort() - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[NormalizedName(name)] - } - return result -} - -// SetNormalizeFunc allows you to add a function which can translate flag names. -// Flags added to the FlagSet will be translated and then when anything tries to -// look up the flag that will also be translated. So it would be possible to create -// a flag named "getURL" and have it translated to "geturl". A user could then pass -// "--getUrl" which may also be translated to "geturl" and everything will work. 
-func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { - f.normalizeNameFunc = n - f.sortedFormal = f.sortedFormal[:0] - for fname, flag := range f.formal { - nname := f.normalizeFlagName(flag.Name) - if fname == nname { - continue - } - flag.Name = string(nname) - delete(f.formal, fname) - f.formal[nname] = flag - if _, set := f.actual[fname]; set { - delete(f.actual, fname) - f.actual[nname] = flag - } - } -} - -// GetNormalizeFunc returns the previously set NormalizeFunc of a function which -// does no translation, if not set previously. -func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { - if f.normalizeNameFunc != nil { - return f.normalizeNameFunc - } - return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } -} - -func (f *FlagSet) normalizeFlagName(name string) NormalizedName { - n := f.GetNormalizeFunc() - return n(f, name) -} - -func (f *FlagSet) out() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - if len(f.formal) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.formal) != len(f.sortedFormal) { - f.sortedFormal = sortFlags(f.formal) - } - flags = f.sortedFormal - } else { - flags = f.orderedFormal - } - - for _, flag := range flags { - fn(flag) - } -} - -// HasFlags returns a bool to indicate if the FlagSet has any flags defined. -func (f *FlagSet) HasFlags() bool { - return len(f.formal) > 0 -} - -// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags -// that are not hidden. -func (f *FlagSet) HasAvailableFlags() bool { - for _, flag := range f.formal { - if !flag.Hidden { - return true - } - } - return false -} - -// VisitAll visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func (f *FlagSet) Visit(fn func(*Flag)) { - if len(f.actual) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.actual) != len(f.sortedActual) { - f.sortedActual = sortFlags(f.actual) - } - flags = f.sortedActual - } else { - flags = f.orderedActual - } - - for _, flag := range flags { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.lookup(f.normalizeFlagName(name)) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -// It panics, if len(name) > 1. 
-func (f *FlagSet) ShorthandLookup(name string) *Flag { - if name == "" { - return nil - } - if len(name) > 1 { - msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - c := name[0] - return f.shorthands[c] -} - -// lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) lookup(name NormalizedName) *Flag { - return f.formal[name] -} - -// func to return a given type for a given flag name -func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { - flag := f.Lookup(name) - if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) - return nil, err - } - - if flag.Value.Type() != ftype { - err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) - return nil, err - } - - sval := flag.Value.String() - result, err := convFunc(sval) - if err != nil { - return nil, err - } - return result, nil -} - -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. -func (f *FlagSet) ArgsLenAtDash() int { - return f.argsLenAtDash -} - -// MarkDeprecated indicated that a flag is deprecated in your program. It will -// continue to function but will not show up in help or usage messages. Using -// this flag will also print the given usageMessage. -func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.Deprecated = usageMessage - flag.Hidden = true - return nil -} - -// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your -// program. It will continue to function but will not show up in help or usage -// messages. Using this flag will also print the given usageMessage. -func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.ShorthandDeprecated = usageMessage - return nil -} - -// MarkHidden sets a flag to 'hidden' in your program. It will continue to -// function but will not show up in help or usage messages. -func (f *FlagSet) MarkHidden(name string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - flag.Hidden = true - return nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.Lookup(name) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -func ShorthandLookup(name string) *Flag { - return CommandLine.ShorthandLookup(name) -} - -// Set sets the value of the named flag. 
-func (f *FlagSet) Set(name, value string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - - err := flag.Value.Set(value) - if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) - } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) - } - - if !flag.Changed { - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[normalName] = flag - f.orderedActual = append(f.orderedActual, flag) - - flag.Changed = true - } - - if flag.Deprecated != "" { - fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - return nil -} - -// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet. -// This is sometimes used by spf13/cobra programs which want to generate additional -// bash completion information. -func (f *FlagSet) SetAnnotation(name, key string, values []string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - flag.Annotations[key] = values - return nil -} - -// Changed returns true if the flag was explicitly set during Parse() and false -// otherwise -func (f *FlagSet) Changed(name string) bool { - flag := f.Lookup(name) - // If a flag doesn't exist, it wasn't changed.... - if flag == nil { - return false - } - return flag.Changed -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. -func (f *FlagSet) PrintDefaults() { - usages := f.FlagUsages() - fmt.Fprint(f.out(), usages) -} - -// defaultIsZeroValue returns true if the default value for this flag represents -// a zero value. -func (f *Flag) defaultIsZeroValue() bool { - switch f.Value.(type) { - case boolFlag: - return f.DefValue == "false" - case *durationValue: - // Beginning in Go 1.7, duration zero values are "0s" - return f.DefValue == "0" || f.DefValue == "0s" - case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value: - return f.DefValue == "0" - case *stringValue: - return f.DefValue == "" - case *ipValue, *ipMaskValue, *ipNetValue: - return f.DefValue == "" - case *intSliceValue, *stringSliceValue, *stringArrayValue: - return f.DefValue == "[]" - default: - switch f.Value.String() { - case "false": - return true - case "": - return true - case "": - return true - case "0": - return true - } - return false - } -} - -// UnquoteUsage extracts a back-quoted name from the usage -// string for a flag and returns it and the un-quoted usage. -// Given "a `name` to show" it returns ("name", "a name to show"). -// If there are no back quotes, the name is an educated guess of the -// type of the flag's value, or the empty string if the flag is boolean. -func UnquoteUsage(flag *Flag) (name string, usage string) { - // Look for a back-quoted name, but avoid the strings package. 
- usage = flag.Usage - for i := 0; i < len(usage); i++ { - if usage[i] == '`' { - for j := i + 1; j < len(usage); j++ { - if usage[j] == '`' { - name = usage[i+1 : j] - usage = usage[:i] + name + usage[j+1:] - return name, usage - } - } - break // Only one back quote; use type name. - } - } - - name = flag.Value.Type() - switch name { - case "bool": - name = "" - case "float64": - name = "float" - case "int64": - name = "int" - case "uint64": - name = "uint" - case "stringSlice": - name = "strings" - case "intSlice": - name = "ints" - case "uintSlice": - name = "uints" - case "boolSlice": - name = "bools" - } - - return -} - -// Splits the string `s` on whitespace into an initial substring up to -// `i` runes in length and the remainder. Will go `slop` over `i` if -// that encompasses the entire string (which allows the caller to -// avoid short orphan words on the final line). -func wrapN(i, slop int, s string) (string, string) { - if i+slop > len(s) { - return s, "" - } - - w := strings.LastIndexAny(s[:i], " \t\n") - if w <= 0 { - return s, "" - } - nlPos := strings.LastIndex(s[:i], "\n") - if nlPos > 0 && nlPos < w { - return s[:nlPos], s[nlPos+1:] - } - return s[:w], s[w+1:] -} - -// Wraps the string `s` to a maximum width `w` with leading indent -// `i`. The first line is not indented (this is assumed to be done by -// caller). Pass `w` == 0 to do no wrapping -func wrap(i, w int, s string) string { - if w == 0 { - return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1) - } - - // space between indent i and end of line width w into which - // we should wrap the text. - wrap := w - i - - var r, l string - - // Not enough space for sensible wrapping. Wrap as a block on - // the next line instead. - if wrap < 24 { - i = 16 - wrap = w - i - r += "\n" + strings.Repeat(" ", i) - } - // If still not enough space then don't even try to wrap. - if wrap < 24 { - return strings.Replace(s, "\n", r, -1) - } - - // Try to avoid short orphan words on the final line, by - // allowing wrapN to go a bit over if that would fit in the - // remainder of the line. - slop := 5 - wrap = wrap - slop - - // Handle first line, which is indented by the caller (or the - // special case above) - l, s = wrapN(wrap, slop, s) - r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1) - - // Now wrap the rest - for s != "" { - var t string - - t, s = wrapN(wrap, slop, s) - r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1) - } - - return r - -} - -// FlagUsagesWrapped returns a string containing the usage information -// for all flags in the FlagSet. 
Wrapped to `cols` columns (0 for no -// wrapping) -func (f *FlagSet) FlagUsagesWrapped(cols int) string { - buf := new(bytes.Buffer) - - lines := make([]string, 0, len(f.formal)) - - maxlen := 0 - f.VisitAll(func(flag *Flag) { - if flag.Hidden { - return - } - - line := "" - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) - } else { - line = fmt.Sprintf(" --%s", flag.Name) - } - - varname, usage := UnquoteUsage(flag) - if varname != "" { - line += " " + varname - } - if flag.NoOptDefVal != "" { - switch flag.Value.Type() { - case "string": - line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": - if flag.NoOptDefVal != "true" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - case "count": - if flag.NoOptDefVal != "+1" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - default: - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - } - - // This special character will be replaced with spacing once the - // correct alignment is calculated - line += "\x00" - if len(line) > maxlen { - maxlen = len(line) - } - - line += usage - if !flag.defaultIsZeroValue() { - if flag.Value.Type() == "string" { - line += fmt.Sprintf(" (default %q)", flag.DefValue) - } else { - line += fmt.Sprintf(" (default %s)", flag.DefValue) - } - } - if len(flag.Deprecated) != 0 { - line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated) - } - - lines = append(lines, line) - }) - - for _, line := range lines { - sidx := strings.Index(line, "\x00") - spacing := strings.Repeat(" ", maxlen-sidx) - // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx - fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) - } - - return buf.String() -} - -// FlagUsages returns a string containing the usage information for all flags in -// the FlagSet -func (f *FlagSet) FlagUsages() string { - return f.FlagUsagesWrapped(0) -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -// By default it prints a simple header and calls PrintDefaults; for details about the -// format of the output and how to control it, see the documentation for PrintDefaults. -var Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// NFlag returns the number of flags that have been set. -func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. 
-func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (f *FlagSet) NArg() int { return len(f.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (f *FlagSet) Args() []string { return f.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, name string, usage string) { - f.VarP(value, name, "", usage) -} - -// VarPF is like VarP, but returns the flag created -func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { - // Remember the default value as a string; it won't change. - flag := &Flag{ - Name: name, - Shorthand: shorthand, - Usage: usage, - Value: value, - DefValue: value.String(), - } - f.AddFlag(flag) - return flag -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { - f.VarPF(value, name, shorthand, usage) -} - -// AddFlag will add the flag to the FlagSet -func (f *FlagSet) AddFlag(flag *Flag) { - normalizedFlagName := f.normalizeFlagName(flag.Name) - - _, alreadyThere := f.formal[normalizedFlagName] - if alreadyThere { - msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if f.formal == nil { - f.formal = make(map[NormalizedName]*Flag) - } - - flag.Name = string(normalizedFlagName) - f.formal[normalizedFlagName] = flag - f.orderedFormal = append(f.orderedFormal, flag) - - if flag.Shorthand == "" { - return - } - if len(flag.Shorthand) > 1 { - msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - if f.shorthands == nil { - f.shorthands = make(map[byte]*Flag) - } - c := flag.Shorthand[0] - used, alreadyThere := f.shorthands[c] - if alreadyThere { - msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - f.shorthands[c] = flag -} - -// AddFlagSet adds one FlagSet to another. If a flag is already present in f -// the flag from newSet will be ignored. -func (f *FlagSet) AddFlagSet(newSet *FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(flag *Flag) { - if f.Lookup(flag.Name) == nil { - f.AddFlag(flag) - } - }) -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. 
-func Var(value Value, name string, usage string) { - CommandLine.VarP(value, name, "", usage) -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func VarP(value Value, name, shorthand, usage string) { - CommandLine.VarP(value, name, shorthand, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.out(), err) - f.usage() - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) -func stripUnknownFlagValue(args []string) []string { - if len(args) == 0 { - //--unknown - return args - } - - first := args[0] - if len(first) > 0 && first[0] == '-' { - //--unknown --next-flag ... - return args - } - - //--unknown arg ... (args will be arg ...) - if len(args) > 1 { - return args[1:] - } - return nil -} - -func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - name := s[2:] - if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) - return - } - - split := strings.SplitN(name, "=", 2) - name = split[0] - flag, exists := f.formal[f.normalizeFlagName(name)] - - if !exists { - switch { - case name == "help": - f.usage() - return a, ErrHelp - case f.ParseErrorsWhitelist.UnknownFlags: - // --unknown=unknownval arg ... - // we do not want to lose arg in this case - if len(split) >= 2 { - return a, nil - } - - return stripUnknownFlagValue(a), nil - default: - err = f.failf("unknown flag: --%s", name) - return - } - } - - var value string - if len(split) == 2 { - // '--flag=arg' - value = split[1] - } else if flag.NoOptDefVal != "" { - // '--flag' (arg was optional) - value = flag.NoOptDefVal - } else if len(a) > 0 { - // '--flag arg' - value = a[0] - a = a[1:] - } else { - // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) - return - } - - err = fn(flag, value) - if err != nil { - f.failf(err.Error()) - } - return -} - -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { - outArgs = args - - if strings.HasPrefix(shorthands, "test.") { - return - } - - outShorts = shorthands[1:] - c := shorthands[0] - - flag, exists := f.shorthands[c] - if !exists { - switch { - case c == 'h': - f.usage() - err = ErrHelp - return - case f.ParseErrorsWhitelist.UnknownFlags: - // '-f=arg arg ...' 
- // we do not want to lose arg in this case - if len(shorthands) > 2 && shorthands[1] == '=' { - outShorts = "" - return - } - - outArgs = stripUnknownFlagValue(outArgs) - return - default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) - return - } - } - - var value string - if len(shorthands) > 2 && shorthands[1] == '=' { - // '-f=arg' - value = shorthands[2:] - outShorts = "" - } else if flag.NoOptDefVal != "" { - // '-f' (arg was optional) - value = flag.NoOptDefVal - } else if len(shorthands) > 1 { - // '-farg' - value = shorthands[1:] - outShorts = "" - } else if len(args) > 0 { - // '-f arg' - value = args[0] - outArgs = args[1:] - } else { - // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) - return - } - - if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) - } - - err = fn(flag, value) - if err != nil { - f.failf(err.Error()) - } - return -} - -func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - shorthands := s[1:] - - // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). - for len(shorthands) > 0 { - shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) - if err != nil { - return - } - } - - return -} - -func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { - for len(args) > 0 { - s := args[0] - args = args[1:] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - if !f.interspersed { - f.args = append(f.args, s) - f.args = append(f.args, args...) - return nil - } - f.args = append(f.args, s) - continue - } - - if s[1] == '-' { - if len(s) == 2 { // "--" terminates the flags - f.argsLenAtDash = len(f.args) - f.args = append(f.args, args...) - break - } - args, err = f.parseLongArg(s, args, fn) - } else { - args, err = f.parseShortArg(s, args, fn) - } - if err != nil { - return - } - } - return -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (f *FlagSet) Parse(arguments []string) error { - if f.addedGoFlagSets != nil { - for _, goFlagSet := range f.addedGoFlagSets { - goFlagSet.Parse(nil) - } - } - f.parsed = true - - if len(arguments) < 0 { - return nil - } - - f.args = make([]string, 0, len(arguments)) - - set := func(flag *Flag, value string) error { - return f.Set(flag.Name, value) - } - - err := f.parseArgs(arguments, set) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - fmt.Println(err) - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -type parseFunc func(flag *Flag, value string) error - -// ParseAll parses flag definitions from the argument list, which should not -// include the command name. The arguments for fn are flag and value. Must be -// called after all flags in the FlagSet are defined and before flags are -// accessed by the program. The return value will be ErrHelp if -help was set -// but not defined. 
-func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error { - f.parsed = true - f.args = make([]string, 0, len(arguments)) - - err := f.parseArgs(arguments, fn) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// ParseAll parses the command-line flags from os.Args[1:] and called fn for each. -// The arguments for fn are flag and value. Must be called after all flags are -// defined and before flags are accessed by the program. -func ParseAll(fn func(flag *Flag, value string) error) { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.ParseAll(os.Args[1:], fn) -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func SetInterspersed(interspersed bool) { - CommandLine.SetInterspersed(interspersed) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name, -// error handling property and SortFlags set to true. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - argsLenAtDash: -1, - interspersed: true, - SortFlags: true, - } - return f -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func (f *FlagSet) SetInterspersed(interspersed bool) { - f.interspersed = interspersed -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. 
-func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling - f.argsLenAtDash = -1 -} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go deleted file mode 100644 index a243f81f7..000000000 --- a/vendor/github.com/spf13/pflag/float32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- float32 Value -type float32Value float32 - -func newFloat32Value(val float32, p *float32) *float32Value { - *p = val - return (*float32Value)(p) -} - -func (f *float32Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = float32Value(v) - return err -} - -func (f *float32Value) Type() string { - return "float32" -} - -func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } - -func float32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseFloat(sval, 32) - if err != nil { - return 0, err - } - return float32(v), nil -} - -// GetFloat32 return the float32 value of a flag with the given name -func (f *FlagSet) GetFloat32(name string) (float32, error) { - val, err := f.getFlagType(name, "float32", float32Conv) - if err != nil { - return 0, err - } - return val.(float32), nil -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func Float32Var(p *float32, name string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, "", value, usage) - return p -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, shorthand, value, usage) - return p -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func Float32(name string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, "", value, usage) -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
-func Float32P(name, shorthand string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go deleted file mode 100644 index caa352741..000000000 --- a/vendor/github.com/spf13/pflag/float32_slice.go +++ /dev/null @@ -1,174 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- float32Slice Value -type float32SliceValue struct { - value *[]float32 - changed bool -} - -func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { - isv := new(float32SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *float32SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]float32, len(ss)) - for i, d := range ss { - var err error - var temp64 float64 - temp64, err = strconv.ParseFloat(d, 32) - if err != nil { - return err - } - out[i] = float32(temp64) - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *float32SliceValue) Type() string { - return "float32Slice" -} - -func (s *float32SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%f", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *float32SliceValue) fromString(val string) (float32, error) { - t64, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(t64), nil -} - -func (s *float32SliceValue) toString(val float32) string { - return fmt.Sprintf("%f", val) -} - -func (s *float32SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *float32SliceValue) Replace(val []string) error { - out := make([]float32, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *float32SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func float32SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []float32{}, nil - } - ss := strings.Split(val, ",") - out := make([]float32, len(ss)) - for i, d := range ss { - var err error - var temp64 float64 - temp64, err = strconv.ParseFloat(d, 32) - if err != nil { - return nil, err - } - out[i] = float32(temp64) - - } - return out, nil -} - -// GetFloat32Slice return the []float32 value of a flag with the given name -func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { - val, err := f.getFlagType(name, "float32Slice", float32SliceConv) - if err != nil { - return []float32{}, err - } - return val.([]float32), nil -} - -// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. -// The argument p points to a []float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { - f.VarP(newFloat32SliceValue(value, p), name, "", usage) -} - -// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { - f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) -} - -// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. -// The argument p points to a float32[] variable in which to store the value of the flag. -func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { - CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) -} - -// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { - CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) -} - -// Float32Slice defines a []float32 flag with specified name, default value, and usage string. -// The return value is the address of a []float32 variable that stores the value of the flag. -func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { - p := []float32{} - f.Float32SliceVarP(&p, name, "", value, usage) - return &p -} - -// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { - p := []float32{} - f.Float32SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Float32Slice defines a []float32 flag with specified name, default value, and usage string. -// The return value is the address of a []float32 variable that stores the value of the flag. -func Float32Slice(name string, value []float32, usage string) *[]float32 { - return CommandLine.Float32SliceP(name, "", value, usage) -} - -// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. -func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { - return CommandLine.Float32SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go deleted file mode 100644 index 04b5492a7..000000000 --- a/vendor/github.com/spf13/pflag/float64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Type() string { - return "float64" -} - -func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } - -func float64Conv(sval string) (interface{}, error) { - return strconv.ParseFloat(sval, 64) -} - -// GetFloat64 return the float64 value of a flag with the given name -func (f *FlagSet) GetFloat64(name string) (float64, error) { - val, err := f.getFlagType(name, "float64", float64Conv) - if err != nil { - return 0, err - } - return val.(float64), nil -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. 
-func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, "", value, usage) - return p -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, shorthand, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(name string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, "", value, usage) -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func Float64P(name, shorthand string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go deleted file mode 100644 index 85bf3073d..000000000 --- a/vendor/github.com/spf13/pflag/float64_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- float64Slice Value -type float64SliceValue struct { - value *[]float64 - changed bool -} - -func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { - isv := new(float64SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *float64SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]float64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseFloat(d, 64) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *float64SliceValue) Type() string { - return "float64Slice" -} - -func (s *float64SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%f", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *float64SliceValue) fromString(val string) (float64, error) { - return strconv.ParseFloat(val, 64) -} - -func (s *float64SliceValue) toString(val float64) string { - return fmt.Sprintf("%f", val) -} - -func (s *float64SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *float64SliceValue) Replace(val []string) error { - out := make([]float64, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *float64SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func float64SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []float64{}, nil - } - ss := strings.Split(val, ",") - out := make([]float64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseFloat(d, 64) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetFloat64Slice return the []float64 value of a flag with the given name -func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { - val, err := f.getFlagType(name, "float64Slice", float64SliceConv) - if err != nil { - return []float64{}, err - } - return val.([]float64), nil -} - -// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. -// The argument p points to a []float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { - f.VarP(newFloat64SliceValue(value, p), name, "", usage) -} - -// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { - f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) -} - -// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. -// The argument p points to a float64[] variable in which to store the value of the flag. -func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { - CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) -} - -// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { - CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) -} - -// Float64Slice defines a []float64 flag with specified name, default value, and usage string. -// The return value is the address of a []float64 variable that stores the value of the flag. 
-func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { - p := []float64{} - f.Float64SliceVarP(&p, name, "", value, usage) - return &p -} - -// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { - p := []float64{} - f.Float64SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Float64Slice defines a []float64 flag with specified name, default value, and usage string. -// The return value is the address of a []float64 variable that stores the value of the flag. -func Float64Slice(name string, value []float64, usage string) *[]float64 { - return CommandLine.Float64SliceP(name, "", value, usage) -} - -// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. -func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { - return CommandLine.Float64SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod deleted file mode 100644 index b2287eec1..000000000 --- a/vendor/github.com/spf13/pflag/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/spf13/pflag - -go 1.12 diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go deleted file mode 100644 index d3dd72b7f..000000000 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - goflag "flag" - "reflect" - "strings" -) - -// flagValueWrapper implements pflag.Value around a flag.Value. The main -// difference here is the addition of the Type method that returns a string -// name of the type. As this is generally unknown, we approximate that with -// reflection. -type flagValueWrapper struct { - inner goflag.Value - flagType string -} - -// We are just copying the boolFlag interface out of goflag as that is what -// they use to decide if a flag should get "true" when no arg is given. -type goBoolFlag interface { - goflag.Value - IsBoolFlag() bool -} - -func wrapFlagValue(v goflag.Value) Value { - // If the flag.Value happens to also be a pflag.Value, just use it directly. - if pv, ok := v.(Value); ok { - return pv - } - - pv := &flagValueWrapper{ - inner: v, - } - - t := reflect.TypeOf(v) - if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { - t = t.Elem() - } - - pv.flagType = strings.TrimSuffix(t.Name(), "Value") - return pv -} - -func (v *flagValueWrapper) String() string { - return v.inner.String() -} - -func (v *flagValueWrapper) Set(s string) error { - return v.inner.Set(s) -} - -func (v *flagValueWrapper) Type() string { - return v.flagType -} - -// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag -// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei -// with both `-v` and `--v` in flags. If the golang flag was more than a single -// character (ex: `verbose`) it will only be accessible via `--verbose` -func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { - // Remember the default value as a string; it won't change. 
- flag := &Flag{ - Name: goflag.Name, - Usage: goflag.Usage, - Value: wrapFlagValue(goflag.Value), - // Looks like golang flags don't set DefValue correctly :-( - //DefValue: goflag.DefValue, - DefValue: goflag.Value.String(), - } - // Ex: if the golang flag was -v, allow both -v and --v to work - if len(flag.Name) == 1 { - flag.Shorthand = flag.Name - } - if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { - flag.NoOptDefVal = "true" - } - return flag -} - -// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet -func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { - if f.Lookup(goflag.Name) != nil { - return - } - newflag := PFlagFromGoFlag(goflag) - f.AddFlag(newflag) -} - -// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet -func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(goflag *goflag.Flag) { - f.AddGoFlag(goflag) - }) - if f.addedGoFlagSets == nil { - f.addedGoFlagSets = make([]*goflag.FlagSet, 0) - } - f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) -} diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go deleted file mode 100644 index 1474b89df..000000000 --- a/vendor/github.com/spf13/pflag/int.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Type() string { - return "int" -} - -func (i *intValue) String() string { return strconv.Itoa(int(*i)) } - -func intConv(sval string) (interface{}, error) { - return strconv.Atoi(sval) -} - -// GetInt return the int value of a flag with the given name -func (f *FlagSet) GetInt(name string) (int, error) { - val, err := f.getFlagType(name, "int", intConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { - f.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { - f.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func IntVarP(p *int, name, shorthand string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(name string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, "", value, usage) - return p -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, shorthand, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return CommandLine.IntP(name, "", value, usage) -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. -func IntP(name, shorthand string, value int, usage string) *int { - return CommandLine.IntP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go deleted file mode 100644 index f1a01d05e..000000000 --- a/vendor/github.com/spf13/pflag/int16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int16 Value -type int16Value int16 - -func newInt16Value(val int16, p *int16) *int16Value { - *p = val - return (*int16Value)(p) -} - -func (i *int16Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 16) - *i = int16Value(v) - return err -} - -func (i *int16Value) Type() string { - return "int16" -} - -func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 16) - if err != nil { - return 0, err - } - return int16(v), nil -} - -// GetInt16 returns the int16 value of a flag with the given name -func (f *FlagSet) GetInt16(name string) (int16, error) { - val, err := f.getFlagType(name, "int16", int16Conv) - if err != nil { - return 0, err - } - return val.(int16), nil -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. -// The argument p points to an int16 variable in which to store the value of the flag. -func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. -// The argument p points to an int16 variable in which to store the value of the flag. -func Int16Var(p *int16, name string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. -// The return value is the address of an int16 variable that stores the value of the flag. -func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, "", value, usage) - return p -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, shorthand, value, usage) - return p -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. 
-// The return value is the address of an int16 variable that stores the value of the flag. -func Int16(name string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, "", value, usage) -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. -func Int16P(name, shorthand string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go deleted file mode 100644 index 9b95944f0..000000000 --- a/vendor/github.com/spf13/pflag/int32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int32 Value -type int32Value int32 - -func newInt32Value(val int32, p *int32) *int32Value { - *p = val - return (*int32Value)(p) -} - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 32) - *i = int32Value(v) - return err -} - -func (i *int32Value) Type() string { - return "int32" -} - -func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 32) - if err != nil { - return 0, err - } - return int32(v), nil -} - -// GetInt32 return the int32 value of a flag with the given name -func (f *FlagSet) GetInt32(name string) (int32, error) { - val, err := f.getFlagType(name, "int32", int32Conv) - if err != nil { - return 0, err - } - return val.(int32), nil -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func Int32Var(p *int32, name string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, "", value, usage) - return p -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, shorthand, value, usage) - return p -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. 
-func Int32(name string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, "", value, usage) -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func Int32P(name, shorthand string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go deleted file mode 100644 index ff128ff06..000000000 --- a/vendor/github.com/spf13/pflag/int32_slice.go +++ /dev/null @@ -1,174 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- int32Slice Value -type int32SliceValue struct { - value *[]int32 - changed bool -} - -func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { - isv := new(int32SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *int32SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int32, len(ss)) - for i, d := range ss { - var err error - var temp64 int64 - temp64, err = strconv.ParseInt(d, 0, 32) - if err != nil { - return err - } - out[i] = int32(temp64) - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *int32SliceValue) Type() string { - return "int32Slice" -} - -func (s *int32SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *int32SliceValue) fromString(val string) (int32, error) { - t64, err := strconv.ParseInt(val, 0, 32) - if err != nil { - return 0, err - } - return int32(t64), nil -} - -func (s *int32SliceValue) toString(val int32) string { - return fmt.Sprintf("%d", val) -} - -func (s *int32SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *int32SliceValue) Replace(val []string) error { - out := make([]int32, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *int32SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func int32SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int32{}, nil - } - ss := strings.Split(val, ",") - out := make([]int32, len(ss)) - for i, d := range ss { - var err error - var temp64 int64 - temp64, err = strconv.ParseInt(d, 0, 32) - if err != nil { - return nil, err - } - out[i] = int32(temp64) - - } - return out, nil -} - -// GetInt32Slice return the []int32 value of a flag with the given name -func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { - val, err := f.getFlagType(name, "int32Slice", int32SliceConv) - if err != nil { - return []int32{}, err - } - return val.([]int32), nil -} - -// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. -// The argument p points to a []int32 variable in which to store the value of the flag. 
-func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { - f.VarP(newInt32SliceValue(value, p), name, "", usage) -} - -// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { - f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) -} - -// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. -// The argument p points to a int32[] variable in which to store the value of the flag. -func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { - CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) -} - -// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { - CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) -} - -// Int32Slice defines a []int32 flag with specified name, default value, and usage string. -// The return value is the address of a []int32 variable that stores the value of the flag. -func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { - p := []int32{} - f.Int32SliceVarP(&p, name, "", value, usage) - return &p -} - -// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { - p := []int32{} - f.Int32SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Int32Slice defines a []int32 flag with specified name, default value, and usage string. -// The return value is the address of a []int32 variable that stores the value of the flag. -func Int32Slice(name string, value []int32, usage string) *[]int32 { - return CommandLine.Int32SliceP(name, "", value, usage) -} - -// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. -func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { - return CommandLine.Int32SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go deleted file mode 100644 index 0026d781d..000000000 --- a/vendor/github.com/spf13/pflag/int64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Type() string { - return "int64" -} - -func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int64Conv(sval string) (interface{}, error) { - return strconv.ParseInt(sval, 0, 64) -} - -// GetInt64 return the int64 value of a flag with the given name -func (f *FlagSet) GetInt64(name string) (int64, error) { - val, err := f.getFlagType(name, "int64", int64Conv) - if err != nil { - return 0, err - } - return val.(int64), nil -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. 
-func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, name string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, "", value, usage) - return p -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, shorthand, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(name string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, "", value, usage) -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func Int64P(name, shorthand string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go deleted file mode 100644 index 25464638f..000000000 --- a/vendor/github.com/spf13/pflag/int64_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- int64Slice Value -type int64SliceValue struct { - value *[]int64 - changed bool -} - -func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { - isv := new(int64SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *int64SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseInt(d, 0, 64) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *int64SliceValue) Type() string { - return "int64Slice" -} - -func (s *int64SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *int64SliceValue) fromString(val string) (int64, error) { - return strconv.ParseInt(val, 0, 64) -} - -func (s *int64SliceValue) toString(val int64) string { - return fmt.Sprintf("%d", val) -} - -func (s *int64SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *int64SliceValue) Replace(val []string) error { - out := make([]int64, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *int64SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func int64SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int64{}, nil - } - ss := strings.Split(val, ",") - out := make([]int64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseInt(d, 0, 64) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetInt64Slice return the []int64 value of a flag with the given name -func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { - val, err := f.getFlagType(name, "int64Slice", int64SliceConv) - if err != nil { - return []int64{}, err - } - return val.([]int64), nil -} - -// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. -// The argument p points to a []int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { - f.VarP(newInt64SliceValue(value, p), name, "", usage) -} - -// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { - f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) -} - -// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. -// The argument p points to a int64[] variable in which to store the value of the flag. -func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { - CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) -} - -// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { - CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) -} - -// Int64Slice defines a []int64 flag with specified name, default value, and usage string. -// The return value is the address of a []int64 variable that stores the value of the flag. -func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { - p := []int64{} - f.Int64SliceVarP(&p, name, "", value, usage) - return &p -} - -// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { - p := []int64{} - f.Int64SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Int64Slice defines a []int64 flag with specified name, default value, and usage string. -// The return value is the address of a []int64 variable that stores the value of the flag. -func Int64Slice(name string, value []int64, usage string) *[]int64 { - return CommandLine.Int64SliceP(name, "", value, usage) -} - -// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. -func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { - return CommandLine.Int64SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go deleted file mode 100644 index 4da92228e..000000000 --- a/vendor/github.com/spf13/pflag/int8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int8 Value -type int8Value int8 - -func newInt8Value(val int8, p *int8) *int8Value { - *p = val - return (*int8Value)(p) -} - -func (i *int8Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 8) - *i = int8Value(v) - return err -} - -func (i *int8Value) Type() string { - return "int8" -} - -func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 8) - if err != nil { - return 0, err - } - return int8(v), nil -} - -// GetInt8 return the int8 value of a flag with the given name -func (f *FlagSet) GetInt8(name string) (int8, error) { - val, err := f.getFlagType(name, "int8", int8Conv) - if err != nil { - return 0, err - } - return val.(int8), nil -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func Int8Var(p *int8, name string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, "", value, usage) - return p -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, shorthand, value, usage) - return p -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func Int8(name string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, "", value, usage) -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. -func Int8P(name, shorthand string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go deleted file mode 100644 index e71c39d91..000000000 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ /dev/null @@ -1,158 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- intSlice Value -type intSliceValue struct { - value *[]int - changed bool -} - -func newIntSliceValue(val []int, p *[]int) *intSliceValue { - isv := new(intSliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *intSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *intSliceValue) Type() string { - return "intSlice" -} - -func (s *intSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *intSliceValue) Append(val string) error { - i, err := strconv.Atoi(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *intSliceValue) Replace(val []string) error { - out := make([]int, len(val)) - for i, d := range val { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *intSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = strconv.Itoa(d) - } - return out -} - -func intSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int{}, nil - } - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetIntSlice return the []int value of a flag with the given name -func (f *FlagSet) GetIntSlice(name string) ([]int, error) { - val, err := f.getFlagType(name, "intSlice", intSliceConv) - if err != nil { - return []int{}, err - } - return val.([]int), nil -} - -// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. -// The argument p points to a []int variable in which to store the value of the flag. -func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSliceVar defines a int[] flag with specified name, default value, and usage string. -// The argument p points to a int[] variable in which to store the value of the flag. -func IntSliceVar(p *[]int, name string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, "", value, usage) - return &p -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func IntSlice(name string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, "", value, usage) -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func IntSliceP(name, shorthand string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go deleted file mode 100644 index 3d414ba69..000000000 --- a/vendor/github.com/spf13/pflag/ip.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// -- net.IP value -type ipValue net.IP - -func newIPValue(val net.IP, p *net.IP) *ipValue { - *p = val - return (*ipValue)(p) -} - -func (i *ipValue) String() string { return net.IP(*i).String() } -func (i *ipValue) Set(s string) error { - ip := net.ParseIP(strings.TrimSpace(s)) - if ip == nil { - return fmt.Errorf("failed to parse IP: %q", s) - } - *i = ipValue(ip) - return nil -} - -func (i *ipValue) Type() string { - return "ip" -} - -func ipConv(sval string) (interface{}, error) { - ip := net.ParseIP(sval) - if ip != nil { - return ip, nil - } - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) -} - -// GetIP return the net.IP value of a flag with the given name -func (f *FlagSet) GetIP(name string) (net.IP, error) { - val, err := f.getFlagType(name, "ip", ipConv) - if err != nil { - return nil, err - } - return val.(net.IP), nil -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func IPVar(p *net.IP, name string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, "", value, usage) - return p -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, shorthand, value, usage) - return p -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func IP(name string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, "", value, usage) -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPP(name, shorthand string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go deleted file mode 100644 index 775faae4f..000000000 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ /dev/null @@ -1,186 +0,0 @@ -package pflag - -import ( - "fmt" - "io" - "net" - "strings" -) - -// -- ipSlice Value -type ipSliceValue struct { - value *[]net.IP - changed bool -} - -func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { - ipsv := new(ipSliceValue) - ipsv.value = p - *ipsv.value = val - return ipsv -} - -// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. -// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. -func (s *ipSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse ip values into slice - out := make([]net.IP, 0, len(ipStrSlice)) - for _, ipStr := range ipStrSlice { - ip := net.ParseIP(strings.TrimSpace(ipStr)) - if ip == nil { - return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) - } - out = append(out, ip) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *ipSliceValue) Type() string { - return "ipSlice" -} - -// String defines a "native" format for this net.IP slice flag value. 
-func (s *ipSliceValue) String() string { - - ipStrSlice := make([]string, len(*s.value)) - for i, ip := range *s.value { - ipStrSlice[i] = ip.String() - } - - out, _ := writeAsCSV(ipStrSlice) - - return "[" + out + "]" -} - -func (s *ipSliceValue) fromString(val string) (net.IP, error) { - return net.ParseIP(strings.TrimSpace(val)), nil -} - -func (s *ipSliceValue) toString(val net.IP) string { - return val.String() -} - -func (s *ipSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *ipSliceValue) Replace(val []string) error { - out := make([]net.IP, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *ipSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func ipSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []net.IP{}, nil - } - ss := strings.Split(val, ",") - out := make([]net.IP, len(ss)) - for i, sval := range ss { - ip := net.ParseIP(strings.TrimSpace(sval)) - if ip == nil { - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) - } - out[i] = ip - } - return out, nil -} - -// GetIPSlice returns the []net.IP value of a flag with the given name -func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { - val, err := f.getFlagType(name, "ipSlice", ipSliceConv) - if err != nil { - return []net.IP{}, err - } - return val.([]net.IP), nil -} - -// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of that flag. -func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, "", value, usage) - return &p -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of the flag. -func IPSlice(name string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, "", value, usage) -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. -func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go deleted file mode 100644 index 5bd44bd21..000000000 --- a/vendor/github.com/spf13/pflag/ipmask.go +++ /dev/null @@ -1,122 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strconv" -) - -// -- net.IPMask value -type ipMaskValue net.IPMask - -func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { - *p = val - return (*ipMaskValue)(p) -} - -func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } -func (i *ipMaskValue) Set(s string) error { - ip := ParseIPv4Mask(s) - if ip == nil { - return fmt.Errorf("failed to parse IP mask: %q", s) - } - *i = ipMaskValue(ip) - return nil -} - -func (i *ipMaskValue) Type() string { - return "ipMask" -} - -// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. -func ParseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - if len(s) != 8 { - return nil - } - // net.IPMask.String() actually outputs things like ffffff00 - // so write a horrible parser for that as well :-( - m := []int{} - for i := 0; i < 4; i++ { - b := "0x" + s[2*i:2*i+2] - d, err := strconv.ParseInt(b, 0, 0) - if err != nil { - return nil - } - m = append(m, int(d)) - } - s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) - mask = net.ParseIP(s) - if mask == nil { - return nil - } - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} - -func parseIPv4Mask(sval string) (interface{}, error) { - mask := ParseIPv4Mask(sval) - if mask == nil { - return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) - } - return mask, nil -} - -// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name -func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { - val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) - if err != nil { - return nil, err - } - return val.(net.IPMask), nil -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. 
-func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, "", value, usage) - return p -} - -// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, shorthand, value, usage) - return p -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func IPMask(name string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, "", value, usage) -} - -// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go deleted file mode 100644 index e2c1b8bcd..000000000 --- a/vendor/github.com/spf13/pflag/ipnet.go +++ /dev/null @@ -1,98 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// IPNet adapts net.IPNet for use as a flag. -type ipNetValue net.IPNet - -func (ipnet ipNetValue) String() string { - n := net.IPNet(ipnet) - return n.String() -} - -func (ipnet *ipNetValue) Set(value string) error { - _, n, err := net.ParseCIDR(strings.TrimSpace(value)) - if err != nil { - return err - } - *ipnet = ipNetValue(*n) - return nil -} - -func (*ipNetValue) Type() string { - return "ipNet" -} - -func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { - *p = val - return (*ipNetValue)(p) -} - -func ipNetConv(sval string) (interface{}, error) { - _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) - if err == nil { - return *n, nil - } - return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) -} - -// GetIPNet return the net.IPNet value of a flag with the given name -func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { - val, err := f.getFlagType(name, "ipNet", ipNetConv) - if err != nil { - return net.IPNet{}, err - } - return val.(net.IPNet), nil -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. 
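Editor's aside: the removed ipmask.go notes that ParseIPv4Mask accepts both the dotted form (255.255.255.0) and the 8-character hex form that net.IPMask.String() emits (ffffff00). A small sketch of that behavior; the expected outputs in the comments assume the vendored code matches upstream pflag:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        // Dotted and hex spellings of the same /24 mask; an invalid value yields nil.
        fmt.Println(pflag.ParseIPv4Mask("255.255.255.0")) // ffffff00
        fmt.Println(pflag.ParseIPv4Mask("ffffff00"))      // ffffff00
        fmt.Println(pflag.ParseIPv4Mask("not-a-mask"))    // <nil>
    }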
-func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. -func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, "", value, usage) - return p -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, shorthand, value, usage) - return p -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func IPNet(name string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, "", value, usage) -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go deleted file mode 100644 index 04e0a26ff..000000000 --- a/vendor/github.com/spf13/pflag/string.go +++ /dev/null @@ -1,80 +0,0 @@ -package pflag - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} -func (s *stringValue) Type() string { - return "string" -} - -func (s *stringValue) String() string { return string(*s) } - -func stringConv(sval string) (interface{}, error) { - return sval, nil -} - -// GetString return the string value of a flag with the given name -func (f *FlagSet) GetString(name string) (string, error) { - val, err := f.getFlagType(name, "string", stringConv) - if err != nil { - return "", err - } - return val.(string), nil -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { - f.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { - f.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. 
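Editor's aside: the removed ipnet.go parses CIDR flags with net.ParseCIDR, so the stored network is normalized and any host bits are dropped. A minimal sketch; the flag name and subnet are illustrative:

    package main

    import (
        "fmt"
        "net"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
        subnet := fs.IPNet("allow", net.IPNet{}, "allowed client subnet in CIDR form")

        if err := fs.Parse([]string{"--allow=10.1.2.3/16"}); err != nil {
            fmt.Println("parse error:", err)
            return
        }
        // net.ParseCIDR normalizes the value, so the host bits are gone here.
        fmt.Println(subnet.String()) // should print 10.1.0.0/16
    }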
-// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, name string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func StringVarP(p *string, name, shorthand string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(name string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, "", value, usage) - return p -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, shorthand, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return CommandLine.StringP(name, "", value, usage) -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func StringP(name, shorthand string, value string, usage string) *string { - return CommandLine.StringP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go deleted file mode 100644 index 4894af818..000000000 --- a/vendor/github.com/spf13/pflag/string_array.go +++ /dev/null @@ -1,129 +0,0 @@ -package pflag - -// -- stringArray Value -type stringArrayValue struct { - value *[]string - changed bool -} - -func newStringArrayValue(val []string, p *[]string) *stringArrayValue { - ssv := new(stringArrayValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func (s *stringArrayValue) Set(val string) error { - if !s.changed { - *s.value = []string{val} - s.changed = true - } else { - *s.value = append(*s.value, val) - } - return nil -} - -func (s *stringArrayValue) Append(val string) error { - *s.value = append(*s.value, val) - return nil -} - -func (s *stringArrayValue) Replace(val []string) error { - out := make([]string, len(val)) - for i, d := range val { - var err error - out[i] = d - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *stringArrayValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = d - } - return out -} - -func (s *stringArrayValue) Type() string { - return "stringArray" -} - -func (s *stringArrayValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringArrayConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a array with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringArray return the []string value of a flag with the given name -func (f *FlagSet) GetStringArray(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringArray", stringArrayConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringArrayVar defines a string flag with 
specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArrayVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, "", value, usage) - return &p -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArray(name string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, "", value, usage) -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
-func StringArrayP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go deleted file mode 100644 index 3cb2e69db..000000000 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ /dev/null @@ -1,163 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "strings" -) - -// -- stringSlice Value -type stringSliceValue struct { - value *[]string - changed bool -} - -func newStringSliceValue(val []string, p *[]string) *stringSliceValue { - ssv := new(stringSliceValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -func writeAsCSV(vals []string) (string, error) { - b := &bytes.Buffer{} - w := csv.NewWriter(b) - err := w.Write(vals) - if err != nil { - return "", err - } - w.Flush() - return strings.TrimSuffix(b.String(), "\n"), nil -} - -func (s *stringSliceValue) Set(val string) error { - v, err := readAsCSV(val) - if err != nil { - return err - } - if !s.changed { - *s.value = v - } else { - *s.value = append(*s.value, v...) - } - s.changed = true - return nil -} - -func (s *stringSliceValue) Type() string { - return "stringSlice" -} - -func (s *stringSliceValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func (s *stringSliceValue) Append(val string) error { - *s.value = append(*s.value, val) - return nil -} - -func (s *stringSliceValue) Replace(val []string) error { - *s.value = val - return nil -} - -func (s *stringSliceValue) GetSlice() []string { - return *s.value -} - -func stringSliceConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a slice with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringSlice return the []string value of a flag with the given name -func (f *FlagSet) GetStringSlice(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringSlice", stringSliceConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
-// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSliceVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, "", value, usage) - return &p -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSlice(name string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, "", value, usage) -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. 
-func StringSliceP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go deleted file mode 100644 index 5ceda3965..000000000 --- a/vendor/github.com/spf13/pflag/string_to_int.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// -- stringToInt Value -type stringToIntValue struct { - value *map[string]int - changed bool -} - -func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue { - ssv := new(stringToIntValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToIntValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return err - } - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToIntValue) Type() string { - return "stringToInt" -} - -func (s *stringToIntValue) String() string { - var buf bytes.Buffer - i := 0 - for k, v := range *s.value { - if i > 0 { - buf.WriteRune(',') - } - buf.WriteString(k) - buf.WriteRune('=') - buf.WriteString(strconv.Itoa(v)) - i++ - } - return "[" + buf.String() + "]" -} - -func stringToIntConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]int{}, nil - } - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetStringToInt return the map[string]int value of a flag with the given name -func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) { - val, err := f.getFlagType(name, "stringToInt", stringToIntConv) - if err != nil { - return map[string]int{}, err - } - return val.(map[string]int), nil -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the value of the flag. 
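Editor's aside: the removed string_array.go and string_slice.go docs spell out the difference between the two types: StringArray keeps each occurrence verbatim, while StringSlice splits every occurrence on commas (the removed comment gives --ss="v1,v2" --ss="v3" resulting in []string{"v1", "v2", "v3"}). A short sketch of that contrast; flag names are illustrative:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

        var labels, tags []string
        fs.StringArrayVar(&labels, "label", nil, "repeatable; value kept verbatim")
        fs.StringSliceVar(&tags, "tag", nil, "repeatable; value split on commas")

        args := []string{"--label=a,b", "--label=c", "--tag=a,b", "--tag=c"}
        if err := fs.Parse(args); err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println(labels) // [a,b c]  -- no comma splitting
        fmt.Println(tags)   // [a b c]  -- each occurrence read as CSV
    }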
-// The value of each argument will not try to be separated by comma -func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, "", value, usage) - return &p -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt(name string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, "", value, usage) -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. 
-func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go deleted file mode 100644 index a807a04a0..000000000 --- a/vendor/github.com/spf13/pflag/string_to_int64.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// -- stringToInt64 Value -type stringToInt64Value struct { - value *map[string]int64 - changed bool -} - -func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { - ssv := new(stringToInt64Value) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToInt64Value) Set(val string) error { - ss := strings.Split(val, ",") - out := make(map[string]int64, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) - if err != nil { - return err - } - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToInt64Value) Type() string { - return "stringToInt64" -} - -func (s *stringToInt64Value) String() string { - var buf bytes.Buffer - i := 0 - for k, v := range *s.value { - if i > 0 { - buf.WriteRune(',') - } - buf.WriteString(k) - buf.WriteRune('=') - buf.WriteString(strconv.FormatInt(v, 10)) - i++ - } - return "[" + buf.String() + "]" -} - -func stringToInt64Conv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]int64{}, nil - } - ss := strings.Split(val, ",") - out := make(map[string]int64, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetStringToInt64 return the map[string]int64 value of a flag with the given name -func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { - val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) - if err != nil { - return map[string]int64{}, err - } - return val.(map[string]int64), nil -} - -// StringToInt64Var defines a string flag with specified name, default value, and usage string. -// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { - f.VarP(newStringToInt64Value(value, p), name, "", usage) -} - -// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { - f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) -} - -// StringToInt64Var defines a string flag with specified name, default value, and usage string. -// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. 
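Editor's aside: the removed string_to_int.go documents the a=1,b=2 format and merges repeated occurrences into a single map. A short sketch; the flag name and keys are illustrative:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
        limits := fs.StringToInt("limit", nil, "per-pool limits as key=value pairs")

        // Repeated occurrences merge into one map; later keys overwrite earlier ones.
        args := []string{"--limit=ssd=100,hdd=20", "--limit=hdd=30"}
        if err := fs.Parse(args); err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println((*limits)["ssd"], (*limits)["hdd"]) // should print 100 30
    }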
-// The value of each argument will not try to be separated by comma -func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { - CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) -} - -// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. -func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { - CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) -} - -// StringToInt64 defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int64 variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { - p := map[string]int64{} - f.StringToInt64VarP(&p, name, "", value, usage) - return &p -} - -// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { - p := map[string]int64{} - f.StringToInt64VarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToInt64 defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int64 variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { - return CommandLine.StringToInt64P(name, "", value, usage) -} - -// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
-func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { - return CommandLine.StringToInt64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go deleted file mode 100644 index 890a01afc..000000000 --- a/vendor/github.com/spf13/pflag/string_to_string.go +++ /dev/null @@ -1,160 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "fmt" - "strings" -) - -// -- stringToString Value -type stringToStringValue struct { - value *map[string]string - changed bool -} - -func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue { - ssv := new(stringToStringValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToStringValue) Set(val string) error { - var ss []string - n := strings.Count(val, "=") - switch n { - case 0: - return fmt.Errorf("%s must be formatted as key=value", val) - case 1: - ss = append(ss, strings.Trim(val, `"`)) - default: - r := csv.NewReader(strings.NewReader(val)) - var err error - ss, err = r.Read() - if err != nil { - return err - } - } - - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToStringValue) Type() string { - return "stringToString" -} - -func (s *stringToStringValue) String() string { - records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { - records = append(records, k+"="+v) - } - - var buf bytes.Buffer - w := csv.NewWriter(&buf) - if err := w.Write(records); err != nil { - panic(err) - } - w.Flush() - return "[" + strings.TrimSpace(buf.String()) + "]" -} - -func stringToStringConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]string{}, nil - } - r := csv.NewReader(strings.NewReader(val)) - ss, err := r.Read() - if err != nil { - return nil, err - } - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - return out, nil -} - -// GetStringToString return the map[string]string value of a flag with the given name -func (f *FlagSet) GetStringToString(name string) (map[string]string, error) { - val, err := f.getFlagType(name, "stringToString", stringToStringConv) - if err != nil { - return map[string]string{}, err - } - return val.(map[string]string), nil -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. -func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, "", value, usage) - return &p -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToString(name string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, "", value, usage) -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. 
-func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go deleted file mode 100644 index dcbc2b758..000000000 --- a/vendor/github.com/spf13/pflag/uint.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Type() string { - return "uint" -} - -func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uintConv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 0) - if err != nil { - return 0, err - } - return uint(v), nil -} - -// GetUint return the uint value of a flag with the given name -func (f *FlagSet) GetUint(name string) (uint, error) { - val, err := f.getFlagType(name, "uint", uintConv) - if err != nil { - return 0, err - } - return val.(uint), nil -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, name string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func UintVarP(p *uint, name, shorthand string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(name string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, "", value, usage) - return p -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, shorthand, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return CommandLine.UintP(name, "", value, usage) -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. 
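Editor's aside: the removed string_to_string.go treats a value containing a single '=' as one pair (so commas inside the value survive) and otherwise reads the value as a CSV record of key=value pairs. A sketch of both paths; keys and values are illustrative:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
        meta := fs.StringToString("meta", nil, "metadata as key=value pairs")

        // One '=' in the value: treated as a single pair, so the comma survives.
        // Several '=': the value is read as a CSV record of key=value pairs.
        args := []string{"--meta=desc=a,b", "--meta=owner=soda,team=api"}
        if err := fs.Parse(args); err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println((*meta)["desc"], (*meta)["owner"], (*meta)["team"]) // a,b soda api
    }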
-func UintP(name, shorthand string, value uint, usage string) *uint { - return CommandLine.UintP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go deleted file mode 100644 index 7e9914edd..000000000 --- a/vendor/github.com/spf13/pflag/uint16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint16 value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} - -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Type() string { - return "uint16" -} - -func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 16) - if err != nil { - return 0, err - } - return uint16(v), nil -} - -// GetUint16 return the uint16 value of a flag with the given name -func (f *FlagSet) GetUint16(name string) (uint16, error) { - val, err := f.getFlagType(name, "uint16", uint16Conv) - if err != nil { - return 0, err - } - return val.(uint16), nil -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func Uint16Var(p *uint16, name string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, "", value, usage) - return p -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, shorthand, value, usage) - return p -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint16(name string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, "", value, usage) -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. 
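Editor's aside: the removed uint*.go helpers all call strconv.ParseUint with base 0, so prefixed forms such as 0x10 are accepted alongside plain decimal. A small sketch; the flag name and value are illustrative:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
        workers := fs.Uint("workers", 4, "number of workers")

        // ParseUint with base 0 also accepts prefixed forms such as 0x10.
        if err := fs.Parse([]string{"--workers=0x10"}); err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println(*workers) // should print 16
    }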
-func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go deleted file mode 100644 index d8024539b..000000000 --- a/vendor/github.com/spf13/pflag/uint32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint32 value -type uint32Value uint32 - -func newUint32Value(val uint32, p *uint32) *uint32Value { - *p = val - return (*uint32Value)(p) -} - -func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) Type() string { - return "uint32" -} - -func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 32) - if err != nil { - return 0, err - } - return uint32(v), nil -} - -// GetUint32 return the uint32 value of a flag with the given name -func (f *FlagSet) GetUint32(name string) (uint32, error) { - val, err := f.getFlagType(name, "uint32", uint32Conv) - if err != nil { - return 0, err - } - return val.(uint32), nil -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func Uint32Var(p *uint32, name string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, "", value, usage) - return p -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, shorthand, value, usage) - return p -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func Uint32(name string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, "", value, usage) -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. 
-func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go deleted file mode 100644 index f62240f2c..000000000 --- a/vendor/github.com/spf13/pflag/uint64.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Type() string { - return "uint64" -} - -func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint64Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 64) - if err != nil { - return 0, err - } - return uint64(v), nil -} - -// GetUint64 return the uint64 value of a flag with the given name -func (f *FlagSet) GetUint64(name string) (uint64, error) { - val, err := f.getFlagType(name, "uint64", uint64Conv) - if err != nil { - return 0, err - } - return val.(uint64), nil -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, "", value, usage) - return p -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, shorthand, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, "", value, usage) -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. 
-func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go deleted file mode 100644 index bb0e83c1f..000000000 --- a/vendor/github.com/spf13/pflag/uint8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint8 Value -type uint8Value uint8 - -func newUint8Value(val uint8, p *uint8) *uint8Value { - *p = val - return (*uint8Value)(p) -} - -func (i *uint8Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 8) - *i = uint8Value(v) - return err -} - -func (i *uint8Value) Type() string { - return "uint8" -} - -func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 8) - if err != nil { - return 0, err - } - return uint8(v), nil -} - -// GetUint8 return the uint8 value of a flag with the given name -func (f *FlagSet) GetUint8(name string) (uint8, error) { - val, err := f.getFlagType(name, "uint8", uint8Conv) - if err != nil { - return 0, err - } - return val.(uint8), nil -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func Uint8Var(p *uint8, name string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, "", value, usage) - return p -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, shorthand, value, usage) - return p -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func Uint8(name string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, "", value, usage) -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. 
-func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go deleted file mode 100644 index 5fa924835..000000000 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ /dev/null @@ -1,168 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- uintSlice Value -type uintSliceValue struct { - value *[]uint - changed bool -} - -func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue { - uisv := new(uintSliceValue) - uisv.value = p - *uisv.value = val - return uisv -} - -func (s *uintSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return err - } - out[i] = uint(u) - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *uintSliceValue) Type() string { - return "uintSlice" -} - -func (s *uintSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *uintSliceValue) fromString(val string) (uint, error) { - t, err := strconv.ParseUint(val, 10, 0) - if err != nil { - return 0, err - } - return uint(t), nil -} - -func (s *uintSliceValue) toString(val uint) string { - return fmt.Sprintf("%d", val) -} - -func (s *uintSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *uintSliceValue) Replace(val []string) error { - out := make([]uint, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *uintSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func uintSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []uint{}, nil - } - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return nil, err - } - out[i] = uint(u) - } - return out, nil -} - -// GetUintSlice returns the []uint value of a flag with the given name. -func (f *FlagSet) GetUintSlice(name string) ([]uint, error) { - val, err := f.getFlagType(name, "uintSlice", uintSliceConv) - if err != nil { - return []uint{}, err - } - return val.([]uint), nil -} - -// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string. -// The argument p points to a []uint variable in which to store the value of the flag. -func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSliceVar defines a uint[] flag with specified name, default value, and usage string. 
-// The argument p points to a uint[] variable in which to store the value of the flag. -func UintSliceVar(p *[]uint, name string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, "", value, usage) - return &p -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func UintSlice(name string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, "", value, usage) -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml deleted file mode 100644 index 010d4ccd5..000000000 --- a/vendor/github.com/stretchr/objx/.codeclimate.yml +++ /dev/null @@ -1,13 +0,0 @@ -engines: - gofmt: - enabled: true - golint: - enabled: true - govet: - enabled: true - -exclude_patterns: -- ".github/" -- "vendor/" -- "codegen/" -- "doc.go" diff --git a/vendor/github.com/stretchr/objx/.gitignore b/vendor/github.com/stretchr/objx/.gitignore deleted file mode 100644 index ea58090bd..000000000 --- a/vendor/github.com/stretchr/objx/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out diff --git a/vendor/github.com/stretchr/objx/.travis.yml b/vendor/github.com/stretchr/objx/.travis.yml deleted file mode 100644 index a63efa59d..000000000 --- a/vendor/github.com/stretchr/objx/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: go -go: - - 1.8 - - 1.9 - - tip - -env: - global: - - CC_TEST_REPORTER_ID=68feaa3410049ce73e145287acbcdacc525087a30627f96f04e579e75bd71c00 - -before_script: - - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter - - chmod +x ./cc-test-reporter - - ./cc-test-reporter before-build - -install: -- go get github.com/go-task/task/cmd/task - -script: -- task dl-deps -- task lint -- task test-coverage - -after_script: - - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT diff --git a/vendor/github.com/stretchr/objx/Gopkg.lock b/vendor/github.com/stretchr/objx/Gopkg.lock deleted file mode 100644 index eebe342a9..000000000 --- a/vendor/github.com/stretchr/objx/Gopkg.lock +++ /dev/null @@ -1,30 +0,0 @@ -# This 
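Editor's aside: the removed uint_slice.go, like the other *_slice files above, pairs each flag definition with a Get* accessor that looks the flag up by name and asserts its dynamic type. A sketch of that lookup pattern; the flag name and port numbers are illustrative:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
        fs.UintSlice("ports", []uint{8080}, "listen ports")

        if err := fs.Parse([]string{"--ports=50040,50041"}); err != nil {
            fmt.Println("parse error:", err)
            return
        }

        // The Get* helpers look a flag up by name and assert its dynamic type.
        ports, err := fs.GetUintSlice("ports")
        if err != nil {
            fmt.Println("lookup error:", err)
            return
        }
        fmt.Println(ports) // should print [50040 50041]
    }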
file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require" - ] - revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" - version = "v1.2.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "2d160a7dea4ffd13c6c31dab40373822f9d78c73beba016d662bef8f7a998876" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/stretchr/objx/Gopkg.toml b/vendor/github.com/stretchr/objx/Gopkg.toml deleted file mode 100644 index d70f1570b..000000000 --- a/vendor/github.com/stretchr/objx/Gopkg.toml +++ /dev/null @@ -1,8 +0,0 @@ -[prune] - unused-packages = true - non-go = true - go-tests = true - -[[constraint]] - name = "github.com/stretchr/testify" - version = "~1.2.0" diff --git a/vendor/github.com/stretchr/objx/LICENSE b/vendor/github.com/stretchr/objx/LICENSE deleted file mode 100644 index 44d4d9d5a..000000000 --- a/vendor/github.com/stretchr/objx/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Stretchr, Inc. -Copyright (c) 2017-2018 objx contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
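The UintSlice helpers deleted just above appear to come from the vendored copy of spf13/pflag that this change removes. As a rough, illustrative sketch only (the flag names "ports" and "retries" are invented for the example and do not come from this repository), the same helpers are used from the upstream module like this:

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	// UintSlice returns a *[]uint that Parse fills in from e.g. "--ports=8080,9090".
    	ports := pflag.UintSlice("ports", []uint{80}, "ports to listen on")

    	// UintSliceVarP binds an existing variable and adds a one-letter shorthand (-r).
    	var retries []uint
    	pflag.UintSliceVarP(&retries, "retries", "r", []uint{3}, "retry budget per port")

    	pflag.Parse()
    	fmt.Println(*ports, retries)
    }

Run with `--ports=8080,9090 -r 1,2` and the two slices hold those parsed values; with no arguments they keep the defaults passed at definition time.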
diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md deleted file mode 100644 index be5750c94..000000000 --- a/vendor/github.com/stretchr/objx/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Objx -[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx) -[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx) -[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) -[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) -[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) -[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) - -Objx - Go package for dealing with maps, slices, JSON and other data. - -Get started: - -- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) -- Check out the API Documentation http://godoc.org/github.com/stretchr/objx - -## Overview -Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. - -### Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: - - m, err := objx.FromJSON(json) - -NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. - -Use `Get` to access the value you're interested in. You can use dot and array -notation too: - - m.Get("places[0].latlng") - -Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - - if m.Get("code").IsStr() { // Your code... } - -Or you can just assume the type, and use one of the strong type methods to extract the real value: - - m.Get("code").Int() - -If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. - - Get("code").Int(-1) - -If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. - -### Reading data -A simple example of how to use Objx: - - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() - - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) - -### Ranging -Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: - - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... 
- } - -## Installation -To install Objx, use go get: - - go get github.com/stretchr/objx - -### Staying up to date -To update Objx to the latest version, run: - - go get -u github.com/stretchr/objx - -### Supported go versions -We support the lastest two major Go versions, which are 1.8 and 1.9 at the moment. - -## Contributing -Please feel free to submit issues, fork the repository and send pull requests! diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml deleted file mode 100644 index f8035641f..000000000 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ /dev/null @@ -1,32 +0,0 @@ -default: - deps: [test] - -dl-deps: - desc: Downloads cli dependencies - cmds: - - go get -u github.com/golang/lint/golint - - go get -u github.com/golang/dep/cmd/dep - -update-deps: - desc: Updates dependencies - cmds: - - dep ensure - - dep ensure -update - -lint: - desc: Runs golint - cmds: - - go fmt $(go list ./... | grep -v /vendor/) - - go vet $(go list ./... | grep -v /vendor/) - - golint $(ls *.go | grep -v "doc.go") - silent: true - -test: - desc: Runs go tests - cmds: - - go test -race . - -test-coverage: - desc: Runs go tests and calucates test coverage - cmds: - - go test -coverprofile=c.out . diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go deleted file mode 100644 index 204356a22..000000000 --- a/vendor/github.com/stretchr/objx/accessors.go +++ /dev/null @@ -1,148 +0,0 @@ -package objx - -import ( - "regexp" - "strconv" - "strings" -) - -// arrayAccesRegexString is the regex used to extract the array number -// from the access path -const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` - -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) - -// Get gets the value using the specified selector and -// returns it inside a new Obj object. -// -// If it cannot find the value, Get will return a nil -// value inside an instance of Obj. -// -// Get can only operate directly on map[string]interface{} and []interface. -// -// Example -// -// To access the title of the third chapter of the second book, do: -// -// o.Get("books[1].chapters[2].title") -func (m Map) Get(selector string) *Value { - rawObj := access(m, selector, nil, false) - return &Value{data: rawObj} -} - -// Set sets the value using the specified selector and -// returns the object on which Set was called. -// -// Set can only operate directly on map[string]interface{} and []interface -// -// Example -// -// To set the title of the third chapter of the second book, do: -// -// o.Set("books[1].chapters[2].title","Time to Go") -func (m Map) Set(selector string, value interface{}) Map { - access(m, selector, value, true) - return m -} - -// access accesses the object using the selector and performs the -// appropriate action. 
-func access(current, selector, value interface{}, isSet bool) interface{} { - switch selector.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - if array, ok := current.([]interface{}); ok { - index := intFromInterface(selector) - if index >= len(array) { - return nil - } - return array[index] - } - return nil - - case string: - selStr := selector.(string) - selSegs := strings.SplitN(selStr, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 - var err error - - if strings.Contains(thisSel, "[") { - arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) - if len(arrayMatches) > 0 { - // Get the key into the map - thisSel = arrayMatches[1] - - // Get the index into the array at the key - index, err = strconv.Atoi(arrayMatches[2]) - - if err != nil { - // This should never happen. If it does, something has gone - // seriously wrong. Panic. - panic("objx: Array index is not an integer. Must use array[int].") - } - } - } - if curMap, ok := current.(Map); ok { - current = map[string]interface{}(curMap) - } - // get the object in question - switch current.(type) { - case map[string]interface{}: - curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { - curMSI[thisSel] = value - return nil - } - current = curMSI[thisSel] - default: - current = nil - } - // do we need to access the item of an array? - if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - current = nil - } - } - } - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet) - } - } - return current -} - -// intFromInterface converts an interface object to the largest -// representation of an unsigned integer using a type switch and -// assertions -func intFromInterface(selector interface{}) int { - var value int - switch selector.(type) { - case int: - value = selector.(int) - case int8: - value = int(selector.(int8)) - case int16: - value = int(selector.(int16)) - case int32: - value = int(selector.(int32)) - case int64: - value = int(selector.(int64)) - case uint: - value = int(selector.(uint)) - case uint8: - value = int(selector.(uint8)) - case uint16: - value = int(selector.(uint16)) - case uint32: - value = int(selector.(uint32)) - case uint64: - value = int(selector.(uint64)) - default: - return 0 - } - return value -} diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go deleted file mode 100644 index f9eb42a25..000000000 --- a/vendor/github.com/stretchr/objx/constants.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -const ( - // PathSeparator is the character used to separate the elements - // of the keypath. - // - // For example, `location.address.city` - PathSeparator string = "." - - // SignatureSeparator is the character that is used to - // separate the Base64 string from the security signature. 
- SignatureSeparator = "_" -) diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go deleted file mode 100644 index 5e020f310..000000000 --- a/vendor/github.com/stretchr/objx/conversions.go +++ /dev/null @@ -1,108 +0,0 @@ -package objx - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/url" -) - -// JSON converts the contained object to a JSON string -// representation -func (m Map) JSON() (string, error) { - result, err := json.Marshal(m) - if err != nil { - err = errors.New("objx: JSON encode failed with: " + err.Error()) - } - return string(result), err -} - -// MustJSON converts the contained object to a JSON string -// representation and panics if there is an error -func (m Map) MustJSON() string { - result, err := m.JSON() - if err != nil { - panic(err.Error()) - } - return result -} - -// Base64 converts the contained object to a Base64 string -// representation of the JSON string representation -func (m Map) Base64() (string, error) { - var buf bytes.Buffer - - jsonData, err := m.JSON() - if err != nil { - return "", err - } - - encoder := base64.NewEncoder(base64.StdEncoding, &buf) - _, err = encoder.Write([]byte(jsonData)) - if err != nil { - return "", err - } - _ = encoder.Close() - - return buf.String(), nil -} - -// MustBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and panics -// if there is an error -func (m Map) MustBase64() string { - result, err := m.Base64() - if err != nil { - panic(err.Error()) - } - return result -} - -// SignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key. -func (m Map) SignedBase64(key string) (string, error) { - base64, err := m.Base64() - if err != nil { - return "", err - } - - sig := HashWithKey(base64, key) - return base64 + SignatureSeparator + sig, nil -} - -// MustSignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key and panics if there is an error -func (m Map) MustSignedBase64(key string) string { - result, err := m.SignedBase64(key) - if err != nil { - panic(err.Error()) - } - return result -} - -/* - URL Query - ------------------------------------------------ -*/ - -// URLValues creates a url.Values object from an Obj. This -// function requires that the wrapped object be a map[string]interface{} -func (m Map) URLValues() url.Values { - vals := make(url.Values) - for k, v := range m { - //TODO: can this be done without sprintf? - vals.Set(k, fmt.Sprintf("%v", v)) - } - return vals -} - -// URLQuery gets an encoded URL query representing the given -// Obj. This function requires that the wrapped object be a -// map[string]interface{} -func (m Map) URLQuery() (string, error) { - return m.URLValues().Encode(), nil -} diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go deleted file mode 100644 index 6d6af1a83..000000000 --- a/vendor/github.com/stretchr/objx/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Objx - Go package for dealing with maps, slices, JSON and other data. 
- -Overview - -Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes -a powerful `Get` method (among others) that allows you to easily and quickly get -access to data within the map, without having to worry too much about type assertions, -missing data, default values etc. - -Pattern - -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. -Call one of the `objx.` functions to create your `objx.Map` to get going: - - m, err := objx.FromJSON(json) - -NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, -the rest will be optimistic and try to figure things out without panicking. - -Use `Get` to access the value you're interested in. You can use dot and array -notation too: - - m.Get("places[0].latlng") - -Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - - if m.Get("code").IsStr() { // Your code... } - -Or you can just assume the type, and use one of the strong type methods to extract the real value: - - m.Get("code").Int() - -If there's no value there (or if it's the wrong type) then a default value will be returned, -or you can be explicit about the default value. - - Get("code").Int(-1) - -If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, -manipulating and selecting that data. You can find out more by exploring the index below. - -Reading data - -A simple example of how to use Objx: - - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() - - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) - -Ranging - -Since `objx.Map` is a `map[string]interface{}` you can treat it as such. -For example, to `range` the data, do what you would expect: - - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... - } -*/ -package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go deleted file mode 100644 index 406bc8926..000000000 --- a/vendor/github.com/stretchr/objx/map.go +++ /dev/null @@ -1,190 +0,0 @@ -package objx - -import ( - "encoding/base64" - "encoding/json" - "errors" - "io/ioutil" - "net/url" - "strings" -) - -// MSIConvertable is an interface that defines methods for converting your -// custom types to a map[string]interface{} representation. -type MSIConvertable interface { - // MSI gets a map[string]interface{} (msi) representing the - // object. - MSI() map[string]interface{} -} - -// Map provides extended functionality for working with -// untyped data, in particular map[string]interface (msi). -type Map map[string]interface{} - -// Value returns the internal value instance -func (m Map) Value() *Value { - return &Value{data: m} -} - -// Nil represents a nil Map. -var Nil = New(nil) - -// New creates a new Map containing the map[string]interface{} in the data argument. -// If the data argument is not a map[string]interface, New attempts to call the -// MSI() method on the MSIConvertable interface to create one. -func New(data interface{}) Map { - if _, ok := data.(map[string]interface{}); !ok { - if converter, ok := data.(MSIConvertable); ok { - data = converter.MSI() - } else { - return nil - } - } - return Map(data.(map[string]interface{})) -} - -// MSI creates a map[string]interface{} and puts it inside a new Map. 
-// -// The arguments follow a key, value pattern. -// -// -// Returns nil if any key argument is non-string or if there are an odd number of arguments. -// -// Example -// -// To easily create Maps: -// -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) -// -// // creates an Map equivalent to -// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} -func MSI(keyAndValuePairs ...interface{}) Map { - newMap := Map{} - keyAndValuePairsLen := len(keyAndValuePairs) - if keyAndValuePairsLen%2 != 0 { - return nil - } - for i := 0; i < keyAndValuePairsLen; i = i + 2 { - key := keyAndValuePairs[i] - value := keyAndValuePairs[i+1] - - // make sure the key is a string - keyString, keyStringOK := key.(string) - if !keyStringOK { - return nil - } - newMap[keyString] = value - } - return newMap -} - -// ****** Conversion Constructors - -// MustFromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Panics if the JSON is invalid. -func MustFromJSON(jsonString string) Map { - o, err := FromJSON(jsonString) - if err != nil { - panic("objx: MustFromJSON failed with error: " + err.Error()) - } - return o -} - -// FromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Returns an error if the JSON is invalid. -func FromJSON(jsonString string) (Map, error) { - var data interface{} - err := json.Unmarshal([]byte(jsonString), &data) - if err != nil { - return Nil, err - } - return New(data), nil -} - -// FromBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by Base64 -func FromBase64(base64String string) (Map, error) { - decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) - decoded, err := ioutil.ReadAll(decoder) - if err != nil { - return nil, err - } - return FromJSON(string(decoded)) -} - -// MustFromBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. -// -// The string is an encoded JSON string returned by Base64 -func MustFromBase64(base64String string) Map { - result, err := FromBase64(base64String) - if err != nil { - panic("objx: MustFromBase64 failed with error: " + err.Error()) - } - return result -} - -// FromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by SignedBase64 -func FromSignedBase64(base64String, key string) (Map, error) { - parts := strings.Split(base64String, SignatureSeparator) - if len(parts) != 2 { - return nil, errors.New("objx: Signed base64 string is malformed") - } - - sig := HashWithKey(parts[0], key) - if parts[1] != sig { - return nil, errors.New("objx: Signature for base64 data does not match") - } - return FromBase64(parts[0]) -} - -// MustFromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. -// -// The string is an encoded JSON string returned by Base64 -func MustFromSignedBase64(base64String, key string) Map { - result, err := FromSignedBase64(base64String, key) - if err != nil { - panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) - } - return result -} - -// FromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. 
-func FromURLQuery(query string) (Map, error) { - vals, err := url.ParseQuery(query) - if err != nil { - return nil, err - } - m := Map{} - for k, vals := range vals { - m[k] = vals[0] - } - return m, nil -} - -// MustFromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. -// -// Panics if it encounters an error -func MustFromURLQuery(query string) Map { - o, err := FromURLQuery(query) - if err != nil { - panic("objx: MustFromURLQuery failed with error: " + err.Error()) - } - return o -} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go deleted file mode 100644 index c3400a3f7..000000000 --- a/vendor/github.com/stretchr/objx/mutations.go +++ /dev/null @@ -1,77 +0,0 @@ -package objx - -// Exclude returns a new Map with the keys in the specified []string -// excluded. -func (m Map) Exclude(exclude []string) Map { - excluded := make(Map) - for k, v := range m { - if !contains(exclude, k) { - excluded[k] = v - } - } - return excluded -} - -// Copy creates a shallow copy of the Obj. -func (m Map) Copy() Map { - copied := Map{} - for k, v := range m { - copied[k] = v - } - return copied -} - -// Merge blends the specified map with a copy of this map and returns the result. -// -// Keys that appear in both will be selected from the specified map. -// This method requires that the wrapped object be a map[string]interface{} -func (m Map) Merge(merge Map) Map { - return m.Copy().MergeHere(merge) -} - -// MergeHere blends the specified map with this map and returns the current map. -// -// Keys that appear in both will be selected from the specified map. The original map -// will be modified. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) MergeHere(merge Map) Map { - for k, v := range merge { - m[k] = v - } - return m -} - -// Transform builds a new Obj giving the transformer a chance -// to change the keys and values as it goes. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { - newMap := Map{} - for k, v := range m { - modifiedKey, modifiedVal := transformer(k, v) - newMap[modifiedKey] = modifiedVal - } - return newMap -} - -// TransformKeys builds a new map using the specified key mapping. -// -// Unspecified keys will be unaltered. 
-// This method requires that the wrapped object be a map[string]interface{} -func (m Map) TransformKeys(mapping map[string]string) Map { - return m.Transform(func(key string, value interface{}) (string, interface{}) { - if newKey, ok := mapping[key]; ok { - return newKey, value - } - return key, value - }) -} - -// Checks if a string slice contains a string -func contains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go deleted file mode 100644 index 692be8e2a..000000000 --- a/vendor/github.com/stretchr/objx/security.go +++ /dev/null @@ -1,12 +0,0 @@ -package objx - -import ( - "crypto/sha1" - "encoding/hex" -) - -// HashWithKey hashes the specified string using the security key -func HashWithKey(data, key string) string { - d := sha1.Sum([]byte(data + ":" + key)) - return hex.EncodeToString(d[:]) -} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go deleted file mode 100644 index d9e0b479a..000000000 --- a/vendor/github.com/stretchr/objx/tests.go +++ /dev/null @@ -1,17 +0,0 @@ -package objx - -// Has gets whether there is something at the specified selector -// or not. -// -// If m is nil, Has will always return false. -func (m Map) Has(selector string) bool { - if m == nil { - return false - } - return !m.Get(selector).IsNil() -} - -// IsNil gets whether the data is nil or not. -func (v *Value) IsNil() bool { - return v == nil || v.data == nil -} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go deleted file mode 100644 index 202a91f8c..000000000 --- a/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ /dev/null @@ -1,2501 +0,0 @@ -package objx - -/* - Inter (interface{} and []interface{}) -*/ - -// Inter gets the value as a interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Inter(optionalDefault ...interface{}) interface{} { - if s, ok := v.data.(interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInter gets the value as a interface{}. -// -// Panics if the object is not a interface{}. -func (v *Value) MustInter() interface{} { - return v.data.(interface{}) -} - -// InterSlice gets the value as a []interface{}, returns the optionalDefault -// value or nil if the value is not a []interface{}. -func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { - if s, ok := v.data.([]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInterSlice gets the value as a []interface{}. -// -// Panics if the object is not a []interface{}. -func (v *Value) MustInterSlice() []interface{} { - return v.data.([]interface{}) -} - -// IsInter gets whether the object contained is a interface{} or not. -func (v *Value) IsInter() bool { - _, ok := v.data.(interface{}) - return ok -} - -// IsInterSlice gets whether the object contained is a []interface{} or not. -func (v *Value) IsInterSlice() bool { - _, ok := v.data.([]interface{}) - return ok -} - -// EachInter calls the specified callback for each object -// in the []interface{}. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { - for index, val := range v.MustInterSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInter uses the specified decider function to select items -// from the []interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { - var selected []interface{} - v.EachInter(func(index int, val interface{}) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInter uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]interface{}. -func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { - groups := make(map[string][]interface{}) - v.EachInter(func(index int, val interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInter uses the specified function to replace each interface{}s -// by iterating each item. The data in the returned result will be a -// []interface{} containing the replaced items. -func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() - replaced := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInter uses the specified collector function to collect a value -// for each of the interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() - collected := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - MSI (map[string]interface{} and []map[string]interface{}) -*/ - -// MSI gets the value as a map[string]interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { - if s, ok := v.data.(map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSI gets the value as a map[string]interface{}. -// -// Panics if the object is not a map[string]interface{}. -func (v *Value) MustMSI() map[string]interface{} { - return v.data.(map[string]interface{}) -} - -// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault -// value or nil if the value is not a []map[string]interface{}. -func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { - if s, ok := v.data.([]map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSISlice gets the value as a []map[string]interface{}. -// -// Panics if the object is not a []map[string]interface{}. 
-func (v *Value) MustMSISlice() []map[string]interface{} { - return v.data.([]map[string]interface{}) -} - -// IsMSI gets whether the object contained is a map[string]interface{} or not. -func (v *Value) IsMSI() bool { - _, ok := v.data.(map[string]interface{}) - return ok -} - -// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. -func (v *Value) IsMSISlice() bool { - _, ok := v.data.([]map[string]interface{}) - return ok -} - -// EachMSI calls the specified callback for each object -// in the []map[string]interface{}. -// -// Panics if the object is the wrong type. -func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { - for index, val := range v.MustMSISlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereMSI uses the specified decider function to select items -// from the []map[string]interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { - var selected []map[string]interface{} - v.EachMSI(func(index int, val map[string]interface{}) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupMSI uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]map[string]interface{}. -func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { - groups := make(map[string][]map[string]interface{}) - v.EachMSI(func(index int, val map[string]interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]map[string]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceMSI uses the specified function to replace each map[string]interface{}s -// by iterating each item. The data in the returned result will be a -// []map[string]interface{} containing the replaced items. -func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { - arr := v.MustMSISlice() - replaced := make([]map[string]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectMSI uses the specified collector function to collect a value -// for each of the map[string]interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { - arr := v.MustMSISlice() - collected := make([]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - ObjxMap ((Map) and [](Map)) -*/ - -// ObjxMap gets the value as a (Map), returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { - if s, ok := v.data.((Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return New(nil) -} - -// MustObjxMap gets the value as a (Map). -// -// Panics if the object is not a (Map). 
-func (v *Value) MustObjxMap() Map { - return v.data.((Map)) -} - -// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault -// value or nil if the value is not a [](Map). -func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { - if s, ok := v.data.([](Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustObjxMapSlice gets the value as a [](Map). -// -// Panics if the object is not a [](Map). -func (v *Value) MustObjxMapSlice() [](Map) { - return v.data.([](Map)) -} - -// IsObjxMap gets whether the object contained is a (Map) or not. -func (v *Value) IsObjxMap() bool { - _, ok := v.data.((Map)) - return ok -} - -// IsObjxMapSlice gets whether the object contained is a [](Map) or not. -func (v *Value) IsObjxMapSlice() bool { - _, ok := v.data.([](Map)) - return ok -} - -// EachObjxMap calls the specified callback for each object -// in the [](Map). -// -// Panics if the object is the wrong type. -func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { - for index, val := range v.MustObjxMapSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereObjxMap uses the specified decider function to select items -// from the [](Map). The object contained in the result will contain -// only the selected items. -func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { - var selected [](Map) - v.EachObjxMap(func(index int, val Map) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupObjxMap uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][](Map). -func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { - groups := make(map[string][](Map)) - v.EachObjxMap(func(index int, val Map) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([](Map), 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceObjxMap uses the specified function to replace each (Map)s -// by iterating each item. The data in the returned result will be a -// [](Map) containing the replaced items. -func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { - arr := v.MustObjxMapSlice() - replaced := make([](Map), len(arr)) - v.EachObjxMap(func(index int, val Map) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectObjxMap uses the specified collector function to collect a value -// for each of the (Map)s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { - arr := v.MustObjxMapSlice() - collected := make([]interface{}, len(arr)) - v.EachObjxMap(func(index int, val Map) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Bool (bool and []bool) -*/ - -// Bool gets the value as a bool, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Bool(optionalDefault ...bool) bool { - if s, ok := v.data.(bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return false -} - -// MustBool gets the value as a bool. 
-// -// Panics if the object is not a bool. -func (v *Value) MustBool() bool { - return v.data.(bool) -} - -// BoolSlice gets the value as a []bool, returns the optionalDefault -// value or nil if the value is not a []bool. -func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { - if s, ok := v.data.([]bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustBoolSlice gets the value as a []bool. -// -// Panics if the object is not a []bool. -func (v *Value) MustBoolSlice() []bool { - return v.data.([]bool) -} - -// IsBool gets whether the object contained is a bool or not. -func (v *Value) IsBool() bool { - _, ok := v.data.(bool) - return ok -} - -// IsBoolSlice gets whether the object contained is a []bool or not. -func (v *Value) IsBoolSlice() bool { - _, ok := v.data.([]bool) - return ok -} - -// EachBool calls the specified callback for each object -// in the []bool. -// -// Panics if the object is the wrong type. -func (v *Value) EachBool(callback func(int, bool) bool) *Value { - for index, val := range v.MustBoolSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereBool uses the specified decider function to select items -// from the []bool. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereBool(decider func(int, bool) bool) *Value { - var selected []bool - v.EachBool(func(index int, val bool) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupBool uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]bool. -func (v *Value) GroupBool(grouper func(int, bool) string) *Value { - groups := make(map[string][]bool) - v.EachBool(func(index int, val bool) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]bool, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceBool uses the specified function to replace each bools -// by iterating each item. The data in the returned result will be a -// []bool containing the replaced items. -func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { - arr := v.MustBoolSlice() - replaced := make([]bool, len(arr)) - v.EachBool(func(index int, val bool) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectBool uses the specified collector function to collect a value -// for each of the bools in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { - arr := v.MustBoolSlice() - collected := make([]interface{}, len(arr)) - v.EachBool(func(index int, val bool) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Str (string and []string) -*/ - -// Str gets the value as a string, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Str(optionalDefault ...string) string { - if s, ok := v.data.(string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return "" -} - -// MustStr gets the value as a string. -// -// Panics if the object is not a string. 
-func (v *Value) MustStr() string { - return v.data.(string) -} - -// StrSlice gets the value as a []string, returns the optionalDefault -// value or nil if the value is not a []string. -func (v *Value) StrSlice(optionalDefault ...[]string) []string { - if s, ok := v.data.([]string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustStrSlice gets the value as a []string. -// -// Panics if the object is not a []string. -func (v *Value) MustStrSlice() []string { - return v.data.([]string) -} - -// IsStr gets whether the object contained is a string or not. -func (v *Value) IsStr() bool { - _, ok := v.data.(string) - return ok -} - -// IsStrSlice gets whether the object contained is a []string or not. -func (v *Value) IsStrSlice() bool { - _, ok := v.data.([]string) - return ok -} - -// EachStr calls the specified callback for each object -// in the []string. -// -// Panics if the object is the wrong type. -func (v *Value) EachStr(callback func(int, string) bool) *Value { - for index, val := range v.MustStrSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereStr uses the specified decider function to select items -// from the []string. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereStr(decider func(int, string) bool) *Value { - var selected []string - v.EachStr(func(index int, val string) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupStr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]string. -func (v *Value) GroupStr(grouper func(int, string) string) *Value { - groups := make(map[string][]string) - v.EachStr(func(index int, val string) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]string, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceStr uses the specified function to replace each strings -// by iterating each item. The data in the returned result will be a -// []string containing the replaced items. -func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { - arr := v.MustStrSlice() - replaced := make([]string, len(arr)) - v.EachStr(func(index int, val string) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectStr uses the specified collector function to collect a value -// for each of the strings in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { - arr := v.MustStrSlice() - collected := make([]interface{}, len(arr)) - v.EachStr(func(index int, val string) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int (int and []int) -*/ - -// Int gets the value as a int, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int(optionalDefault ...int) int { - if s, ok := v.data.(int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt gets the value as a int. -// -// Panics if the object is not a int. 
-func (v *Value) MustInt() int { - return v.data.(int) -} - -// IntSlice gets the value as a []int, returns the optionalDefault -// value or nil if the value is not a []int. -func (v *Value) IntSlice(optionalDefault ...[]int) []int { - if s, ok := v.data.([]int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustIntSlice gets the value as a []int. -// -// Panics if the object is not a []int. -func (v *Value) MustIntSlice() []int { - return v.data.([]int) -} - -// IsInt gets whether the object contained is a int or not. -func (v *Value) IsInt() bool { - _, ok := v.data.(int) - return ok -} - -// IsIntSlice gets whether the object contained is a []int or not. -func (v *Value) IsIntSlice() bool { - _, ok := v.data.([]int) - return ok -} - -// EachInt calls the specified callback for each object -// in the []int. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt(callback func(int, int) bool) *Value { - for index, val := range v.MustIntSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt uses the specified decider function to select items -// from the []int. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt(decider func(int, int) bool) *Value { - var selected []int - v.EachInt(func(index int, val int) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int. -func (v *Value) GroupInt(grouper func(int, int) string) *Value { - groups := make(map[string][]int) - v.EachInt(func(index int, val int) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt uses the specified function to replace each ints -// by iterating each item. The data in the returned result will be a -// []int containing the replaced items. -func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { - arr := v.MustIntSlice() - replaced := make([]int, len(arr)) - v.EachInt(func(index int, val int) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt uses the specified collector function to collect a value -// for each of the ints in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { - arr := v.MustIntSlice() - collected := make([]interface{}, len(arr)) - v.EachInt(func(index int, val int) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int8 (int8 and []int8) -*/ - -// Int8 gets the value as a int8, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int8(optionalDefault ...int8) int8 { - if s, ok := v.data.(int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt8 gets the value as a int8. -// -// Panics if the object is not a int8. 
-func (v *Value) MustInt8() int8 { - return v.data.(int8) -} - -// Int8Slice gets the value as a []int8, returns the optionalDefault -// value or nil if the value is not a []int8. -func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { - if s, ok := v.data.([]int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt8Slice gets the value as a []int8. -// -// Panics if the object is not a []int8. -func (v *Value) MustInt8Slice() []int8 { - return v.data.([]int8) -} - -// IsInt8 gets whether the object contained is a int8 or not. -func (v *Value) IsInt8() bool { - _, ok := v.data.(int8) - return ok -} - -// IsInt8Slice gets whether the object contained is a []int8 or not. -func (v *Value) IsInt8Slice() bool { - _, ok := v.data.([]int8) - return ok -} - -// EachInt8 calls the specified callback for each object -// in the []int8. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt8(callback func(int, int8) bool) *Value { - for index, val := range v.MustInt8Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt8 uses the specified decider function to select items -// from the []int8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { - var selected []int8 - v.EachInt8(func(index int, val int8) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt8 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int8. -func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { - groups := make(map[string][]int8) - v.EachInt8(func(index int, val int8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt8 uses the specified function to replace each int8s -// by iterating each item. The data in the returned result will be a -// []int8 containing the replaced items. -func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { - arr := v.MustInt8Slice() - replaced := make([]int8, len(arr)) - v.EachInt8(func(index int, val int8) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt8 uses the specified collector function to collect a value -// for each of the int8s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { - arr := v.MustInt8Slice() - collected := make([]interface{}, len(arr)) - v.EachInt8(func(index int, val int8) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int16 (int16 and []int16) -*/ - -// Int16 gets the value as a int16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int16(optionalDefault ...int16) int16 { - if s, ok := v.data.(int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt16 gets the value as a int16. -// -// Panics if the object is not a int16. 
-func (v *Value) MustInt16() int16 { - return v.data.(int16) -} - -// Int16Slice gets the value as a []int16, returns the optionalDefault -// value or nil if the value is not a []int16. -func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { - if s, ok := v.data.([]int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt16Slice gets the value as a []int16. -// -// Panics if the object is not a []int16. -func (v *Value) MustInt16Slice() []int16 { - return v.data.([]int16) -} - -// IsInt16 gets whether the object contained is a int16 or not. -func (v *Value) IsInt16() bool { - _, ok := v.data.(int16) - return ok -} - -// IsInt16Slice gets whether the object contained is a []int16 or not. -func (v *Value) IsInt16Slice() bool { - _, ok := v.data.([]int16) - return ok -} - -// EachInt16 calls the specified callback for each object -// in the []int16. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt16(callback func(int, int16) bool) *Value { - for index, val := range v.MustInt16Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt16 uses the specified decider function to select items -// from the []int16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { - var selected []int16 - v.EachInt16(func(index int, val int16) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int16. -func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { - groups := make(map[string][]int16) - v.EachInt16(func(index int, val int16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt16 uses the specified function to replace each int16s -// by iterating each item. The data in the returned result will be a -// []int16 containing the replaced items. -func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { - arr := v.MustInt16Slice() - replaced := make([]int16, len(arr)) - v.EachInt16(func(index int, val int16) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt16 uses the specified collector function to collect a value -// for each of the int16s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { - arr := v.MustInt16Slice() - collected := make([]interface{}, len(arr)) - v.EachInt16(func(index int, val int16) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int32 (int32 and []int32) -*/ - -// Int32 gets the value as a int32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int32(optionalDefault ...int32) int32 { - if s, ok := v.data.(int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt32 gets the value as a int32. -// -// Panics if the object is not a int32. 
-func (v *Value) MustInt32() int32 { - return v.data.(int32) -} - -// Int32Slice gets the value as a []int32, returns the optionalDefault -// value or nil if the value is not a []int32. -func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { - if s, ok := v.data.([]int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt32Slice gets the value as a []int32. -// -// Panics if the object is not a []int32. -func (v *Value) MustInt32Slice() []int32 { - return v.data.([]int32) -} - -// IsInt32 gets whether the object contained is a int32 or not. -func (v *Value) IsInt32() bool { - _, ok := v.data.(int32) - return ok -} - -// IsInt32Slice gets whether the object contained is a []int32 or not. -func (v *Value) IsInt32Slice() bool { - _, ok := v.data.([]int32) - return ok -} - -// EachInt32 calls the specified callback for each object -// in the []int32. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt32(callback func(int, int32) bool) *Value { - for index, val := range v.MustInt32Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt32 uses the specified decider function to select items -// from the []int32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { - var selected []int32 - v.EachInt32(func(index int, val int32) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int32. -func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { - groups := make(map[string][]int32) - v.EachInt32(func(index int, val int32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt32 uses the specified function to replace each int32s -// by iterating each item. The data in the returned result will be a -// []int32 containing the replaced items. -func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { - arr := v.MustInt32Slice() - replaced := make([]int32, len(arr)) - v.EachInt32(func(index int, val int32) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt32 uses the specified collector function to collect a value -// for each of the int32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { - arr := v.MustInt32Slice() - collected := make([]interface{}, len(arr)) - v.EachInt32(func(index int, val int32) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Int64 (int64 and []int64) -*/ - -// Int64 gets the value as a int64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int64(optionalDefault ...int64) int64 { - if s, ok := v.data.(int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt64 gets the value as a int64. -// -// Panics if the object is not a int64. 
-func (v *Value) MustInt64() int64 { - return v.data.(int64) -} - -// Int64Slice gets the value as a []int64, returns the optionalDefault -// value or nil if the value is not a []int64. -func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { - if s, ok := v.data.([]int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt64Slice gets the value as a []int64. -// -// Panics if the object is not a []int64. -func (v *Value) MustInt64Slice() []int64 { - return v.data.([]int64) -} - -// IsInt64 gets whether the object contained is a int64 or not. -func (v *Value) IsInt64() bool { - _, ok := v.data.(int64) - return ok -} - -// IsInt64Slice gets whether the object contained is a []int64 or not. -func (v *Value) IsInt64Slice() bool { - _, ok := v.data.([]int64) - return ok -} - -// EachInt64 calls the specified callback for each object -// in the []int64. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt64(callback func(int, int64) bool) *Value { - for index, val := range v.MustInt64Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereInt64 uses the specified decider function to select items -// from the []int64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { - var selected []int64 - v.EachInt64(func(index int, val int64) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupInt64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int64. -func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { - groups := make(map[string][]int64) - v.EachInt64(func(index int, val int64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceInt64 uses the specified function to replace each int64s -// by iterating each item. The data in the returned result will be a -// []int64 containing the replaced items. -func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { - arr := v.MustInt64Slice() - replaced := make([]int64, len(arr)) - v.EachInt64(func(index int, val int64) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectInt64 uses the specified collector function to collect a value -// for each of the int64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { - arr := v.MustInt64Slice() - collected := make([]interface{}, len(arr)) - v.EachInt64(func(index int, val int64) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint (uint and []uint) -*/ - -// Uint gets the value as a uint, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint(optionalDefault ...uint) uint { - if s, ok := v.data.(uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint gets the value as a uint. -// -// Panics if the object is not a uint. 
-func (v *Value) MustUint() uint { - return v.data.(uint) -} - -// UintSlice gets the value as a []uint, returns the optionalDefault -// value or nil if the value is not a []uint. -func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { - if s, ok := v.data.([]uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintSlice gets the value as a []uint. -// -// Panics if the object is not a []uint. -func (v *Value) MustUintSlice() []uint { - return v.data.([]uint) -} - -// IsUint gets whether the object contained is a uint or not. -func (v *Value) IsUint() bool { - _, ok := v.data.(uint) - return ok -} - -// IsUintSlice gets whether the object contained is a []uint or not. -func (v *Value) IsUintSlice() bool { - _, ok := v.data.([]uint) - return ok -} - -// EachUint calls the specified callback for each object -// in the []uint. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint(callback func(int, uint) bool) *Value { - for index, val := range v.MustUintSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint uses the specified decider function to select items -// from the []uint. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint(decider func(int, uint) bool) *Value { - var selected []uint - v.EachUint(func(index int, val uint) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint. -func (v *Value) GroupUint(grouper func(int, uint) string) *Value { - groups := make(map[string][]uint) - v.EachUint(func(index int, val uint) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint uses the specified function to replace each uints -// by iterating each item. The data in the returned result will be a -// []uint containing the replaced items. -func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { - arr := v.MustUintSlice() - replaced := make([]uint, len(arr)) - v.EachUint(func(index int, val uint) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint uses the specified collector function to collect a value -// for each of the uints in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { - arr := v.MustUintSlice() - collected := make([]interface{}, len(arr)) - v.EachUint(func(index int, val uint) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint8 (uint8 and []uint8) -*/ - -// Uint8 gets the value as a uint8, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint8(optionalDefault ...uint8) uint8 { - if s, ok := v.data.(uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint8 gets the value as a uint8. -// -// Panics if the object is not a uint8. 
-func (v *Value) MustUint8() uint8 { - return v.data.(uint8) -} - -// Uint8Slice gets the value as a []uint8, returns the optionalDefault -// value or nil if the value is not a []uint8. -func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { - if s, ok := v.data.([]uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint8Slice gets the value as a []uint8. -// -// Panics if the object is not a []uint8. -func (v *Value) MustUint8Slice() []uint8 { - return v.data.([]uint8) -} - -// IsUint8 gets whether the object contained is a uint8 or not. -func (v *Value) IsUint8() bool { - _, ok := v.data.(uint8) - return ok -} - -// IsUint8Slice gets whether the object contained is a []uint8 or not. -func (v *Value) IsUint8Slice() bool { - _, ok := v.data.([]uint8) - return ok -} - -// EachUint8 calls the specified callback for each object -// in the []uint8. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { - for index, val := range v.MustUint8Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint8 uses the specified decider function to select items -// from the []uint8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { - var selected []uint8 - v.EachUint8(func(index int, val uint8) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint8 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint8. -func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { - groups := make(map[string][]uint8) - v.EachUint8(func(index int, val uint8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint8 uses the specified function to replace each uint8s -// by iterating each item. The data in the returned result will be a -// []uint8 containing the replaced items. -func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { - arr := v.MustUint8Slice() - replaced := make([]uint8, len(arr)) - v.EachUint8(func(index int, val uint8) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint8 uses the specified collector function to collect a value -// for each of the uint8s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { - arr := v.MustUint8Slice() - collected := make([]interface{}, len(arr)) - v.EachUint8(func(index int, val uint8) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint16 (uint16 and []uint16) -*/ - -// Uint16 gets the value as a uint16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint16(optionalDefault ...uint16) uint16 { - if s, ok := v.data.(uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint16 gets the value as a uint16. 
-// -// Panics if the object is not a uint16. -func (v *Value) MustUint16() uint16 { - return v.data.(uint16) -} - -// Uint16Slice gets the value as a []uint16, returns the optionalDefault -// value or nil if the value is not a []uint16. -func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { - if s, ok := v.data.([]uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint16Slice gets the value as a []uint16. -// -// Panics if the object is not a []uint16. -func (v *Value) MustUint16Slice() []uint16 { - return v.data.([]uint16) -} - -// IsUint16 gets whether the object contained is a uint16 or not. -func (v *Value) IsUint16() bool { - _, ok := v.data.(uint16) - return ok -} - -// IsUint16Slice gets whether the object contained is a []uint16 or not. -func (v *Value) IsUint16Slice() bool { - _, ok := v.data.([]uint16) - return ok -} - -// EachUint16 calls the specified callback for each object -// in the []uint16. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { - for index, val := range v.MustUint16Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint16 uses the specified decider function to select items -// from the []uint16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { - var selected []uint16 - v.EachUint16(func(index int, val uint16) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint16. -func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { - groups := make(map[string][]uint16) - v.EachUint16(func(index int, val uint16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint16 uses the specified function to replace each uint16s -// by iterating each item. The data in the returned result will be a -// []uint16 containing the replaced items. -func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { - arr := v.MustUint16Slice() - replaced := make([]uint16, len(arr)) - v.EachUint16(func(index int, val uint16) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint16 uses the specified collector function to collect a value -// for each of the uint16s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { - arr := v.MustUint16Slice() - collected := make([]interface{}, len(arr)) - v.EachUint16(func(index int, val uint16) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint32 (uint32 and []uint32) -*/ - -// Uint32 gets the value as a uint32, returns the optionalDefault -// value or a system default object if the value is the wrong type. 
-func (v *Value) Uint32(optionalDefault ...uint32) uint32 { - if s, ok := v.data.(uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint32 gets the value as a uint32. -// -// Panics if the object is not a uint32. -func (v *Value) MustUint32() uint32 { - return v.data.(uint32) -} - -// Uint32Slice gets the value as a []uint32, returns the optionalDefault -// value or nil if the value is not a []uint32. -func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { - if s, ok := v.data.([]uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint32Slice gets the value as a []uint32. -// -// Panics if the object is not a []uint32. -func (v *Value) MustUint32Slice() []uint32 { - return v.data.([]uint32) -} - -// IsUint32 gets whether the object contained is a uint32 or not. -func (v *Value) IsUint32() bool { - _, ok := v.data.(uint32) - return ok -} - -// IsUint32Slice gets whether the object contained is a []uint32 or not. -func (v *Value) IsUint32Slice() bool { - _, ok := v.data.([]uint32) - return ok -} - -// EachUint32 calls the specified callback for each object -// in the []uint32. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { - for index, val := range v.MustUint32Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint32 uses the specified decider function to select items -// from the []uint32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { - var selected []uint32 - v.EachUint32(func(index int, val uint32) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint32. -func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { - groups := make(map[string][]uint32) - v.EachUint32(func(index int, val uint32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint32 uses the specified function to replace each uint32s -// by iterating each item. The data in the returned result will be a -// []uint32 containing the replaced items. -func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { - arr := v.MustUint32Slice() - replaced := make([]uint32, len(arr)) - v.EachUint32(func(index int, val uint32) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint32 uses the specified collector function to collect a value -// for each of the uint32s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { - arr := v.MustUint32Slice() - collected := make([]interface{}, len(arr)) - v.EachUint32(func(index int, val uint32) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uint64 (uint64 and []uint64) -*/ - -// Uint64 gets the value as a uint64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint64(optionalDefault ...uint64) uint64 { - if s, ok := v.data.(uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint64 gets the value as a uint64. -// -// Panics if the object is not a uint64. -func (v *Value) MustUint64() uint64 { - return v.data.(uint64) -} - -// Uint64Slice gets the value as a []uint64, returns the optionalDefault -// value or nil if the value is not a []uint64. -func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { - if s, ok := v.data.([]uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint64Slice gets the value as a []uint64. -// -// Panics if the object is not a []uint64. -func (v *Value) MustUint64Slice() []uint64 { - return v.data.([]uint64) -} - -// IsUint64 gets whether the object contained is a uint64 or not. -func (v *Value) IsUint64() bool { - _, ok := v.data.(uint64) - return ok -} - -// IsUint64Slice gets whether the object contained is a []uint64 or not. -func (v *Value) IsUint64Slice() bool { - _, ok := v.data.([]uint64) - return ok -} - -// EachUint64 calls the specified callback for each object -// in the []uint64. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { - for index, val := range v.MustUint64Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUint64 uses the specified decider function to select items -// from the []uint64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { - var selected []uint64 - v.EachUint64(func(index int, val uint64) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUint64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint64. -func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { - groups := make(map[string][]uint64) - v.EachUint64(func(index int, val uint64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUint64 uses the specified function to replace each uint64s -// by iterating each item. The data in the returned result will be a -// []uint64 containing the replaced items. 
-func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { - arr := v.MustUint64Slice() - replaced := make([]uint64, len(arr)) - v.EachUint64(func(index int, val uint64) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUint64 uses the specified collector function to collect a value -// for each of the uint64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { - arr := v.MustUint64Slice() - collected := make([]interface{}, len(arr)) - v.EachUint64(func(index int, val uint64) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Uintptr (uintptr and []uintptr) -*/ - -// Uintptr gets the value as a uintptr, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { - if s, ok := v.data.(uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUintptr gets the value as a uintptr. -// -// Panics if the object is not a uintptr. -func (v *Value) MustUintptr() uintptr { - return v.data.(uintptr) -} - -// UintptrSlice gets the value as a []uintptr, returns the optionalDefault -// value or nil if the value is not a []uintptr. -func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { - if s, ok := v.data.([]uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintptrSlice gets the value as a []uintptr. -// -// Panics if the object is not a []uintptr. -func (v *Value) MustUintptrSlice() []uintptr { - return v.data.([]uintptr) -} - -// IsUintptr gets whether the object contained is a uintptr or not. -func (v *Value) IsUintptr() bool { - _, ok := v.data.(uintptr) - return ok -} - -// IsUintptrSlice gets whether the object contained is a []uintptr or not. -func (v *Value) IsUintptrSlice() bool { - _, ok := v.data.([]uintptr) - return ok -} - -// EachUintptr calls the specified callback for each object -// in the []uintptr. -// -// Panics if the object is the wrong type. -func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { - for index, val := range v.MustUintptrSlice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereUintptr uses the specified decider function to select items -// from the []uintptr. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { - var selected []uintptr - v.EachUintptr(func(index int, val uintptr) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupUintptr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uintptr. 
-func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { - groups := make(map[string][]uintptr) - v.EachUintptr(func(index int, val uintptr) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uintptr, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceUintptr uses the specified function to replace each uintptrs -// by iterating each item. The data in the returned result will be a -// []uintptr containing the replaced items. -func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { - arr := v.MustUintptrSlice() - replaced := make([]uintptr, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectUintptr uses the specified collector function to collect a value -// for each of the uintptrs in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { - arr := v.MustUintptrSlice() - collected := make([]interface{}, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Float32 (float32 and []float32) -*/ - -// Float32 gets the value as a float32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Float32(optionalDefault ...float32) float32 { - if s, ok := v.data.(float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat32 gets the value as a float32. -// -// Panics if the object is not a float32. -func (v *Value) MustFloat32() float32 { - return v.data.(float32) -} - -// Float32Slice gets the value as a []float32, returns the optionalDefault -// value or nil if the value is not a []float32. -func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { - if s, ok := v.data.([]float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat32Slice gets the value as a []float32. -// -// Panics if the object is not a []float32. -func (v *Value) MustFloat32Slice() []float32 { - return v.data.([]float32) -} - -// IsFloat32 gets whether the object contained is a float32 or not. -func (v *Value) IsFloat32() bool { - _, ok := v.data.(float32) - return ok -} - -// IsFloat32Slice gets whether the object contained is a []float32 or not. -func (v *Value) IsFloat32Slice() bool { - _, ok := v.data.([]float32) - return ok -} - -// EachFloat32 calls the specified callback for each object -// in the []float32. -// -// Panics if the object is the wrong type. -func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { - for index, val := range v.MustFloat32Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereFloat32 uses the specified decider function to select items -// from the []float32. The object contained in the result will contain -// only the selected items. 
-func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { - var selected []float32 - v.EachFloat32(func(index int, val float32) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupFloat32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float32. -func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { - groups := make(map[string][]float32) - v.EachFloat32(func(index int, val float32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceFloat32 uses the specified function to replace each float32s -// by iterating each item. The data in the returned result will be a -// []float32 containing the replaced items. -func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { - arr := v.MustFloat32Slice() - replaced := make([]float32, len(arr)) - v.EachFloat32(func(index int, val float32) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectFloat32 uses the specified collector function to collect a value -// for each of the float32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { - arr := v.MustFloat32Slice() - collected := make([]interface{}, len(arr)) - v.EachFloat32(func(index int, val float32) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Float64 (float64 and []float64) -*/ - -// Float64 gets the value as a float64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Float64(optionalDefault ...float64) float64 { - if s, ok := v.data.(float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat64 gets the value as a float64. -// -// Panics if the object is not a float64. -func (v *Value) MustFloat64() float64 { - return v.data.(float64) -} - -// Float64Slice gets the value as a []float64, returns the optionalDefault -// value or nil if the value is not a []float64. -func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { - if s, ok := v.data.([]float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat64Slice gets the value as a []float64. -// -// Panics if the object is not a []float64. -func (v *Value) MustFloat64Slice() []float64 { - return v.data.([]float64) -} - -// IsFloat64 gets whether the object contained is a float64 or not. -func (v *Value) IsFloat64() bool { - _, ok := v.data.(float64) - return ok -} - -// IsFloat64Slice gets whether the object contained is a []float64 or not. -func (v *Value) IsFloat64Slice() bool { - _, ok := v.data.([]float64) - return ok -} - -// EachFloat64 calls the specified callback for each object -// in the []float64. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { - for index, val := range v.MustFloat64Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereFloat64 uses the specified decider function to select items -// from the []float64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { - var selected []float64 - v.EachFloat64(func(index int, val float64) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupFloat64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float64. -func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { - groups := make(map[string][]float64) - v.EachFloat64(func(index int, val float64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceFloat64 uses the specified function to replace each float64s -// by iterating each item. The data in the returned result will be a -// []float64 containing the replaced items. -func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { - arr := v.MustFloat64Slice() - replaced := make([]float64, len(arr)) - v.EachFloat64(func(index int, val float64) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectFloat64 uses the specified collector function to collect a value -// for each of the float64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { - arr := v.MustFloat64Slice() - collected := make([]interface{}, len(arr)) - v.EachFloat64(func(index int, val float64) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Complex64 (complex64 and []complex64) -*/ - -// Complex64 gets the value as a complex64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex64(optionalDefault ...complex64) complex64 { - if s, ok := v.data.(complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex64 gets the value as a complex64. -// -// Panics if the object is not a complex64. -func (v *Value) MustComplex64() complex64 { - return v.data.(complex64) -} - -// Complex64Slice gets the value as a []complex64, returns the optionalDefault -// value or nil if the value is not a []complex64. -func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { - if s, ok := v.data.([]complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex64Slice gets the value as a []complex64. -// -// Panics if the object is not a []complex64. -func (v *Value) MustComplex64Slice() []complex64 { - return v.data.([]complex64) -} - -// IsComplex64 gets whether the object contained is a complex64 or not. -func (v *Value) IsComplex64() bool { - _, ok := v.data.(complex64) - return ok -} - -// IsComplex64Slice gets whether the object contained is a []complex64 or not. 
-func (v *Value) IsComplex64Slice() bool { - _, ok := v.data.([]complex64) - return ok -} - -// EachComplex64 calls the specified callback for each object -// in the []complex64. -// -// Panics if the object is the wrong type. -func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { - for index, val := range v.MustComplex64Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereComplex64 uses the specified decider function to select items -// from the []complex64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { - var selected []complex64 - v.EachComplex64(func(index int, val complex64) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupComplex64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]complex64. -func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { - groups := make(map[string][]complex64) - v.EachComplex64(func(index int, val complex64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceComplex64 uses the specified function to replace each complex64s -// by iterating each item. The data in the returned result will be a -// []complex64 containing the replaced items. -func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { - arr := v.MustComplex64Slice() - replaced := make([]complex64, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectComplex64 uses the specified collector function to collect a value -// for each of the complex64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { - arr := v.MustComplex64Slice() - collected := make([]interface{}, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} - -/* - Complex128 (complex128 and []complex128) -*/ - -// Complex128 gets the value as a complex128, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex128(optionalDefault ...complex128) complex128 { - if s, ok := v.data.(complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex128 gets the value as a complex128. -// -// Panics if the object is not a complex128. -func (v *Value) MustComplex128() complex128 { - return v.data.(complex128) -} - -// Complex128Slice gets the value as a []complex128, returns the optionalDefault -// value or nil if the value is not a []complex128. -func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { - if s, ok := v.data.([]complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex128Slice gets the value as a []complex128. -// -// Panics if the object is not a []complex128. 
-func (v *Value) MustComplex128Slice() []complex128 { - return v.data.([]complex128) -} - -// IsComplex128 gets whether the object contained is a complex128 or not. -func (v *Value) IsComplex128() bool { - _, ok := v.data.(complex128) - return ok -} - -// IsComplex128Slice gets whether the object contained is a []complex128 or not. -func (v *Value) IsComplex128Slice() bool { - _, ok := v.data.([]complex128) - return ok -} - -// EachComplex128 calls the specified callback for each object -// in the []complex128. -// -// Panics if the object is the wrong type. -func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { - for index, val := range v.MustComplex128Slice() { - carryon := callback(index, val) - if !carryon { - break - } - } - return v -} - -// WhereComplex128 uses the specified decider function to select items -// from the []complex128. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { - var selected []complex128 - v.EachComplex128(func(index int, val complex128) bool { - shouldSelect := decider(index, val) - if !shouldSelect { - selected = append(selected, val) - } - return true - }) - return &Value{data: selected} -} - -// GroupComplex128 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]complex128. -func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { - groups := make(map[string][]complex128) - v.EachComplex128(func(index int, val complex128) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex128, 0) - } - groups[group] = append(groups[group], val) - return true - }) - return &Value{data: groups} -} - -// ReplaceComplex128 uses the specified function to replace each complex128s -// by iterating each item. The data in the returned result will be a -// []complex128 containing the replaced items. -func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { - arr := v.MustComplex128Slice() - replaced := make([]complex128, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { - replaced[index] = replacer(index, val) - return true - }) - return &Value{data: replaced} -} - -// CollectComplex128 uses the specified collector function to collect a value -// for each of the complex128s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { - arr := v.MustComplex128Slice() - collected := make([]interface{}, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} -} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go deleted file mode 100644 index e4b4a1433..000000000 --- a/vendor/github.com/stretchr/objx/value.go +++ /dev/null @@ -1,53 +0,0 @@ -package objx - -import ( - "fmt" - "strconv" -) - -// Value provides methods for extracting interface{} data in various -// types. 
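Editorial aside (not part of the diff): each numeric family in the deleted file closes with the same four callback helpers: Each iterates, Where filters with a decider, Group buckets values under a string key, and Collect maps every element to an interface{}. Note that the vendored Where* helpers shown above append the values the decider rejects, while their doc comments describe keeping the selected ones. The sketch below follows the documented behaviour; it is a standalone generic illustration (Go 1.18+), not objx code.

package main

import "fmt"

// where keeps the elements for which the decider returns true.
func where[T any](in []T, decider func(int, T) bool) []T {
	var selected []T
	for i, v := range in {
		if decider(i, v) {
			selected = append(selected, v)
		}
	}
	return selected
}

// group buckets elements under the string key returned by the grouper.
func group[T any](in []T, grouper func(int, T) string) map[string][]T {
	groups := make(map[string][]T)
	for i, v := range in {
		groups[grouper(i, v)] = append(groups[grouper(i, v)], v)
	}
	return groups
}

// collect maps every element to an arbitrary value, mirroring Collect*.
func collect[T any](in []T, collector func(int, T) interface{}) []interface{} {
	out := make([]interface{}, len(in))
	for i, v := range in {
		out[i] = collector(i, v)
	}
	return out
}

func main() {
	nums := []int32{1, 2, 3, 4}
	fmt.Println(where(nums, func(_ int, n int32) bool { return n%2 == 0 })) // [2 4]
	fmt.Println(group(nums, func(_ int, n int32) string {
		if n%2 == 0 {
			return "even"
		}
		return "odd"
	})) // map[even:[2 4] odd:[1 3]]
	fmt.Println(collect(nums, func(_ int, n int32) interface{} { return n * 10 }))
}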
-type Value struct { - // data contains the raw data being managed by this Value - data interface{} -} - -// Data returns the raw data contained by this Value -func (v *Value) Data() interface{} { - return v.data -} - -// String returns the value always as a string -func (v *Value) String() string { - switch { - case v.IsStr(): - return v.Str() - case v.IsBool(): - return strconv.FormatBool(v.Bool()) - case v.IsFloat32(): - return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) - case v.IsFloat64(): - return strconv.FormatFloat(v.Float64(), 'f', -1, 64) - case v.IsInt(): - return strconv.FormatInt(int64(v.Int()), 10) - case v.IsInt8(): - return strconv.FormatInt(int64(v.Int8()), 10) - case v.IsInt16(): - return strconv.FormatInt(int64(v.Int16()), 10) - case v.IsInt32(): - return strconv.FormatInt(int64(v.Int32()), 10) - case v.IsInt64(): - return strconv.FormatInt(v.Int64(), 10) - case v.IsUint(): - return strconv.FormatUint(uint64(v.Uint()), 10) - case v.IsUint8(): - return strconv.FormatUint(uint64(v.Uint8()), 10) - case v.IsUint16(): - return strconv.FormatUint(uint64(v.Uint16()), 10) - case v.IsUint32(): - return strconv.FormatUint(uint64(v.Uint32()), 10) - case v.IsUint64(): - return strconv.FormatUint(v.Uint64(), 10) - } - return fmt.Sprintf("%#v", v.Data()) -} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE deleted file mode 100644 index f38ec5956..000000000 --- a/vendor/github.com/stretchr/testify/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go deleted file mode 100644 index e0364e9e7..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ /dev/null @@ -1,566 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Conditionf uses a Comparison to assert a complex condition. -func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Condition(t, comp, append([]interface{}{msg}, args...)...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. 
-// -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") -func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Contains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return DirExists(t, path, append([]interface{}{msg}, args...)...) -} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Emptyf(t, obj, "error message %s", "formatted") -func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Empty(t, object, append([]interface{}{msg}, args...)...) -} - -// Equalf asserts that two objects are equal. -// -// assert.Equalf(t, 123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") -func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) -} - -// EqualValuesf asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) -func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). 
-// -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Error(t, err, append([]interface{}{msg}, args...)...) -} - -// Eventuallyf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. -// -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) -} - -// Exactlyf asserts that two objects are equal in value and type. -// -// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) -func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Failf reports a failure through -func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) -} - -// FailNowf fails test -func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) -} - -// Falsef asserts that the specified value is false. -// -// assert.Falsef(t, myBool, "error message %s", "formatted") -func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return False(t, value, append([]interface{}{msg}, args...)...) -} - -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return FileExists(t, path, append([]interface{}{msg}, args...)...) -} - -// Greaterf asserts that the first element is greater than the second -// -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") -func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Greater(t, e1, e2, append([]interface{}{msg}, args...)...) 
-} - -// GreaterOrEqualf asserts that the first element is greater than or equal to the second -// -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") -func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// HTTPBodyContainsf asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) -} - -// HTTPBodyNotContainsf asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) -} - -// HTTPErrorf asserts that a specified handler returns an error status code. -// -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// HTTPRedirectf asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// HTTPSuccessf asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). 
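Editorial aside (not part of the diff): every deleted *f helper in this generated testify file follows one template: check for the optional tHelper interface so failure output points at the caller, then delegate to the non-formatted assertion with the format string and its arguments prepended to the variadic msgAndArgs slice. The sketch below shows only that delegation pattern with hypothetical assertion names; it is not testify's API.

package main

import "fmt"

// testingT is a stand-in for the TestingT parameter used above.
type testingT interface {
	Errorf(format string, args ...interface{})
}

// tHelper matches the optional Helper() check performed by the wrappers.
type tHelper interface {
	Helper()
}

// positive is the plain assertion; it accepts trailing msgAndArgs the
// same way the vendored assertions do.
func positive(t testingT, n int, msgAndArgs ...interface{}) bool {
	if n > 0 {
		return true
	}
	t.Errorf("expected positive, got %d (%v)", n, msgAndArgs)
	return false
}

// positivef is the formatted wrapper: it folds msg and args into the
// variadic slice, mirroring the deleted Conditionf/Containsf helpers.
func positivef(t testingT, n int, msg string, args ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	return positive(t, n, append([]interface{}{msg}, args...)...)
}

// stubT lets the sketch run outside the testing package.
type stubT struct{}

func (stubT) Errorf(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) }

func main() {
	positivef(stubT{}, -1, "error message %s", "formatted")
}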
-func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// Implementsf asserts that an object is implemented by the specified interface. -// -// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) -func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. -// -// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) -func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) -} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) -} - -// IsTypef asserts that the specified objects are of the same type. -func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) -} - -// JSONEqf asserts that two JSON strings are equivalent. -// -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// YAMLEqf asserts that two YAML strings are equivalent. 
-func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. -// -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") -func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Len(t, object, length, append([]interface{}{msg}, args...)...) -} - -// Lessf asserts that the first element is less than the second -// -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) -// assert.Lessf(t, "a", "b", "error message %s", "formatted") -func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Less(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// LessOrEqualf asserts that the first element is less than or equal to the second -// -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") -func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// Nilf asserts that the specified object is nil. -// -// assert.Nilf(t, err, "error message %s", "formatted") -func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Nil(t, object, append([]interface{}{msg}, args...)...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NoError(t, err, append([]interface{}{msg}, args...)...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") -func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. 
-// -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEmpty(t, object, append([]interface{}{msg}, args...)...) -} - -// NotEqualf asserts that the specified values are NOT equal. -// -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// NotNilf asserts that the specified object is not nil. -// -// assert.NotNilf(t, err, "error message %s", "formatted") -func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotNil(t, object, append([]interface{}{msg}, args...)...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") -func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotPanics(t, f, append([]interface{}{msg}, args...)...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. -// -// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") -func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// NotZerof asserts that i is not the zero value for its type. -func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotZero(t, i, append([]interface{}{msg}, args...)...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") -func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Panics(t, f, append([]interface{}{msg}, args...)...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. 
-// -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") -func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// Samef asserts that two pointers reference the same object. -// -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Same(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Subset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// Truef asserts that the specified value is true. -// -// assert.Truef(t, myBool, "error message %s", "formatted") -func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return True(t, value, append([]interface{}{msg}, args...)...) -} - -// WithinDurationf asserts that the two times are within duration delta of each other. -// -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// Zerof asserts that i is the zero value for its type. -func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Zero(t, i, append([]interface{}{msg}, args...)...) 
-} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl deleted file mode 100644 index d2bb0b817..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentFormat}} -func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { - if h, ok := t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go deleted file mode 100644 index 26830403a..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ /dev/null @@ -1,1120 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Condition(a.t, comp, msgAndArgs...) -} - -// Conditionf uses a Comparison to assert a complex condition. -func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Conditionf(a.t, comp, msg, args...) -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Contains("Hello World", "World") -// a.Contains(["Hello", "World"], "World") -// a.Contains({"Hello": "World"}, "Hello") -func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Contains(a.t, s, contains, msgAndArgs...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Containsf("Hello World", "World", "error message %s", "formatted") -// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") -// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") -func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Containsf(a.t, s, contains, msg, args...) -} - -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return DirExists(a.t, path, msgAndArgs...) -} - -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return DirExistsf(a.t, path, msg, args...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. 
-// -// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) -func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(a.t, listA, listB, msgAndArgs...) -} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatchf(a.t, listA, listB, msg, args...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Empty(obj) -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Empty(a.t, object, msgAndArgs...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Emptyf(obj, "error message %s", "formatted") -func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Emptyf(a.t, object, msg, args...) -} - -// Equal asserts that two objects are equal. -// -// a.Equal(123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equal(a.t, expected, actual, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString) -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualError(a.t, theError, errString, msgAndArgs...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") -func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualErrorf(a.t, theError, errString, msg, args...) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValues(uint32(123), int32(123)) -func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - -// EqualValuesf asserts that two objects are equal or convertable to the same types -// and equal. 
-// -// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) -func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValuesf(a.t, expected, actual, msg, args...) -} - -// Equalf asserts that two objects are equal. -// -// a.Equalf(123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equalf(a.t, expected, actual, msg, args...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } -func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Error(a.t, err, msgAndArgs...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Errorf(a.t, err, msg, args...) -} - -// Eventually asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. -// -// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) -func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) -} - -// Eventuallyf asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. -// -// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Eventuallyf(a.t, condition, waitFor, tick, msg, args...) -} - -// Exactly asserts that two objects are equal in value and type. -// -// a.Exactly(int32(123), int64(123)) -func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactly(a.t, expected, actual, msgAndArgs...) -} - -// Exactlyf asserts that two objects are equal in value and type. -// -// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) -func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactlyf(a.t, expected, actual, msg, args...) -} - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Fail(a.t, failureMessage, msgAndArgs...) 
-} - -// FailNow fails test -func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNow(a.t, failureMessage, msgAndArgs...) -} - -// FailNowf fails test -func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNowf(a.t, failureMessage, msg, args...) -} - -// Failf reports a failure through -func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Failf(a.t, failureMessage, msg, args...) -} - -// False asserts that the specified value is false. -// -// a.False(myBool) -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return False(a.t, value, msgAndArgs...) -} - -// Falsef asserts that the specified value is false. -// -// a.Falsef(myBool, "error message %s", "formatted") -func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Falsef(a.t, value, msg, args...) -} - -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FileExists(a.t, path, msgAndArgs...) -} - -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FileExistsf(a.t, path, msg, args...) -} - -// Greater asserts that the first element is greater than the second -// -// a.Greater(2, 1) -// a.Greater(float64(2), float64(1)) -// a.Greater("b", "a") -func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Greater(a.t, e1, e2, msgAndArgs...) -} - -// GreaterOrEqual asserts that the first element is greater than or equal to the second -// -// a.GreaterOrEqual(2, 1) -// a.GreaterOrEqual(2, 2) -// a.GreaterOrEqual("b", "a") -// a.GreaterOrEqual("b", "b") -func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return GreaterOrEqual(a.t, e1, e2, msgAndArgs...) -} - -// GreaterOrEqualf asserts that the first element is greater than or equal to the second -// -// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") -// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") -// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") -// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") -func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return GreaterOrEqualf(a.t, e1, e2, msg, args...) 
-} - -// Greaterf asserts that the first element is greater than the second -// -// a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) -// a.Greaterf("b", "a", "error message %s", "formatted") -func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Greaterf(a.t, e1, e2, msg, args...) -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) -} - -// HTTPBodyContainsf asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) -} - -// HTTPBodyNotContainsf asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPError(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPErrorf asserts that a specified handler returns an error status code. 
-// -// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPErrorf(a.t, handler, method, url, values, msg, args...) -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPRedirectf asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPSuccessf asserts that a specified handler returns a success status code. -// -// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) -} - -// Implements asserts that an object is implemented by the specified interface. -// -// a.Implements((*MyInterface)(nil), new(MyObject)) -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// Implementsf asserts that an object is implemented by the specified interface. -// -// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) -func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implementsf(a.t, interfaceObject, object, msg, args...) -} - -// InDelta asserts that the two numerals are within delta of each other. 
-// -// a.InDelta(math.Pi, (22 / 7.0), 0.01) -func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. -// -// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) -func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaf(a.t, expected, actual, delta, msg, args...) -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. -func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) 
-} - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsType(a.t, expectedType, object, msgAndArgs...) -} - -// IsTypef asserts that the specified objects are of the same type. -func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsTypef(a.t, expectedType, object, msg, args...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEq(a.t, expected, actual, msgAndArgs...) -} - -// JSONEqf asserts that two JSON strings are equivalent. -// -// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEqf(a.t, expected, actual, msg, args...) -} - -// YAMLEq asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEq(a.t, expected, actual, msgAndArgs...) -} - -// YAMLEqf asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEqf(a.t, expected, actual, msg, args...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3) -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Len(a.t, object, length, msgAndArgs...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. -// -// a.Lenf(mySlice, 3, "error message %s", "formatted") -func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Lenf(a.t, object, length, msg, args...) -} - -// Less asserts that the first element is less than the second -// -// a.Less(1, 2) -// a.Less(float64(1), float64(2)) -// a.Less("a", "b") -func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Less(a.t, e1, e2, msgAndArgs...) -} - -// LessOrEqual asserts that the first element is less than or equal to the second -// -// a.LessOrEqual(1, 2) -// a.LessOrEqual(2, 2) -// a.LessOrEqual("a", "b") -// a.LessOrEqual("b", "b") -func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return LessOrEqual(a.t, e1, e2, msgAndArgs...) 
-} - -// LessOrEqualf asserts that the first element is less than or equal to the second -// -// a.LessOrEqualf(1, 2, "error message %s", "formatted") -// a.LessOrEqualf(2, 2, "error message %s", "formatted") -// a.LessOrEqualf("a", "b", "error message %s", "formatted") -// a.LessOrEqualf("b", "b", "error message %s", "formatted") -func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return LessOrEqualf(a.t, e1, e2, msg, args...) -} - -// Lessf asserts that the first element is less than the second -// -// a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) -// a.Lessf("a", "b", "error message %s", "formatted") -func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Lessf(a.t, e1, e2, msg, args...) -} - -// Nil asserts that the specified object is nil. -// -// a.Nil(err) -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nil(a.t, object, msgAndArgs...) -} - -// Nilf asserts that the specified object is nil. -// -// a.Nilf(err, "error message %s", "formatted") -func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nilf(a.t, object, msg, args...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoError(a.t, err, msgAndArgs...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoErrorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoErrorf(a.t, err, msg, args...) -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContains("Hello World", "Earth") -// a.NotContains(["Hello", "World"], "Earth") -// a.NotContains({"Hello": "World"}, "Earth") -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") -// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") -// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") -func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContainsf(a.t, s, contains, msg, args...) -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. 
-// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmpty(a.t, object, msgAndArgs...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmptyf(a.t, object, msg, args...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualf asserts that the specified values are NOT equal. -// -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualf(a.t, expected, actual, msg, args...) -} - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err) -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNil(a.t, object, msgAndArgs...) -} - -// NotNilf asserts that the specified object is not nil. -// -// a.NotNilf(err, "error message %s", "formatted") -func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNilf(a.t, object, msg, args...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ RemainCalm() }) -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanics(a.t, f, msgAndArgs...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") -func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanicsf(a.t, f, msg, args...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. 
-// -// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") -func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexpf(a.t, rx, str, msg, args...) -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubset(a.t, list, subset, msgAndArgs...) -} - -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubsetf(a.t, list, subset, msg, args...) -} - -// NotZero asserts that i is not the zero value for its type. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZero(a.t, i, msgAndArgs...) -} - -// NotZerof asserts that i is not the zero value for its type. -func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZerof(a.t, i, msg, args...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ GoCrazy() }) -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panics(a.t, f, msgAndArgs...) -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(a.t, expected, f, msgAndArgs...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValuef(a.t, expected, f, msg, args...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panicsf(a.t, f, msg, args...) -} - -// Regexp asserts that a specified regexp matches a string. 
-// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") -func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexpf(a.t, rx, str, msg, args...) -} - -// Same asserts that two pointers reference the same object. -// -// a.Same(ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Same(a.t, expected, actual, msgAndArgs...) -} - -// Samef asserts that two pointers reference the same object. -// -// a.Samef(ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Samef(a.t, expected, actual, msg, args...) -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subset(a.t, list, subset, msgAndArgs...) -} - -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subsetf(a.t, list, subset, msg, args...) -} - -// True asserts that the specified value is true. -// -// a.True(myBool) -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return True(a.t, value, msgAndArgs...) -} - -// Truef asserts that the specified value is true. -// -// a.Truef(myBool, "error message %s", "formatted") -func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Truef(a.t, value, msg, args...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) 
-} - -// WithinDurationf asserts that the two times are within duration delta of each other. -// -// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDurationf(a.t, expected, actual, delta, msg, args...) -} - -// Zero asserts that i is the zero value for its type. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zero(a.t, i, msgAndArgs...) -} - -// Zerof asserts that i is the zero value for its type. -func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zerof(a.t, i, msg, args...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 188bb9e17..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - if h, ok := a.t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go deleted file mode 100644 index 15a486ca6..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ /dev/null @@ -1,309 +0,0 @@ -package assert - -import ( - "fmt" - "reflect" -) - -func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { - switch kind { - case reflect.Int: - { - intobj1 := obj1.(int) - intobj2 := obj2.(int) - if intobj1 > intobj2 { - return -1, true - } - if intobj1 == intobj2 { - return 0, true - } - if intobj1 < intobj2 { - return 1, true - } - } - case reflect.Int8: - { - int8obj1 := obj1.(int8) - int8obj2 := obj2.(int8) - if int8obj1 > int8obj2 { - return -1, true - } - if int8obj1 == int8obj2 { - return 0, true - } - if int8obj1 < int8obj2 { - return 1, true - } - } - case reflect.Int16: - { - int16obj1 := obj1.(int16) - int16obj2 := obj2.(int16) - if int16obj1 > int16obj2 { - return -1, true - } - if int16obj1 == int16obj2 { - return 0, true - } - if int16obj1 < int16obj2 { - return 1, true - } - } - case reflect.Int32: - { - int32obj1 := obj1.(int32) - int32obj2 := obj2.(int32) - if int32obj1 > int32obj2 { - return -1, true - } - if int32obj1 == int32obj2 { - return 0, true - } - if int32obj1 < int32obj2 { - return 1, true - } - } - case reflect.Int64: - { - int64obj1 := obj1.(int64) - int64obj2 := obj2.(int64) - if int64obj1 > int64obj2 { - return -1, true - } - if int64obj1 == int64obj2 { - return 0, true - } - if int64obj1 < int64obj2 { - return 1, true - } - } - case reflect.Uint: - { - uintobj1 := obj1.(uint) - uintobj2 := obj2.(uint) - if uintobj1 > uintobj2 { - return -1, true - } - if uintobj1 == uintobj2 { - return 0, true - } - if uintobj1 < uintobj2 { - return 1, true - } - } - case reflect.Uint8: - { - uint8obj1 := obj1.(uint8) - uint8obj2 := obj2.(uint8) - if uint8obj1 > uint8obj2 { - return -1, true - } - if uint8obj1 == uint8obj2 { - return 0, true - } - if uint8obj1 < uint8obj2 { - return 1, true - } - } - case reflect.Uint16: - { - uint16obj1 := obj1.(uint16) - uint16obj2 := obj2.(uint16) - if 
uint16obj1 > uint16obj2 { - return -1, true - } - if uint16obj1 == uint16obj2 { - return 0, true - } - if uint16obj1 < uint16obj2 { - return 1, true - } - } - case reflect.Uint32: - { - uint32obj1 := obj1.(uint32) - uint32obj2 := obj2.(uint32) - if uint32obj1 > uint32obj2 { - return -1, true - } - if uint32obj1 == uint32obj2 { - return 0, true - } - if uint32obj1 < uint32obj2 { - return 1, true - } - } - case reflect.Uint64: - { - uint64obj1 := obj1.(uint64) - uint64obj2 := obj2.(uint64) - if uint64obj1 > uint64obj2 { - return -1, true - } - if uint64obj1 == uint64obj2 { - return 0, true - } - if uint64obj1 < uint64obj2 { - return 1, true - } - } - case reflect.Float32: - { - float32obj1 := obj1.(float32) - float32obj2 := obj2.(float32) - if float32obj1 > float32obj2 { - return -1, true - } - if float32obj1 == float32obj2 { - return 0, true - } - if float32obj1 < float32obj2 { - return 1, true - } - } - case reflect.Float64: - { - float64obj1 := obj1.(float64) - float64obj2 := obj2.(float64) - if float64obj1 > float64obj2 { - return -1, true - } - if float64obj1 == float64obj2 { - return 0, true - } - if float64obj1 < float64obj2 { - return 1, true - } - } - case reflect.String: - { - stringobj1 := obj1.(string) - stringobj2 := obj2.(string) - if stringobj1 > stringobj2 { - return -1, true - } - if stringobj1 == stringobj2 { - return 0, true - } - if stringobj1 < stringobj2 { - return 1, true - } - } - } - - return 0, false -} - -// Greater asserts that the first element is greater than the second -// -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") -func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) - } - - res, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) - } - - if res != -1 { - return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...) - } - - return true -} - -// GreaterOrEqual asserts that the first element is greater than or equal to the second -// -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") -func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) - } - - res, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) - } - - if res != -1 && res != 0 { - return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...) 
- } - - return true -} - -// Less asserts that the first element is less than the second -// -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") -func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) - } - - res, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) - } - - if res != 1 { - return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...) - } - - return true -} - -// LessOrEqual asserts that the first element is less than or equal to the second -// -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") -func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) - } - - res, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) - } - - if res != 1 && res != 0 { - return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...) - } - - return true -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index 044da8b01..000000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,1498 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "os" - "reflect" - "regexp" - "runtime" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v2" -) - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful -// for table driven tests. -type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool - -// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful -// for table driven tests. -type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool - -// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful -// for table driven tests. -type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool - -// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful -// for table driven tests. -type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool - -// Comparison a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. 
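Reviewer note: the ordering assertions removed from vendor/ above (Greater, GreaterOrEqual, Less, LessOrEqual) keep the same signatures in the testify release now consumed through go.mod. A minimal sketch of their use; the test name and operand values are illustrative only, not taken from this repository.

package example_test

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

// Illustrative use of the ordering helpers whose vendored copies are
// deleted in this diff; they now resolve from the module cache.
func TestOrderingHelpers(t *testing.T) {
    assert.Greater(t, 2, 1)
    assert.GreaterOrEqual(t, "b", "b")
    assert.Less(t, float64(1), float64(2))
    assert.LessOrEqual(t, 2, 2)
}

Both operands must share the same kind, as the compare helper in the deleted assertion_order.go enforces.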
-// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - if expected == nil || actual == nil { - return expected == actual - } - - exp, ok := expected.([]byte) - if !ok { - return reflect.DeepEqual(expected, actual) - } - - act, ok := actual.([]byte) - if !ok { - return false - } - if exp == nil || act == nil { - return exp == nil && act == nil - } - return bytes.Equal(exp, act) -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. -func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occurred in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - pc := uintptr(0) - file := "" - line := 0 - ok := false - name := "" - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. - if name == "testing.tRunner" { - break - } - - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - if len(parts) > 1 { - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - } - - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. 
-func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - msg := msgAndArgs[0] - if msgAsStr, ok := msg.(string); ok { - return msgAsStr - } - return fmt.Sprintf("%+v", msg) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Aligns the provided message so that all lines after the first line start at the same location as the first line. -// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the -// basis on which the alignment occurs). -func indentMessageLines(message string, longestLabelLen int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - // no need to align first line because it starts at the correct location (after the label) - if i != 0 { - // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - content := []labeledContent{ - {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, - {"Error", failureMessage}, - } - - // Add test name if the Go version supports it - if n, ok := t.(interface { - Name() string - }); ok { - content = append(content, labeledContent{"Test", n.Name()}) - } - - message := messageFromMsgAndArgs(msgAndArgs...) - if len(message) > 0 { - content = append(content, labeledContent{"Messages", message}) - } - - t.Errorf("\n%s", ""+labeledOutput(content...)) - - return false -} - -type labeledContent struct { - label string - content string -} - -// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: -// -// \t{{label}}:{{align_spaces}}\t{{content}}\n -// -// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. -// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this -// alignment is achieved, "\t{{content}}\n" is added for the output. 
-// -// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. -func labeledOutput(content ...labeledContent) string { - longestLabel := 0 - for _, v := range content { - if len(v.label) > longestLabel { - longestLabel = len(v.label) - } - } - var output string - for _, v := range content { - output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" - } - return output -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if object == nil { - return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) - } - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// Same asserts that two pointers reference the same object. -// -// assert.Same(t, ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual) - if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr { - return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...) - } - - expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual) - if expectedType != actualType { - return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v", - expectedType, actualType), msgAndArgs...) 
- } - - if expected != actual { - return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) - } - - return true -} - -// formatUnequalValues takes two values of arbitrary types and returns string -// representations appropriate to be presented to the user. -// -// If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar -// to a type conversion in the Go grammar. -func formatUnequalValues(expected, actual interface{}) (e string, a string) { - if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%#v)", expected, expected), - fmt.Sprintf("%T(%#v)", actual, actual) - } - - return fmt.Sprintf("%#v", expected), - fmt.Sprintf("%#v", actual) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123)) -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal in value and type. -// -// assert.Exactly(t, int32(123), int64(123)) -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err) -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !isNil(object) { - return true - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, - kind) - - if isNilableKind && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err) -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isNil(object) { - return true - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -// isEmpty gets whether the specified object is considered empty or not. 
-func isEmpty(object interface{}) bool { - - // get nil case out of the way - if object == nil { - return true - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty - case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) - } -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - pass := isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - pass := !isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool) -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if h, ok := t.(interface { - Helper() - }); ok { - h.Helper() - } - - if value != true { - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool) -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if value != false { - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). 
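A small sketch of the emptiness, length, and boolean helpers documented in the deleted assertions.go above; the values are arbitrary examples chosen only to show the call shapes.

package example_test

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

// Each call below succeeds; the literals only demonstrate the shape of the
// removed helpers (Empty, NotEmpty, Len, True, False).
func TestCollectionAndBoolHelpers(t *testing.T) {
    assert.Empty(t, "")
    assert.NotEmpty(t, map[string]int{"k": 1})
    assert.Len(t, []string{"a", "b", "c"}, 3)
    assert.True(t, 1 < 2)
    assert.False(t, 2 < 1)
}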
-func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if listKind == reflect.String { - elementValue := reflect.ValueOf(element) - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if listKind == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). 
-// -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return true // we consider nil to be equal to the nil set - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) - } - } - - return true -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return true - } - } - - return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) -func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isEmpty(listA) && isEmpty(listB) { - return true - } - - aKind := reflect.TypeOf(listA).Kind() - bKind := reflect.TypeOf(listB).Kind() - - if aKind != reflect.Array && aKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) - } - - if bKind != reflect.Array && bKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) 
- } - - aValue := reflect.ValueOf(listA) - bValue := reflect.ValueOf(listB) - - aLen := aValue.Len() - bLen := bValue.Len() - - if aLen != bLen { - return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) - } - - // Mark indexes in bValue that we already used - visited := make([]bool, bLen) - for i := 0; i < aLen; i++ { - element := aValue.Index(i).Interface() - found := false - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - if ObjectsAreEqual(bValue.Index(j).Interface(), element) { - visited[j] = true - found = true - break - } - } - if !found { - return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) - } - } - - return true -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}) { - - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - f() - - }() - - return didPanic, message - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ GoCrazy() }) -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - - return true -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ RemainCalm() }) -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. 
-// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = float64(xn) - case time.Duration: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) - if !result { - return result - } - } - - return true -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Map || - reflect.TypeOf(expected).Kind() != reflect.Map { - return Fail(t, "Arguments must be maps", msgAndArgs...) - } - - expectedMap := reflect.ValueOf(expected) - actualMap := reflect.ValueOf(actual) - - if expectedMap.Len() != actualMap.Len() { - return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) 
- } - - for _, k := range expectedMap.MapKeys() { - ev := expectedMap.MapIndex(k) - av := actualMap.MapIndex(k) - - if !ev.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) - } - - if !av.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) - } - - if !InDelta( - t, - ev.Interface(), - av.Interface(), - delta, - msgAndArgs..., - ) { - return false - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) - } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err != nil { - return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) - } - - return true -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if err == nil { - return Fail(t, "An error is expected but got nil.", msgAndArgs...) - } - - return true -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. 
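The error assertions removed here (NoError, Error, EqualError) behave as their doc comments describe; a minimal sketch under the assumption that the code under test returns a plain errors.New value. The EqualError call mirrors the usage comment that follows in the deleted source.

package example_test

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/assert"
)

// Sketch of the error helpers; the "boom" error is a stand-in value.
func TestErrorHelpers(t *testing.T) {
    var err error
    assert.NoError(t, err) // nil error passes NoError

    err = errors.New("boom")
    assert.Error(t, err)              // non-nil error passes Error
    assert.EqualError(t, err, "boom") // message must match exactly
}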
-// -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !Error(t, theError, msgAndArgs...) { - return false - } - expected := errString - actual := theError.Error() - // don't need to use deep equals here, we know they are both strings - if expected != actual { - return Fail(t, fmt.Sprintf("Error message not equal:\n"+ - "expected: %q\n"+ - "actual : %q", expected, actual), msgAndArgs...) - } - return true -} - -// matchRegexp return true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) - } - return true -} - -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. 
-func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if !info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) - } - return true -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -// YAMLEq asserts that two YAML strings are equivalent. -func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedYAMLAsInterface, actualYAMLAsInterface interface{} - - if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...) -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} - -// validateEqualArgs checks whether provided arguments can be safely used in the -// Equal/NotEqual functions. 
-func validateEqualArgs(expected, actual interface{}) error { - if isFunction(expected) || isFunction(actual) { - return errors.New("cannot take func type as argument") - } - return nil -} - -func isFunction(arg interface{}) bool { - if arg == nil { - return false - } - return reflect.TypeOf(arg).Kind() == reflect.Func -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, -} - -type tHelper interface { - Helper() -} - -// Eventually asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. -// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) -func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - timer := time.NewTimer(waitFor) - ticker := time.NewTicker(tick) - checkPassed := make(chan bool) - defer timer.Stop() - defer ticker.Stop() - defer close(checkPassed) - for { - select { - case <-timer.C: - return Fail(t, "Condition never satisfied", msgAndArgs...) - case result := <-checkPassed: - if result { - return true - } - case <-ticker.C: - go func() { - checkPassed <- condition() - }() - } - } -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index c9dccc4d6..000000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. -// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d1d..000000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. 
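assert.Eventually, deleted just above, polls a condition on a ticker until it returns true or the overall timeout elapses. A sketch with arbitrary timings; the goroutine and channel are invented for the example.

package example_test

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

// Polls the condition every 10ms for up to 1s; it flips to true once the
// background goroutine closes the channel.
func TestEventuallySketch(t *testing.T) {
    done := make(chan struct{})
    go func() {
        time.Sleep(20 * time.Millisecond)
        close(done)
    }()

    assert.Eventually(t, func() bool {
        select {
        case <-done:
            return true
        default:
            return false
        }
    }, time.Second, 10*time.Millisecond)
}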
-var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index 9ad56851d..000000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index df46fa777..000000000 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,143 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 and -// an error if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) - if err != nil { - return -1, err - } - req.URL.RawQuery = values.Encode() - handler(w, req) - return w.Code, nil -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent - if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isSuccessCode -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect - if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isRedirectCode -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). 
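The HTTP helpers (HTTPSuccess, HTTPBodyContains, and friends) drive an http.HandlerFunc through httptest, as the deleted httpCode and HTTPBody helpers show. A sketch against a hypothetical healthHandler; the route and response body are invented for illustration.

package example_test

import (
    "fmt"
    "net/http"
    "net/url"
    "testing"

    "github.com/stretchr/testify/assert"
)

// healthHandler is a stand-in handler used only for this sketch.
func healthHandler(w http.ResponseWriter, r *http.Request) {
    fmt.Fprint(w, "ok")
}

func TestHTTPHelpers(t *testing.T) {
    assert.HTTPSuccess(t, healthHandler, "GET", "/health", nil)
    assert.HTTPBodyContains(t, healthHandler, "GET", "/health", url.Values{}, "ok")
}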
-func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isErrorCode := code >= http.StatusBadRequest - if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isErrorCode -} - -// HTTPBody is a helper that returns HTTP body of the response. It returns -// empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return !contains -} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go deleted file mode 100644 index 7324128ef..000000000 --- a/vendor/github.com/stretchr/testify/mock/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package mock provides a system by which it is possible to mock your objects -// and verify calls are happening as expected. -// -// Example Usage -// -// The mock package provides an object, Mock, that tracks activity on another object. It is usually -// embedded into a test object as shown below: -// -// type MyTestObject struct { -// // add a Mock object instance -// mock.Mock -// -// // other fields go here as normal -// } -// -// When implementing the methods of an interface, you wire your functions up -// to call the Mock.Called(args...) method, and return the appropriate values. 
-// -// For example, to mock a method that saves the name and age of a person and returns -// the year of their birth or an error, you might write this: -// -// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { -// args := o.Called(firstname, lastname, age) -// return args.Int(0), args.Error(1) -// } -// -// The Int, Error and Bool methods are examples of strongly typed getters that take the argument -// index position. Given this argument list: -// -// (12, true, "Something") -// -// You could read them out strongly typed like this: -// -// args.Int(0) -// args.Bool(1) -// args.String(2) -// -// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: -// -// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) -// -// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those -// cases you should check for nil first. -package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go deleted file mode 100644 index b5288af5b..000000000 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ /dev/null @@ -1,894 +0,0 @@ -package mock - -import ( - "errors" - "fmt" - "reflect" - "regexp" - "runtime" - "strings" - "sync" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" - "github.com/stretchr/objx" - "github.com/stretchr/testify/assert" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Logf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - FailNow() -} - -/* - Call -*/ - -// Call represents a method call and is used for setting expectations, -// as well as recording activity. -type Call struct { - Parent *Mock - - // The name of the method that was or will be called. - Method string - - // Holds the arguments of the method. - Arguments Arguments - - // Holds the arguments that should be returned when - // this method is called. - ReturnArguments Arguments - - // Holds the caller info for the On() call - callerInfo []string - - // The number of times to return the return arguments when setting - // expectations. 0 means to always return the value. - Repeatability int - - // Amount of times this call has been called - totalCalls int - - // Call to this method can be optional - optional bool - - // Holds a channel that will be used to block the Return until it either - // receives a message or is closed. nil means it returns immediately. - WaitFor <-chan time.Time - - waitTime time.Duration - - // Holds a handler used to manipulate arguments content that are passed by - // reference. It's useful when mocking methods such as unmarshalers or - // decoders. - RunFn func(Arguments) -} - -func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { - return &Call{ - Parent: parent, - Method: methodName, - Arguments: methodArguments, - ReturnArguments: make([]interface{}, 0), - callerInfo: callerInfo, - Repeatability: 0, - WaitFor: nil, - RunFn: nil, - } -} - -func (c *Call) lock() { - c.Parent.mutex.Lock() -} - -func (c *Call) unlock() { - c.Parent.mutex.Unlock() -} - -// Return specifies the return arguments for the expectation. 
-// -// Mock.On("DoSomething").Return(errors.New("failed")) -func (c *Call) Return(returnArguments ...interface{}) *Call { - c.lock() - defer c.unlock() - - c.ReturnArguments = returnArguments - - return c -} - -// Once indicates that that the mock should only return the value once. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() -func (c *Call) Once() *Call { - return c.Times(1) -} - -// Twice indicates that that the mock should only return the value twice. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() -func (c *Call) Twice() *Call { - return c.Times(2) -} - -// Times indicates that that the mock should only return the indicated number -// of times. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) -func (c *Call) Times(i int) *Call { - c.lock() - defer c.unlock() - c.Repeatability = i - return c -} - -// WaitUntil sets the channel that will block the mock's return until its closed -// or a message is received. -// -// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) -func (c *Call) WaitUntil(w <-chan time.Time) *Call { - c.lock() - defer c.unlock() - c.WaitFor = w - return c -} - -// After sets how long to block until the call returns -// -// Mock.On("MyMethod", arg1, arg2).After(time.Second) -func (c *Call) After(d time.Duration) *Call { - c.lock() - defer c.unlock() - c.waitTime = d - return c -} - -// Run sets a handler to be called before returning. It can be used when -// mocking a method such as unmarshalers that takes a pointer to a struct and -// sets properties in such struct -// -// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) { -// arg := args.Get(0).(*map[string]interface{}) -// arg["foo"] = "bar" -// }) -func (c *Call) Run(fn func(args Arguments)) *Call { - c.lock() - defer c.unlock() - c.RunFn = fn - return c -} - -// Maybe allows the method call to be optional. Not calling an optional method -// will not cause an error while asserting expectations -func (c *Call) Maybe() *Call { - c.lock() - defer c.unlock() - c.optional = true - return c -} - -// On chains a new expectation description onto the mocked interface. This -// allows syntax like. -// -// Mock. -// On("MyMethod", 1).Return(nil). -// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) -//go:noinline -func (c *Call) On(methodName string, arguments ...interface{}) *Call { - return c.Parent.On(methodName, arguments...) -} - -// Mock is the workhorse used to track activity on another object. -// For an example of its usage, refer to the "Example Usage" section at the top -// of this document. -type Mock struct { - // Represents the calls that are expected of - // an object. - ExpectedCalls []*Call - - // Holds the calls that were made to this mocked object. - Calls []Call - - // test is An optional variable that holds the test struct, to be used when an - // invalid mock call was made. - test TestingT - - // TestData holds any data that might be useful for testing. Testify ignores - // this data completely allowing you to do whatever you like with it. - testData objx.Map - - mutex sync.Mutex -} - -// TestData holds any data that might be useful for testing. Testify ignores -// this data completely allowing you to do whatever you like with it. 
-func (m *Mock) TestData() objx.Map { - - if m.testData == nil { - m.testData = make(objx.Map) - } - - return m.testData -} - -/* - Setting expectations -*/ - -// Test sets the test struct variable of the mock object -func (m *Mock) Test(t TestingT) { - m.mutex.Lock() - defer m.mutex.Unlock() - m.test = t -} - -// fail fails the current test with the given formatted format and args. -// In case that a test was defined, it uses the test APIs for failing a test, -// otherwise it uses panic. -func (m *Mock) fail(format string, args ...interface{}) { - m.mutex.Lock() - defer m.mutex.Unlock() - - if m.test == nil { - panic(fmt.Sprintf(format, args...)) - } - m.test.Errorf(format, args...) - m.test.FailNow() -} - -// On starts a description of an expectation of the specified method -// being called. -// -// Mock.On("MyMethod", arg1, arg2) -func (m *Mock) On(methodName string, arguments ...interface{}) *Call { - for _, arg := range arguments { - if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { - panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) - } - } - - m.mutex.Lock() - defer m.mutex.Unlock() - c := newCall(m, methodName, assert.CallerInfo(), arguments...) - m.ExpectedCalls = append(m.ExpectedCalls, c) - return c -} - -// /* -// Recording and responding to activity -// */ - -func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { - var expectedCall *Call - - for i, call := range m.ExpectedCalls { - if call.Method == method { - _, diffCount := call.Arguments.Diff(arguments) - if diffCount == 0 { - expectedCall = call - if call.Repeatability > -1 { - return i, call - } - } - } - } - - return -1, expectedCall -} - -func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { - var diffCount int - var closestCall *Call - var err string - - for _, call := range m.expectedCalls() { - if call.Method == method { - - errInfo, tempDiffCount := call.Arguments.Diff(arguments) - if tempDiffCount < diffCount || diffCount == 0 { - diffCount = tempDiffCount - closestCall = call - err = errInfo - } - - } - } - - return closestCall, err -} - -func callString(method string, arguments Arguments, includeArgumentValues bool) string { - - var argValsString string - if includeArgumentValues { - var argVals []string - for argIndex, arg := range arguments { - argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) - } - argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) - } - - return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) -} - -// Called tells the mock object that a method has been called, and gets an array -// of arguments to return. Panics if the call is unexpected (i.e. not preceded by -// appropriate .On .Return() calls) -// If Call.WaitFor is set, blocks until the channel is closed or receives a message. -func (m *Mock) Called(arguments ...interface{}) Arguments { - // get the calling function's name - pc, _, _, ok := runtime.Caller(1) - if !ok { - panic("Couldn't get the caller information") - } - functionPath := runtime.FuncForPC(pc).Name() - //Next four lines are required to use GCCGO function naming conventions. - //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock - //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree - //With GCCGO we need to remove interface information starting from pN
. - re := regexp.MustCompile("\\.pN\\d+_") - if re.MatchString(functionPath) { - functionPath = re.Split(functionPath, -1)[0] - } - parts := strings.Split(functionPath, ".") - functionName := parts[len(parts)-1] - return m.MethodCalled(functionName, arguments...) -} - -// MethodCalled tells the mock object that the given method has been called, and gets -// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded -// by appropriate .On .Return() calls) -// If Call.WaitFor is set, blocks until the channel is closed or receives a message. -func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { - m.mutex.Lock() - //TODO: could combine expected and closes in single loop - found, call := m.findExpectedCall(methodName, arguments...) - - if found < 0 { - // expected call found but it has already been called with repeatable times - if call != nil { - m.mutex.Unlock() - m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) - } - // we have to fail here - because we don't know what to do - // as the return arguments. This is because: - // - // a) this is a totally unexpected call to this method, - // b) the arguments are not what was expected, or - // c) the developer has forgotten to add an accompanying On...Return pair. - closestCall, mismatch := m.findClosestCall(methodName, arguments...) - m.mutex.Unlock() - - if closestCall != nil { - m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", - callString(methodName, arguments, true), - callString(methodName, closestCall.Arguments, true), - diffArguments(closestCall.Arguments, arguments), - strings.TrimSpace(mismatch), - ) - } else { - m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) - } - } - - if call.Repeatability == 1 { - call.Repeatability = -1 - } else if call.Repeatability > 1 { - call.Repeatability-- - } - call.totalCalls++ - - // add the call - m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) - m.mutex.Unlock() - - // block if specified - if call.WaitFor != nil { - <-call.WaitFor - } else { - time.Sleep(call.waitTime) - } - - m.mutex.Lock() - runFn := call.RunFn - m.mutex.Unlock() - - if runFn != nil { - runFn(arguments) - } - - m.mutex.Lock() - returnArgs := call.ReturnArguments - m.mutex.Unlock() - - return returnArgs -} - -/* - Assertions -*/ - -type assertExpectationser interface { - AssertExpectations(TestingT) bool -} - -// AssertExpectationsForObjects asserts that everything specified with On and Return -// of the specified objects was in fact called as expected. -// -// Calls may have occurred in any order. 
-func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - for _, obj := range testObjects { - if m, ok := obj.(Mock); ok { - t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") - obj = &m - } - m := obj.(assertExpectationser) - if !m.AssertExpectations(t) { - t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) - return false - } - } - return true -} - -// AssertExpectations asserts that everything specified with On and Return was -// in fact called as expected. Calls may have occurred in any order. -func (m *Mock) AssertExpectations(t TestingT) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - var somethingMissing bool - var failedExpectations int - - // iterate through each expectation - expectedCalls := m.expectedCalls() - for _, expectedCall := range expectedCalls { - if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { - somethingMissing = true - failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - if expectedCall.Repeatability > 0 { - somethingMissing = true - failedExpectations++ - t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) - } else { - t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - } - } - } - - if somethingMissing { - t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) - } - - return !somethingMissing -} - -// AssertNumberOfCalls asserts that the method was called expectedCalls times. -func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - var actualCalls int - for _, call := range m.calls() { - if call.Method == methodName { - actualCalls++ - } - } - return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) -} - -// AssertCalled asserts that the method was called. -// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. -func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - if !m.methodWasCalled(methodName, arguments) { - var calledWithArgs []string - for _, call := range m.calls() { - calledWithArgs = append(calledWithArgs, fmt.Sprintf("%v", call.Arguments)) - } - if len(calledWithArgs) == 0 { - return assert.Fail(t, "Should have called with given arguments", - fmt.Sprintf("Expected %q to have been called with:\n%v\nbut no actual calls happened", methodName, arguments)) - } - return assert.Fail(t, "Should have called with given arguments", - fmt.Sprintf("Expected %q to have been called with:\n%v\nbut actual calls were:\n %v", methodName, arguments, strings.Join(calledWithArgs, "\n"))) - } - return true -} - -// AssertNotCalled asserts that the method was not called. 
-// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. -func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - m.mutex.Lock() - defer m.mutex.Unlock() - if m.methodWasCalled(methodName, arguments) { - return assert.Fail(t, "Should not have called with given arguments", - fmt.Sprintf("Expected %q to not have been called with:\n%v\nbut actually it was.", methodName, arguments)) - } - return true -} - -func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { - for _, call := range m.calls() { - if call.Method == methodName { - - _, differences := Arguments(expected).Diff(call.Arguments) - - if differences == 0 { - // found the expected call - return true - } - - } - } - // we didn't find the expected call - return false -} - -func (m *Mock) expectedCalls() []*Call { - return append([]*Call{}, m.ExpectedCalls...) -} - -func (m *Mock) calls() []Call { - return append([]Call{}, m.Calls...) -} - -/* - Arguments -*/ - -// Arguments holds an array of method arguments or return values. -type Arguments []interface{} - -const ( - // Anything is used in Diff and Assert when the argument being tested - // shouldn't be taken into consideration. - Anything = "mock.Anything" -) - -// AnythingOfTypeArgument is a string that contains the type of an argument -// for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string - -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. -// -// For example: -// Assert(t, AnythingOfType("string"), AnythingOfType("int")) -func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) -} - -// argumentMatcher performs custom argument matching, returning whether or -// not the argument is matched by the expectation fixture function. -type argumentMatcher struct { - // fn is a function which accepts one argument, and returns a bool. - fn reflect.Value -} - -func (f argumentMatcher) Matches(argument interface{}) bool { - expectType := f.fn.Type().In(0) - expectTypeNilSupported := false - switch expectType.Kind() { - case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: - expectTypeNilSupported = true - } - - argType := reflect.TypeOf(argument) - var arg reflect.Value - if argType == nil { - arg = reflect.New(expectType).Elem() - } else { - arg = reflect.ValueOf(argument) - } - - if argType == nil && !expectTypeNilSupported { - panic(errors.New("attempting to call matcher with nil for non-nil expected type")) - } - if argType == nil || argType.AssignableTo(expectType) { - result := f.fn.Call([]reflect.Value{arg}) - return result[0].Bool() - } - return false -} - -func (f argumentMatcher) String() string { - return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) -} - -// MatchedBy can be used to match a mock call based on only certain properties -// from a complex struct or some calculation. It takes a function that will be -// evaluated with the called argument and will return true when there's a match -// and false otherwise. -// -// Example: -// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) -// -// |fn|, must be a function accepting a single argument (of the expected type) -// which returns a bool. 
If |fn| doesn't match the required signature, -// MatchedBy() panics. -func MatchedBy(fn interface{}) argumentMatcher { - fnType := reflect.TypeOf(fn) - - if fnType.Kind() != reflect.Func { - panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) - } - if fnType.NumIn() != 1 { - panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) - } - if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { - panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) - } - - return argumentMatcher{fn: reflect.ValueOf(fn)} -} - -// Get Returns the argument at the specified index. -func (args Arguments) Get(index int) interface{} { - if index+1 > len(args) { - panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) - } - return args[index] -} - -// Is gets whether the objects match the arguments specified. -func (args Arguments) Is(objects ...interface{}) bool { - for i, obj := range args { - if obj != objects[i] { - return false - } - } - return true -} - -// Diff gets a string describing the differences between the arguments -// and the specified objects. -// -// Returns the diff string and number of differences found. -func (args Arguments) Diff(objects []interface{}) (string, int) { - //TODO: could return string as error and nil for No difference - - var output = "\n" - var differences int - - var maxArgCount = len(args) - if len(objects) > maxArgCount { - maxArgCount = len(objects) - } - - for i := 0; i < maxArgCount; i++ { - var actual, expected interface{} - var actualFmt, expectedFmt string - - if len(objects) <= i { - actual = "(Missing)" - actualFmt = "(Missing)" - } else { - actual = objects[i] - actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual) - } - - if len(args) <= i { - expected = "(Missing)" - expectedFmt = "(Missing)" - } else { - expected = args[i] - expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected) - } - - if matcher, ok := expected.(argumentMatcher); ok { - if matcher.Matches(actual) { - output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) - } else { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) - } - - } else { - - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) - } - } - - } - - if differences == 0 { - return "No differences.", differences - } - - return output, differences - -} - -// Assert compares the arguments with the specified objects and fails if -// they do not exactly match. 
-func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - // get the differences - diff, diffCount := args.Diff(objects) - - if diffCount == 0 { - return true - } - - // there are differences... report them... - t.Logf(diff) - t.Errorf("%sArguments do not match.", assert.CallerInfo()) - - return false - -} - -// String gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -// -// If no index is provided, String() returns a complete string representation -// of the arguments. -func (args Arguments) String(indexOrNil ...int) string { - - if len(indexOrNil) == 0 { - // normal String() method - return a string representation of the args - var argsStr []string - for _, arg := range args { - argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) - } - return strings.Join(argsStr, ",") - } else if len(indexOrNil) == 1 { - // Index has been specified - get the argument at that index - var index = indexOrNil[0] - var s string - var ok bool - if s, ok = args.Get(index).(string); !ok { - panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) - } - return s - } - - panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) - -} - -// Int gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Int(index int) int { - var s int - var ok bool - if s, ok = args.Get(index).(int); !ok { - panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Error gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Error(index int) error { - obj := args.Get(index) - var s error - var ok bool - if obj == nil { - return nil - } - if s, ok = obj.(error); !ok { - panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Bool gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Bool(index int) bool { - var s bool - var ok bool - if s, ok = args.Get(index).(bool); !ok { - panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -func diffArguments(expected Arguments, actual Arguments) string { - if len(expected) != len(actual) { - return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) - } - - for x := range expected { - if diffString := diff(expected[x], actual[x]); diffString != "" { - return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) - } - } - - return "" -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. 
-func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { - return "" - } - - e := spewConfig.Sdump(expected) - a := spewConfig.Sdump(actual) - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return diff -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, -} - -type tHelper interface { - Helper() -} diff --git a/vendor/github.com/tinylib/msgp/LICENSE b/vendor/github.com/tinylib/msgp/LICENSE deleted file mode 100644 index 14d60424e..000000000 --- a/vendor/github.com/tinylib/msgp/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2014 Philip Hofer -Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go deleted file mode 100644 index 6c6bb37a5..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build linux,!appengine - -package msgp - -import ( - "os" - "syscall" -) - -func adviseRead(mem []byte) { - syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) -} - -func adviseWrite(mem []byte) { - syscall.Madvise(mem, syscall.MADV_SEQUENTIAL) -} - -func fallocate(f *os.File, sz int64) error { - err := syscall.Fallocate(int(f.Fd()), 0, 0, sz) - if err == syscall.ENOTSUP { - return f.Truncate(sz) - } - return err -} diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go deleted file mode 100644 index da65ea541..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/advise_other.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux appengine - -package msgp - -import ( - "os" -) - -// TODO: darwin, BSD support - -func adviseRead(mem []byte) {} - -func adviseWrite(mem []byte) {} - -func fallocate(f *os.File, sz int64) error { - return f.Truncate(sz) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go deleted file mode 100644 index a0434c7ea..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/circular.go +++ /dev/null @@ -1,39 +0,0 @@ -package msgp - -type timer interface { - StartTimer() - StopTimer() -} - -// EndlessReader is an io.Reader -// that loops over the same data -// endlessly. It is used for benchmarking. -type EndlessReader struct { - tb timer - data []byte - offset int -} - -// NewEndlessReader returns a new endless reader -func NewEndlessReader(b []byte, tb timer) *EndlessReader { - return &EndlessReader{tb: tb, data: b, offset: 0} -} - -// Read implements io.Reader. In practice, it -// always returns (len(p), nil), although it -// fills the supplied slice while the benchmark -// timer is stopped. -func (c *EndlessReader) Read(p []byte) (int, error) { - c.tb.StopTimer() - var n int - l := len(p) - m := len(c.data) - for n < l { - nn := copy(p[n:], c.data[c.offset:]) - n += nn - c.offset += nn - c.offset %= m - } - c.tb.StartTimer() - return n, nil -} diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go deleted file mode 100644 index c634eef1d..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/defs.go +++ /dev/null @@ -1,142 +0,0 @@ -// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp). -// -// This package defines the utilites used by the msgp code generator for encoding and decoding MessagePack -// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code -// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces. -// -// This package defines four "families" of functions: -// - AppendXxxx() appends an object to a []byte in MessagePack encoding. -// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes. -// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type. -// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type. 
-// -// Once a type has satisfied the `Encodable` and `Decodable` interfaces, -// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using -// msgp.Encode(io.Writer, msgp.Encodable) -// and -// msgp.Decode(io.Reader, msgp.Decodable) -// -// There are also methods for converting MessagePack to JSON without -// an explicit de-serialization step. -// -// For additional tips, tricks, and gotchas, please visit -// the wiki at http://github.com/tinylib/msgp -package msgp - -const last4 = 0x0f -const first4 = 0xf0 -const last5 = 0x1f -const first3 = 0xe0 -const last7 = 0x7f - -func isfixint(b byte) bool { - return b>>7 == 0 -} - -func isnfixint(b byte) bool { - return b&first3 == mnfixint -} - -func isfixmap(b byte) bool { - return b&first4 == mfixmap -} - -func isfixarray(b byte) bool { - return b&first4 == mfixarray -} - -func isfixstr(b byte) bool { - return b&first3 == mfixstr -} - -func wfixint(u uint8) byte { - return u & last7 -} - -func rfixint(b byte) uint8 { - return b -} - -func wnfixint(i int8) byte { - return byte(i) | mnfixint -} - -func rnfixint(b byte) int8 { - return int8(b) -} - -func rfixmap(b byte) uint8 { - return b & last4 -} - -func wfixmap(u uint8) byte { - return mfixmap | (u & last4) -} - -func rfixstr(b byte) uint8 { - return b & last5 -} - -func wfixstr(u uint8) byte { - return (u & last5) | mfixstr -} - -func rfixarray(b byte) uint8 { - return (b & last4) -} - -func wfixarray(u uint8) byte { - return (u & last4) | mfixarray -} - -// These are all the byte -// prefixes defined by the -// msgpack standard -const ( - // 0XXXXXXX - mfixint uint8 = 0x00 - - // 111XXXXX - mnfixint uint8 = 0xe0 - - // 1000XXXX - mfixmap uint8 = 0x80 - - // 1001XXXX - mfixarray uint8 = 0x90 - - // 101XXXXX - mfixstr uint8 = 0xa0 - - mnil uint8 = 0xc0 - mfalse uint8 = 0xc2 - mtrue uint8 = 0xc3 - mbin8 uint8 = 0xc4 - mbin16 uint8 = 0xc5 - mbin32 uint8 = 0xc6 - mext8 uint8 = 0xc7 - mext16 uint8 = 0xc8 - mext32 uint8 = 0xc9 - mfloat32 uint8 = 0xca - mfloat64 uint8 = 0xcb - muint8 uint8 = 0xcc - muint16 uint8 = 0xcd - muint32 uint8 = 0xce - muint64 uint8 = 0xcf - mint8 uint8 = 0xd0 - mint16 uint8 = 0xd1 - mint32 uint8 = 0xd2 - mint64 uint8 = 0xd3 - mfixext1 uint8 = 0xd4 - mfixext2 uint8 = 0xd5 - mfixext4 uint8 = 0xd6 - mfixext8 uint8 = 0xd7 - mfixext16 uint8 = 0xd8 - mstr8 uint8 = 0xd9 - mstr16 uint8 = 0xda - mstr32 uint8 = 0xdb - marray16 uint8 = 0xdc - marray32 uint8 = 0xdd - mmap16 uint8 = 0xde - mmap32 uint8 = 0xdf -) diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go deleted file mode 100644 index b473a6f66..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/edit.go +++ /dev/null @@ -1,242 +0,0 @@ -package msgp - -import ( - "math" -) - -// Locate returns a []byte pointing to the field -// in a messagepack map with the provided key. (The returned []byte -// points to a sub-slice of 'raw'; Locate does no allocations.) If the -// key doesn't exist in the map, a zero-length []byte will be returned. -func Locate(key string, raw []byte) []byte { - s, n := locate(raw, key) - return raw[s:n] -} - -// Replace takes a key ("key") in a messagepack map ("raw") -// and replaces its value with the one provided and returns -// the new []byte. The returned []byte may point to the same -// memory as "raw". Replace makes no effort to evaluate the validity -// of the contents of 'val'. It may use up to the full capacity of 'raw.' -// Replace returns 'nil' if the field doesn't exist or if the object in 'raw' -// is not a map. 
-func Replace(key string, raw []byte, val []byte) []byte { - start, end := locate(raw, key) - if start == end { - return nil - } - return replace(raw, start, end, val, true) -} - -// CopyReplace works similarly to Replace except that the returned -// byte slice does not point to the same memory as 'raw'. CopyReplace -// returns 'nil' if the field doesn't exist or 'raw' isn't a map. -func CopyReplace(key string, raw []byte, val []byte) []byte { - start, end := locate(raw, key) - if start == end { - return nil - } - return replace(raw, start, end, val, false) -} - -// Remove removes a key-value pair from 'raw'. It returns -// 'raw' unchanged if the key didn't exist. -func Remove(key string, raw []byte) []byte { - start, end := locateKV(raw, key) - if start == end { - return raw - } - raw = raw[:start+copy(raw[start:], raw[end:])] - return resizeMap(raw, -1) -} - -// HasKey returns whether the map in 'raw' has -// a field with key 'key' -func HasKey(key string, raw []byte) bool { - sz, bts, err := ReadMapHeaderBytes(raw) - if err != nil { - return false - } - var field []byte - for i := uint32(0); i < sz; i++ { - field, bts, err = ReadStringZC(bts) - if err != nil { - return false - } - if UnsafeString(field) == key { - return true - } - } - return false -} - -func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte { - ll := end - start // length of segment to replace - lv := len(val) - - if inplace { - extra := lv - ll - - // fastest case: we're doing - // a 1:1 replacement - if extra == 0 { - copy(raw[start:], val) - return raw - - } else if extra < 0 { - // 'val' smaller than replaced value - // copy in place and shift back - - x := copy(raw[start:], val) - y := copy(raw[start+x:], raw[end:]) - return raw[:start+x+y] - - } else if extra < cap(raw)-len(raw) { - // 'val' less than (cap-len) extra bytes - // copy in place and shift forward - raw = raw[0 : len(raw)+extra] - // shift end forward - copy(raw[end+extra:], raw[end:]) - copy(raw[start:], val) - return raw - } - } - - // we have to allocate new space - out := make([]byte, len(raw)+len(val)-ll) - x := copy(out, raw[:start]) - y := copy(out[x:], val) - copy(out[x+y:], raw[end:]) - return out -} - -// locate does a naive O(n) search for the map key; returns start, end -// (returns 0,0 on error) -func locate(raw []byte, key string) (start int, end int) { - var ( - sz uint32 - bts []byte - field []byte - err error - ) - sz, bts, err = ReadMapHeaderBytes(raw) - if err != nil { - return - } - - // loop and locate field - for i := uint32(0); i < sz; i++ { - field, bts, err = ReadStringZC(bts) - if err != nil { - return 0, 0 - } - if UnsafeString(field) == key { - // start location - l := len(raw) - start = l - len(bts) - bts, err = Skip(bts) - if err != nil { - return 0, 0 - } - end = l - len(bts) - return - } - bts, err = Skip(bts) - if err != nil { - return 0, 0 - } - } - return 0, 0 -} - -// locate key AND value -func locateKV(raw []byte, key string) (start int, end int) { - var ( - sz uint32 - bts []byte - field []byte - err error - ) - sz, bts, err = ReadMapHeaderBytes(raw) - if err != nil { - return 0, 0 - } - - for i := uint32(0); i < sz; i++ { - tmp := len(bts) - field, bts, err = ReadStringZC(bts) - if err != nil { - return 0, 0 - } - if UnsafeString(field) == key { - start = len(raw) - tmp - bts, err = Skip(bts) - if err != nil { - return 0, 0 - } - end = len(raw) - len(bts) - return - } - bts, err = Skip(bts) - if err != nil { - return 0, 0 - } - } - return 0, 0 -} - -// delta is delta on map size -func 
resizeMap(raw []byte, delta int64) []byte { - var sz int64 - switch raw[0] { - case mmap16: - sz = int64(big.Uint16(raw[1:])) - if sz+delta <= math.MaxUint16 { - big.PutUint16(raw[1:], uint16(sz+delta)) - return raw - } - if cap(raw)-len(raw) >= 2 { - raw = raw[0 : len(raw)+2] - copy(raw[5:], raw[3:]) - raw[0] = mmap32 - big.PutUint32(raw[1:], uint32(sz+delta)) - return raw - } - n := make([]byte, 0, len(raw)+5) - n = AppendMapHeader(n, uint32(sz+delta)) - return append(n, raw[3:]...) - - case mmap32: - sz = int64(big.Uint32(raw[1:])) - big.PutUint32(raw[1:], uint32(sz+delta)) - return raw - - default: - sz = int64(rfixmap(raw[0])) - if sz+delta < 16 { - raw[0] = wfixmap(uint8(sz + delta)) - return raw - } else if sz+delta <= math.MaxUint16 { - if cap(raw)-len(raw) >= 2 { - raw = raw[0 : len(raw)+2] - copy(raw[3:], raw[1:]) - raw[0] = mmap16 - big.PutUint16(raw[1:], uint16(sz+delta)) - return raw - } - n := make([]byte, 0, len(raw)+5) - n = AppendMapHeader(n, uint32(sz+delta)) - return append(n, raw[1:]...) - } - if cap(raw)-len(raw) >= 4 { - raw = raw[0 : len(raw)+4] - copy(raw[5:], raw[1:]) - raw[0] = mmap32 - big.PutUint32(raw[1:], uint32(sz+delta)) - return raw - } - n := make([]byte, 0, len(raw)+5) - n = AppendMapHeader(n, uint32(sz+delta)) - return append(n, raw[1:]...) - } -} diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go deleted file mode 100644 index 95762e7ee..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/elsize.go +++ /dev/null @@ -1,99 +0,0 @@ -package msgp - -// size of every object on the wire, -// plus type information. gives us -// constant-time type information -// for traversing composite objects. -// -var sizes = [256]bytespec{ - mnil: {size: 1, extra: constsize, typ: NilType}, - mfalse: {size: 1, extra: constsize, typ: BoolType}, - mtrue: {size: 1, extra: constsize, typ: BoolType}, - mbin8: {size: 2, extra: extra8, typ: BinType}, - mbin16: {size: 3, extra: extra16, typ: BinType}, - mbin32: {size: 5, extra: extra32, typ: BinType}, - mext8: {size: 3, extra: extra8, typ: ExtensionType}, - mext16: {size: 4, extra: extra16, typ: ExtensionType}, - mext32: {size: 6, extra: extra32, typ: ExtensionType}, - mfloat32: {size: 5, extra: constsize, typ: Float32Type}, - mfloat64: {size: 9, extra: constsize, typ: Float64Type}, - muint8: {size: 2, extra: constsize, typ: UintType}, - muint16: {size: 3, extra: constsize, typ: UintType}, - muint32: {size: 5, extra: constsize, typ: UintType}, - muint64: {size: 9, extra: constsize, typ: UintType}, - mint8: {size: 2, extra: constsize, typ: IntType}, - mint16: {size: 3, extra: constsize, typ: IntType}, - mint32: {size: 5, extra: constsize, typ: IntType}, - mint64: {size: 9, extra: constsize, typ: IntType}, - mfixext1: {size: 3, extra: constsize, typ: ExtensionType}, - mfixext2: {size: 4, extra: constsize, typ: ExtensionType}, - mfixext4: {size: 6, extra: constsize, typ: ExtensionType}, - mfixext8: {size: 10, extra: constsize, typ: ExtensionType}, - mfixext16: {size: 18, extra: constsize, typ: ExtensionType}, - mstr8: {size: 2, extra: extra8, typ: StrType}, - mstr16: {size: 3, extra: extra16, typ: StrType}, - mstr32: {size: 5, extra: extra32, typ: StrType}, - marray16: {size: 3, extra: array16v, typ: ArrayType}, - marray32: {size: 5, extra: array32v, typ: ArrayType}, - mmap16: {size: 3, extra: map16v, typ: MapType}, - mmap32: {size: 5, extra: map32v, typ: MapType}, -} - -func init() { - // set up fixed fields - - // fixint - for i := mfixint; i < 0x80; i++ { - sizes[i] = 
bytespec{size: 1, extra: constsize, typ: IntType} - } - - // nfixint - for i := uint16(mnfixint); i < 0x100; i++ { - sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType} - } - - // fixstr gets constsize, - // since the prefix yields the size - for i := mfixstr; i < 0xc0; i++ { - sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType} - } - - // fixmap - for i := mfixmap; i < 0x90; i++ { - sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType} - } - - // fixarray - for i := mfixarray; i < 0xa0; i++ { - sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType} - } -} - -// a valid bytespsec has -// non-zero 'size' and -// non-zero 'typ' -type bytespec struct { - size uint8 // prefix size information - extra varmode // extra size information - typ Type // type - _ byte // makes bytespec 4 bytes (yes, this matters) -} - -// size mode -// if positive, # elements for composites -type varmode int8 - -const ( - constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects) - extra8 = -1 // has uint8(p[1]) extra bytes - extra16 = -2 // has be16(p[1:]) extra bytes - extra32 = -3 // has be32(p[1:]) extra bytes - map16v = -4 // use map16 - map32v = -5 // use map32 - array16v = -6 // use array16 - array32v = -7 // use array32 -) - -func getType(v byte) Type { - return sizes[v].typ -} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go deleted file mode 100644 index cc78a980c..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/errors.go +++ /dev/null @@ -1,314 +0,0 @@ -package msgp - -import ( - "fmt" - "reflect" -) - -const resumableDefault = false - -var ( - // ErrShortBytes is returned when the - // slice being decoded is too short to - // contain the contents of the message - ErrShortBytes error = errShort{} - - // this error is only returned - // if we reach code that should - // be unreachable - fatal error = errFatal{} -) - -// Error is the interface satisfied -// by all of the errors that originate -// from this package. -type Error interface { - error - - // Resumable returns whether - // or not the error means that - // the stream of data is malformed - // and the information is unrecoverable. - Resumable() bool -} - -// contextError allows msgp Error instances to be enhanced with additional -// context about their origin. -type contextError interface { - Error - - // withContext must not modify the error instance - it must clone and - // return a new error with the context added. - withContext(ctx string) error -} - -// Cause returns the underlying cause of an error that has been wrapped -// with additional context. -func Cause(e error) error { - out := e - if e, ok := e.(errWrapped); ok && e.cause != nil { - out = e.cause - } - return out -} - -// Resumable returns whether or not the error means that the stream of data is -// malformed and the information is unrecoverable. -func Resumable(e error) bool { - if e, ok := e.(Error); ok { - return e.Resumable() - } - return resumableDefault -} - -// WrapError wraps an error with additional context that allows the part of the -// serialized type that caused the problem to be identified. Underlying errors -// can be retrieved using Cause() -// -// The input error is not modified - a new error should be returned. -// -// ErrShortBytes is not wrapped with any context due to backward compatibility -// issues with the public API. 
-// -func WrapError(err error, ctx ...interface{}) error { - switch e := err.(type) { - case errShort: - return e - case contextError: - return e.withContext(ctxString(ctx)) - default: - return errWrapped{cause: err, ctx: ctxString(ctx)} - } -} - -// ctxString converts the incoming interface{} slice into a single string. -func ctxString(ctx []interface{}) string { - out := "" - for idx, cv := range ctx { - if idx > 0 { - out += "/" - } - out += fmt.Sprintf("%v", cv) - } - return out -} - -func addCtx(ctx, add string) string { - if ctx != "" { - return add + "/" + ctx - } else { - return add - } -} - -// errWrapped allows arbitrary errors passed to WrapError to be enhanced with -// context and unwrapped with Cause() -type errWrapped struct { - cause error - ctx string -} - -func (e errWrapped) Error() string { - if e.ctx != "" { - return fmt.Sprintf("%s at %s", e.cause, e.ctx) - } else { - return e.cause.Error() - } -} - -func (e errWrapped) Resumable() bool { - if e, ok := e.cause.(Error); ok { - return e.Resumable() - } - return resumableDefault -} - -type errShort struct{} - -func (e errShort) Error() string { return "msgp: too few bytes left to read object" } -func (e errShort) Resumable() bool { return false } - -type errFatal struct { - ctx string -} - -func (f errFatal) Error() string { - out := "msgp: fatal decoding error (unreachable code)" - if f.ctx != "" { - out += " at " + f.ctx - } - return out -} - -func (f errFatal) Resumable() bool { return false } - -func (f errFatal) withContext(ctx string) error { f.ctx = addCtx(f.ctx, ctx); return f } - -// ArrayError is an error returned -// when decoding a fix-sized array -// of the wrong size -type ArrayError struct { - Wanted uint32 - Got uint32 - ctx string -} - -// Error implements the error interface -func (a ArrayError) Error() string { - out := fmt.Sprintf("msgp: wanted array of size %d; got %d", a.Wanted, a.Got) - if a.ctx != "" { - out += " at " + a.ctx - } - return out -} - -// Resumable is always 'true' for ArrayErrors -func (a ArrayError) Resumable() bool { return true } - -func (a ArrayError) withContext(ctx string) error { a.ctx = addCtx(a.ctx, ctx); return a } - -// IntOverflow is returned when a call -// would downcast an integer to a type -// with too few bits to hold its value. 
-type IntOverflow struct { - Value int64 // the value of the integer - FailedBitsize int // the bit size that the int64 could not fit into - ctx string -} - -// Error implements the error interface -func (i IntOverflow) Error() string { - str := fmt.Sprintf("msgp: %d overflows int%d", i.Value, i.FailedBitsize) - if i.ctx != "" { - str += " at " + i.ctx - } - return str -} - -// Resumable is always 'true' for overflows -func (i IntOverflow) Resumable() bool { return true } - -func (i IntOverflow) withContext(ctx string) error { i.ctx = addCtx(i.ctx, ctx); return i } - -// UintOverflow is returned when a call -// would downcast an unsigned integer to a type -// with too few bits to hold its value -type UintOverflow struct { - Value uint64 // value of the uint - FailedBitsize int // the bit size that couldn't fit the value - ctx string -} - -// Error implements the error interface -func (u UintOverflow) Error() string { - str := fmt.Sprintf("msgp: %d overflows uint%d", u.Value, u.FailedBitsize) - if u.ctx != "" { - str += " at " + u.ctx - } - return str -} - -// Resumable is always 'true' for overflows -func (u UintOverflow) Resumable() bool { return true } - -func (u UintOverflow) withContext(ctx string) error { u.ctx = addCtx(u.ctx, ctx); return u } - -// UintBelowZero is returned when a call -// would cast a signed integer below zero -// to an unsigned integer. -type UintBelowZero struct { - Value int64 // value of the incoming int - ctx string -} - -// Error implements the error interface -func (u UintBelowZero) Error() string { - str := fmt.Sprintf("msgp: attempted to cast int %d to unsigned", u.Value) - if u.ctx != "" { - str += " at " + u.ctx - } - return str -} - -// Resumable is always 'true' for overflows -func (u UintBelowZero) Resumable() bool { return true } - -func (u UintBelowZero) withContext(ctx string) error { - u.ctx = ctx - return u -} - -// A TypeError is returned when a particular -// decoding method is unsuitable for decoding -// a particular MessagePack value. -type TypeError struct { - Method Type // Type expected by method - Encoded Type // Type actually encoded - - ctx string -} - -// Error implements the error interface -func (t TypeError) Error() string { - out := fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method) - if t.ctx != "" { - out += " at " + t.ctx - } - return out -} - -// Resumable returns 'true' for TypeErrors -func (t TypeError) Resumable() bool { return true } - -func (t TypeError) withContext(ctx string) error { t.ctx = addCtx(t.ctx, ctx); return t } - -// returns either InvalidPrefixError or -// TypeError depending on whether or not -// the prefix is recognized -func badPrefix(want Type, lead byte) error { - t := sizes[lead].typ - if t == InvalidType { - return InvalidPrefixError(lead) - } - return TypeError{Method: want, Encoded: t} -} - -// InvalidPrefixError is returned when a bad encoding -// uses a prefix that is not recognized in the MessagePack standard. -// This kind of error is unrecoverable. -type InvalidPrefixError byte - -// Error implements the error interface -func (i InvalidPrefixError) Error() string { - return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(i)) -} - -// Resumable returns 'false' for InvalidPrefixErrors -func (i InvalidPrefixError) Resumable() bool { return false } - -// ErrUnsupportedType is returned -// when a bad argument is supplied -// to a function that takes `interface{}`. 
-type ErrUnsupportedType struct { - T reflect.Type - - ctx string -} - -// Error implements error -func (e *ErrUnsupportedType) Error() string { - out := fmt.Sprintf("msgp: type %q not supported", e.T) - if e.ctx != "" { - out += " at " + e.ctx - } - return out -} - -// Resumable returns 'true' for ErrUnsupportedType -func (e *ErrUnsupportedType) Resumable() bool { return true } - -func (e *ErrUnsupportedType) withContext(ctx string) error { - o := *e - o.ctx = addCtx(o.ctx, ctx) - return &o -} diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go deleted file mode 100644 index 0b31dcdb7..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/extension.go +++ /dev/null @@ -1,549 +0,0 @@ -package msgp - -import ( - "fmt" - "math" -) - -const ( - // Complex64Extension is the extension number used for complex64 - Complex64Extension = 3 - - // Complex128Extension is the extension number used for complex128 - Complex128Extension = 4 - - // TimeExtension is the extension number used for time.Time - TimeExtension = 5 -) - -// our extensions live here -var extensionReg = make(map[int8]func() Extension) - -// RegisterExtension registers extensions so that they -// can be initialized and returned by methods that -// decode `interface{}` values. This should only -// be called during initialization. f() should return -// a newly-initialized zero value of the extension. Keep in -// mind that extensions 3, 4, and 5 are reserved for -// complex64, complex128, and time.Time, respectively, -// and that MessagePack reserves extension types from -127 to -1. -// -// For example, if you wanted to register a user-defined struct: -// -// msgp.RegisterExtension(10, func() msgp.Extension { &MyExtension{} }) -// -// RegisterExtension will panic if you call it multiple times -// with the same 'typ' argument, or if you use a reserved -// type (3, 4, or 5). -func RegisterExtension(typ int8, f func() Extension) { - switch typ { - case Complex64Extension, Complex128Extension, TimeExtension: - panic(fmt.Sprint("msgp: forbidden extension type:", typ)) - } - if _, ok := extensionReg[typ]; ok { - panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once")) - } - extensionReg[typ] = f -} - -// ExtensionTypeError is an error type returned -// when there is a mis-match between an extension type -// and the type encoded on the wire -type ExtensionTypeError struct { - Got int8 - Want int8 -} - -// Error implements the error interface -func (e ExtensionTypeError) Error() string { - return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", e.Want, e.Got) -} - -// Resumable returns 'true' for ExtensionTypeErrors -func (e ExtensionTypeError) Resumable() bool { return true } - -func errExt(got int8, wanted int8) error { - return ExtensionTypeError{Got: got, Want: wanted} -} - -// Extension is the interface fulfilled -// by types that want to define their -// own binary encoding. -type Extension interface { - // ExtensionType should return - // a int8 that identifies the concrete - // type of the extension. (Types <0 are - // officially reserved by the MessagePack - // specifications.) 
- ExtensionType() int8 - - // Len should return the length - // of the data to be encoded - Len() int - - // MarshalBinaryTo should copy - // the data into the supplied slice, - // assuming that the slice has length Len() - MarshalBinaryTo([]byte) error - - UnmarshalBinary([]byte) error -} - -// RawExtension implements the Extension interface -type RawExtension struct { - Data []byte - Type int8 -} - -// ExtensionType implements Extension.ExtensionType, and returns r.Type -func (r *RawExtension) ExtensionType() int8 { return r.Type } - -// Len implements Extension.Len, and returns len(r.Data) -func (r *RawExtension) Len() int { return len(r.Data) } - -// MarshalBinaryTo implements Extension.MarshalBinaryTo, -// and returns a copy of r.Data -func (r *RawExtension) MarshalBinaryTo(d []byte) error { - copy(d, r.Data) - return nil -} - -// UnmarshalBinary implements Extension.UnmarshalBinary, -// and sets r.Data to the contents of the provided slice -func (r *RawExtension) UnmarshalBinary(b []byte) error { - if cap(r.Data) >= len(b) { - r.Data = r.Data[0:len(b)] - } else { - r.Data = make([]byte, len(b)) - } - copy(r.Data, b) - return nil -} - -// WriteExtension writes an extension type to the writer -func (mw *Writer) WriteExtension(e Extension) error { - l := e.Len() - var err error - switch l { - case 0: - o, err := mw.require(3) - if err != nil { - return err - } - mw.buf[o] = mext8 - mw.buf[o+1] = 0 - mw.buf[o+2] = byte(e.ExtensionType()) - case 1: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext1 - mw.buf[o+1] = byte(e.ExtensionType()) - case 2: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext2 - mw.buf[o+1] = byte(e.ExtensionType()) - case 4: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext4 - mw.buf[o+1] = byte(e.ExtensionType()) - case 8: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext8 - mw.buf[o+1] = byte(e.ExtensionType()) - case 16: - o, err := mw.require(2) - if err != nil { - return err - } - mw.buf[o] = mfixext16 - mw.buf[o+1] = byte(e.ExtensionType()) - default: - switch { - case l < math.MaxUint8: - o, err := mw.require(3) - if err != nil { - return err - } - mw.buf[o] = mext8 - mw.buf[o+1] = byte(uint8(l)) - mw.buf[o+2] = byte(e.ExtensionType()) - case l < math.MaxUint16: - o, err := mw.require(4) - if err != nil { - return err - } - mw.buf[o] = mext16 - big.PutUint16(mw.buf[o+1:], uint16(l)) - mw.buf[o+3] = byte(e.ExtensionType()) - default: - o, err := mw.require(6) - if err != nil { - return err - } - mw.buf[o] = mext32 - big.PutUint32(mw.buf[o+1:], uint32(l)) - mw.buf[o+5] = byte(e.ExtensionType()) - } - } - // we can only write directly to the - // buffer if we're sure that it - // fits the object - if l <= mw.bufsize() { - o, err := mw.require(l) - if err != nil { - return err - } - return e.MarshalBinaryTo(mw.buf[o:]) - } - // here we create a new buffer - // just large enough for the body - // and save it as the write buffer - err = mw.flush() - if err != nil { - return err - } - buf := make([]byte, l) - err = e.MarshalBinaryTo(buf) - if err != nil { - return err - } - mw.buf = buf - mw.wloc = l - return nil -} - -// peek at the extension type, assuming the next -// kind to be read is Extension -func (m *Reader) peekExtensionType() (int8, error) { - p, err := m.R.Peek(2) - if err != nil { - return 0, err - } - spec := sizes[p[0]] - if spec.typ != ExtensionType { - return 0, badPrefix(ExtensionType, p[0]) - } - if spec.extra == 
constsize { - return int8(p[1]), nil - } - size := spec.size - p, err = m.R.Peek(int(size)) - if err != nil { - return 0, err - } - return int8(p[size-1]), nil -} - -// peekExtension peeks at the extension encoding type -// (must guarantee at least 1 byte in 'b') -func peekExtension(b []byte) (int8, error) { - spec := sizes[b[0]] - size := spec.size - if spec.typ != ExtensionType { - return 0, badPrefix(ExtensionType, b[0]) - } - if len(b) < int(size) { - return 0, ErrShortBytes - } - // for fixed extensions, - // the type information is in - // the second byte - if spec.extra == constsize { - return int8(b[1]), nil - } - // otherwise, it's in the last - // part of the prefix - return int8(b[size-1]), nil -} - -// ReadExtension reads the next object from the reader -// as an extension. ReadExtension will fail if the next -// object in the stream is not an extension, or if -// e.Type() is not the same as the wire type. -func (m *Reader) ReadExtension(e Extension) (err error) { - var p []byte - p, err = m.R.Peek(2) - if err != nil { - return - } - lead := p[0] - var read int - var off int - switch lead { - case mfixext1: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(3) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(3) - } - return - - case mfixext2: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(4) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(4) - } - return - - case mfixext4: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(6) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(6) - } - return - - case mfixext8: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(10) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(10) - } - return - - case mfixext16: - if int8(p[1]) != e.ExtensionType() { - err = errExt(int8(p[1]), e.ExtensionType()) - return - } - p, err = m.R.Peek(18) - if err != nil { - return - } - err = e.UnmarshalBinary(p[2:]) - if err == nil { - _, err = m.R.Skip(18) - } - return - - case mext8: - p, err = m.R.Peek(3) - if err != nil { - return - } - if int8(p[2]) != e.ExtensionType() { - err = errExt(int8(p[2]), e.ExtensionType()) - return - } - read = int(uint8(p[1])) - off = 3 - - case mext16: - p, err = m.R.Peek(4) - if err != nil { - return - } - if int8(p[3]) != e.ExtensionType() { - err = errExt(int8(p[3]), e.ExtensionType()) - return - } - read = int(big.Uint16(p[1:])) - off = 4 - - case mext32: - p, err = m.R.Peek(6) - if err != nil { - return - } - if int8(p[5]) != e.ExtensionType() { - err = errExt(int8(p[5]), e.ExtensionType()) - return - } - read = int(big.Uint32(p[1:])) - off = 6 - - default: - err = badPrefix(ExtensionType, lead) - return - } - - p, err = m.R.Peek(read + off) - if err != nil { - return - } - err = e.UnmarshalBinary(p[off:]) - if err == nil { - _, err = m.R.Skip(read + off) - } - return -} - -// AppendExtension appends a MessagePack extension to the provided slice -func AppendExtension(b []byte, e Extension) ([]byte, error) { - l := e.Len() - var o []byte - var n int - switch l { - case 0: - o, n = ensure(b, 3) - o[n] = mext8 - o[n+1] = 0 - o[n+2] = 
byte(e.ExtensionType()) - return o[:n+3], nil - case 1: - o, n = ensure(b, 3) - o[n] = mfixext1 - o[n+1] = byte(e.ExtensionType()) - n += 2 - case 2: - o, n = ensure(b, 4) - o[n] = mfixext2 - o[n+1] = byte(e.ExtensionType()) - n += 2 - case 4: - o, n = ensure(b, 6) - o[n] = mfixext4 - o[n+1] = byte(e.ExtensionType()) - n += 2 - case 8: - o, n = ensure(b, 10) - o[n] = mfixext8 - o[n+1] = byte(e.ExtensionType()) - n += 2 - case 16: - o, n = ensure(b, 18) - o[n] = mfixext16 - o[n+1] = byte(e.ExtensionType()) - n += 2 - default: - switch { - case l < math.MaxUint8: - o, n = ensure(b, l+3) - o[n] = mext8 - o[n+1] = byte(uint8(l)) - o[n+2] = byte(e.ExtensionType()) - n += 3 - case l < math.MaxUint16: - o, n = ensure(b, l+4) - o[n] = mext16 - big.PutUint16(o[n+1:], uint16(l)) - o[n+3] = byte(e.ExtensionType()) - n += 4 - default: - o, n = ensure(b, l+6) - o[n] = mext32 - big.PutUint32(o[n+1:], uint32(l)) - o[n+5] = byte(e.ExtensionType()) - n += 6 - } - } - return o, e.MarshalBinaryTo(o[n:]) -} - -// ReadExtensionBytes reads an extension from 'b' into 'e' -// and returns any remaining bytes. -// Possible errors: -// - ErrShortBytes ('b' not long enough) -// - ExtensionTypeErorr{} (wire type not the same as e.Type()) -// - TypeErorr{} (next object not an extension) -// - InvalidPrefixError -// - An umarshal error returned from e.UnmarshalBinary -func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) { - l := len(b) - if l < 3 { - return b, ErrShortBytes - } - lead := b[0] - var ( - sz int // size of 'data' - off int // offset of 'data' - typ int8 - ) - switch lead { - case mfixext1: - typ = int8(b[1]) - sz = 1 - off = 2 - case mfixext2: - typ = int8(b[1]) - sz = 2 - off = 2 - case mfixext4: - typ = int8(b[1]) - sz = 4 - off = 2 - case mfixext8: - typ = int8(b[1]) - sz = 8 - off = 2 - case mfixext16: - typ = int8(b[1]) - sz = 16 - off = 2 - case mext8: - sz = int(uint8(b[1])) - typ = int8(b[2]) - off = 3 - if sz == 0 { - return b[3:], e.UnmarshalBinary(b[3:3]) - } - case mext16: - if l < 4 { - return b, ErrShortBytes - } - sz = int(big.Uint16(b[1:])) - typ = int8(b[3]) - off = 4 - case mext32: - if l < 6 { - return b, ErrShortBytes - } - sz = int(big.Uint32(b[1:])) - typ = int8(b[5]) - off = 6 - default: - return b, badPrefix(ExtensionType, lead) - } - - if typ != e.ExtensionType() { - return b, errExt(typ, e.ExtensionType()) - } - - // the data of the extension starts - // at 'off' and is 'sz' bytes long - if len(b[off:]) < sz { - return b, ErrShortBytes - } - tot := off + sz - return b[tot:], e.UnmarshalBinary(b[off:tot]) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go deleted file mode 100644 index 8e7370ebc..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/file.go +++ /dev/null @@ -1,92 +0,0 @@ -// +build linux darwin dragonfly freebsd netbsd openbsd -// +build !appengine - -package msgp - -import ( - "os" - "syscall" -) - -// ReadFile reads a file into 'dst' using -// a read-only memory mapping. Consequently, -// the file must be mmap-able, and the -// Unmarshaler should never write to -// the source memory. (Methods generated -// by the msgp tool obey that constraint, but -// user-defined implementations may not.) -// -// Reading and writing through file mappings -// is only efficient for large files; small -// files are best read and written using -// the ordinary streaming interfaces. 
-// -func ReadFile(dst Unmarshaler, file *os.File) error { - stat, err := file.Stat() - if err != nil { - return err - } - data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED) - if err != nil { - return err - } - adviseRead(data) - _, err = dst.UnmarshalMsg(data) - uerr := syscall.Munmap(data) - if err == nil { - err = uerr - } - return err -} - -// MarshalSizer is the combination -// of the Marshaler and Sizer -// interfaces. -type MarshalSizer interface { - Marshaler - Sizer -} - -// WriteFile writes a file from 'src' using -// memory mapping. It overwrites the entire -// contents of the previous file. -// The mapping size is calculated -// using the `Msgsize()` method -// of 'src', so it must produce a result -// equal to or greater than the actual encoded -// size of the object. Otherwise, -// a fault (SIGBUS) will occur. -// -// Reading and writing through file mappings -// is only efficient for large files; small -// files are best read and written using -// the ordinary streaming interfaces. -// -// NOTE: The performance of this call -// is highly OS- and filesystem-dependent. -// Users should take care to test that this -// performs as expected in a production environment. -// (Linux users should run a kernel and filesystem -// that support fallocate(2) for the best results.) -func WriteFile(src MarshalSizer, file *os.File) error { - sz := src.Msgsize() - err := fallocate(file, int64(sz)) - if err != nil { - return err - } - data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) - if err != nil { - return err - } - adviseWrite(data) - chunk := data[:0] - chunk, err = src.MarshalMsg(chunk) - if err != nil { - return err - } - uerr := syscall.Munmap(data) - if uerr != nil { - return uerr - } - return file.Truncate(int64(len(chunk))) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go deleted file mode 100644 index 6e654dbdc..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/file_port.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build windows appengine - -package msgp - -import ( - "io/ioutil" - "os" -) - -// MarshalSizer is the combination -// of the Marshaler and Sizer -// interfaces. -type MarshalSizer interface { - Marshaler - Sizer -} - -func ReadFile(dst Unmarshaler, file *os.File) error { - if u, ok := dst.(Decodable); ok { - return u.DecodeMsg(NewReader(file)) - } - - data, err := ioutil.ReadAll(file) - if err != nil { - return err - } - _, err = dst.UnmarshalMsg(data) - return err -} - -func WriteFile(src MarshalSizer, file *os.File) error { - if e, ok := src.(Encodable); ok { - w := NewWriter(file) - err := e.EncodeMsg(w) - if err == nil { - err = w.Flush() - } - return err - } - - raw, err := src.MarshalMsg(nil) - if err != nil { - return err - } - _, err = file.Write(raw) - return err -} diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go deleted file mode 100644 index f817d7759..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/integers.go +++ /dev/null @@ -1,174 +0,0 @@ -package msgp - -/* ---------------------------------- - integer encoding utilities - (inline-able) - - TODO(tinylib): there are faster, - albeit non-portable solutions - to the code below. implement - byteswap? 
- ---------------------------------- */ - -func putMint64(b []byte, i int64) { - b[0] = mint64 - b[1] = byte(i >> 56) - b[2] = byte(i >> 48) - b[3] = byte(i >> 40) - b[4] = byte(i >> 32) - b[5] = byte(i >> 24) - b[6] = byte(i >> 16) - b[7] = byte(i >> 8) - b[8] = byte(i) -} - -func getMint64(b []byte) int64 { - return (int64(b[1]) << 56) | (int64(b[2]) << 48) | - (int64(b[3]) << 40) | (int64(b[4]) << 32) | - (int64(b[5]) << 24) | (int64(b[6]) << 16) | - (int64(b[7]) << 8) | (int64(b[8])) -} - -func putMint32(b []byte, i int32) { - b[0] = mint32 - b[1] = byte(i >> 24) - b[2] = byte(i >> 16) - b[3] = byte(i >> 8) - b[4] = byte(i) -} - -func getMint32(b []byte) int32 { - return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4])) -} - -func putMint16(b []byte, i int16) { - b[0] = mint16 - b[1] = byte(i >> 8) - b[2] = byte(i) -} - -func getMint16(b []byte) (i int16) { - return (int16(b[1]) << 8) | int16(b[2]) -} - -func putMint8(b []byte, i int8) { - b[0] = mint8 - b[1] = byte(i) -} - -func getMint8(b []byte) (i int8) { - return int8(b[1]) -} - -func putMuint64(b []byte, u uint64) { - b[0] = muint64 - b[1] = byte(u >> 56) - b[2] = byte(u >> 48) - b[3] = byte(u >> 40) - b[4] = byte(u >> 32) - b[5] = byte(u >> 24) - b[6] = byte(u >> 16) - b[7] = byte(u >> 8) - b[8] = byte(u) -} - -func getMuint64(b []byte) uint64 { - return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) | - (uint64(b[3]) << 40) | (uint64(b[4]) << 32) | - (uint64(b[5]) << 24) | (uint64(b[6]) << 16) | - (uint64(b[7]) << 8) | (uint64(b[8])) -} - -func putMuint32(b []byte, u uint32) { - b[0] = muint32 - b[1] = byte(u >> 24) - b[2] = byte(u >> 16) - b[3] = byte(u >> 8) - b[4] = byte(u) -} - -func getMuint32(b []byte) uint32 { - return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4])) -} - -func putMuint16(b []byte, u uint16) { - b[0] = muint16 - b[1] = byte(u >> 8) - b[2] = byte(u) -} - -func getMuint16(b []byte) uint16 { - return (uint16(b[1]) << 8) | uint16(b[2]) -} - -func putMuint8(b []byte, u uint8) { - b[0] = muint8 - b[1] = byte(u) -} - -func getMuint8(b []byte) uint8 { - return uint8(b[1]) -} - -func getUnix(b []byte) (sec int64, nsec int32) { - sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) | - (int64(b[2]) << 40) | (int64(b[3]) << 32) | - (int64(b[4]) << 24) | (int64(b[5]) << 16) | - (int64(b[6]) << 8) | (int64(b[7])) - - nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11])) - return -} - -func putUnix(b []byte, sec int64, nsec int32) { - b[0] = byte(sec >> 56) - b[1] = byte(sec >> 48) - b[2] = byte(sec >> 40) - b[3] = byte(sec >> 32) - b[4] = byte(sec >> 24) - b[5] = byte(sec >> 16) - b[6] = byte(sec >> 8) - b[7] = byte(sec) - b[8] = byte(nsec >> 24) - b[9] = byte(nsec >> 16) - b[10] = byte(nsec >> 8) - b[11] = byte(nsec) -} - -/* ----------------------------- - prefix utilities - ----------------------------- */ - -// write prefix and uint8 -func prefixu8(b []byte, pre byte, sz uint8) { - b[0] = pre - b[1] = byte(sz) -} - -// write prefix and big-endian uint16 -func prefixu16(b []byte, pre byte, sz uint16) { - b[0] = pre - b[1] = byte(sz >> 8) - b[2] = byte(sz) -} - -// write prefix and big-endian uint32 -func prefixu32(b []byte, pre byte, sz uint32) { - b[0] = pre - b[1] = byte(sz >> 24) - b[2] = byte(sz >> 16) - b[3] = byte(sz >> 8) - b[4] = byte(sz) -} - -func prefixu64(b []byte, pre byte, sz uint64) { - b[0] = pre - b[1] = byte(sz >> 56) - b[2] = byte(sz >> 48) - b[3] = byte(sz >> 40) - b[4] = byte(sz >> 32) - b[5] = byte(sz >> 
24) - b[6] = byte(sz >> 16) - b[7] = byte(sz >> 8) - b[8] = byte(sz) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go deleted file mode 100644 index 4325860ad..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/json.go +++ /dev/null @@ -1,542 +0,0 @@ -package msgp - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "io" - "strconv" - "unicode/utf8" -) - -var ( - null = []byte("null") - hex = []byte("0123456789abcdef") -) - -var defuns [_maxtype]func(jsWriter, *Reader) (int, error) - -// note: there is an initialization loop if -// this isn't set up during init() -func init() { - // since none of these functions are inline-able, - // there is not much of a penalty to the indirect - // call. however, this is best expressed as a jump-table... - defuns = [_maxtype]func(jsWriter, *Reader) (int, error){ - StrType: rwString, - BinType: rwBytes, - MapType: rwMap, - ArrayType: rwArray, - Float64Type: rwFloat64, - Float32Type: rwFloat32, - BoolType: rwBool, - IntType: rwInt, - UintType: rwUint, - NilType: rwNil, - ExtensionType: rwExtension, - Complex64Type: rwExtension, - Complex128Type: rwExtension, - TimeType: rwTime, - } -} - -// this is the interface -// used to write json -type jsWriter interface { - io.Writer - io.ByteWriter - WriteString(string) (int, error) -} - -// CopyToJSON reads MessagePack from 'src' and copies it -// as JSON to 'dst' until EOF. -func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) { - r := NewReader(src) - n, err = r.WriteToJSON(dst) - freeR(r) - return -} - -// WriteToJSON translates MessagePack from 'r' and writes it as -// JSON to 'w' until the underlying reader returns io.EOF. It returns -// the number of bytes written, and an error if it stopped before EOF. 
-func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) { - var j jsWriter - var bf *bufio.Writer - if jsw, ok := w.(jsWriter); ok { - j = jsw - } else { - bf = bufio.NewWriter(w) - j = bf - } - var nn int - for err == nil { - nn, err = rwNext(j, r) - n += int64(nn) - } - if err != io.EOF { - if bf != nil { - bf.Flush() - } - return - } - err = nil - if bf != nil { - err = bf.Flush() - } - return -} - -func rwNext(w jsWriter, src *Reader) (int, error) { - t, err := src.NextType() - if err != nil { - return 0, err - } - return defuns[t](w, src) -} - -func rwMap(dst jsWriter, src *Reader) (n int, err error) { - var comma bool - var sz uint32 - var field []byte - - sz, err = src.ReadMapHeader() - if err != nil { - return - } - - if sz == 0 { - return dst.WriteString("{}") - } - - err = dst.WriteByte('{') - if err != nil { - return - } - n++ - var nn int - for i := uint32(0); i < sz; i++ { - if comma { - err = dst.WriteByte(',') - if err != nil { - return - } - n++ - } - - field, err = src.ReadMapKeyPtr() - if err != nil { - return - } - nn, err = rwquoted(dst, field) - n += nn - if err != nil { - return - } - - err = dst.WriteByte(':') - if err != nil { - return - } - n++ - nn, err = rwNext(dst, src) - n += nn - if err != nil { - return - } - if !comma { - comma = true - } - } - - err = dst.WriteByte('}') - if err != nil { - return - } - n++ - return -} - -func rwArray(dst jsWriter, src *Reader) (n int, err error) { - err = dst.WriteByte('[') - if err != nil { - return - } - var sz uint32 - var nn int - sz, err = src.ReadArrayHeader() - if err != nil { - return - } - comma := false - for i := uint32(0); i < sz; i++ { - if comma { - err = dst.WriteByte(',') - if err != nil { - return - } - n++ - } - nn, err = rwNext(dst, src) - n += nn - if err != nil { - return - } - comma = true - } - - err = dst.WriteByte(']') - if err != nil { - return - } - n++ - return -} - -func rwNil(dst jsWriter, src *Reader) (int, error) { - err := src.ReadNil() - if err != nil { - return 0, err - } - return dst.Write(null) -} - -func rwFloat32(dst jsWriter, src *Reader) (int, error) { - f, err := src.ReadFloat32() - if err != nil { - return 0, err - } - src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 64) - return dst.Write(src.scratch) -} - -func rwFloat64(dst jsWriter, src *Reader) (int, error) { - f, err := src.ReadFloat64() - if err != nil { - return 0, err - } - src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 32) - return dst.Write(src.scratch) -} - -func rwInt(dst jsWriter, src *Reader) (int, error) { - i, err := src.ReadInt64() - if err != nil { - return 0, err - } - src.scratch = strconv.AppendInt(src.scratch[:0], i, 10) - return dst.Write(src.scratch) -} - -func rwUint(dst jsWriter, src *Reader) (int, error) { - u, err := src.ReadUint64() - if err != nil { - return 0, err - } - src.scratch = strconv.AppendUint(src.scratch[:0], u, 10) - return dst.Write(src.scratch) -} - -func rwBool(dst jsWriter, src *Reader) (int, error) { - b, err := src.ReadBool() - if err != nil { - return 0, err - } - if b { - return dst.WriteString("true") - } - return dst.WriteString("false") -} - -func rwTime(dst jsWriter, src *Reader) (int, error) { - t, err := src.ReadTime() - if err != nil { - return 0, err - } - bts, err := t.MarshalJSON() - if err != nil { - return 0, err - } - return dst.Write(bts) -} - -func rwExtension(dst jsWriter, src *Reader) (n int, err error) { - et, err := src.peekExtensionType() - if err != nil { - return 0, err - } - - // registered extensions can 
override - // the JSON encoding - if j, ok := extensionReg[et]; ok { - var bts []byte - e := j() - err = src.ReadExtension(e) - if err != nil { - return - } - bts, err = json.Marshal(e) - if err != nil { - return - } - return dst.Write(bts) - } - - e := RawExtension{} - e.Type = et - err = src.ReadExtension(&e) - if err != nil { - return - } - - var nn int - err = dst.WriteByte('{') - if err != nil { - return - } - n++ - - nn, err = dst.WriteString(`"type:"`) - n += nn - if err != nil { - return - } - - src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10) - nn, err = dst.Write(src.scratch) - n += nn - if err != nil { - return - } - - nn, err = dst.WriteString(`,"data":"`) - n += nn - if err != nil { - return - } - - enc := base64.NewEncoder(base64.StdEncoding, dst) - - nn, err = enc.Write(e.Data) - n += nn - if err != nil { - return - } - err = enc.Close() - if err != nil { - return - } - nn, err = dst.WriteString(`"}`) - n += nn - return -} - -func rwString(dst jsWriter, src *Reader) (n int, err error) { - var p []byte - p, err = src.R.Peek(1) - if err != nil { - return - } - lead := p[0] - var read int - - if isfixstr(lead) { - read = int(rfixstr(lead)) - src.R.Skip(1) - goto write - } - - switch lead { - case mstr8: - p, err = src.R.Next(2) - if err != nil { - return - } - read = int(uint8(p[1])) - case mstr16: - p, err = src.R.Next(3) - if err != nil { - return - } - read = int(big.Uint16(p[1:])) - case mstr32: - p, err = src.R.Next(5) - if err != nil { - return - } - read = int(big.Uint32(p[1:])) - default: - err = badPrefix(StrType, lead) - return - } -write: - p, err = src.R.Next(read) - if err != nil { - return - } - n, err = rwquoted(dst, p) - return -} - -func rwBytes(dst jsWriter, src *Reader) (n int, err error) { - var nn int - err = dst.WriteByte('"') - if err != nil { - return - } - n++ - src.scratch, err = src.ReadBytes(src.scratch[:0]) - if err != nil { - return - } - enc := base64.NewEncoder(base64.StdEncoding, dst) - nn, err = enc.Write(src.scratch) - n += nn - if err != nil { - return - } - err = enc.Close() - if err != nil { - return - } - err = dst.WriteByte('"') - if err != nil { - return - } - n++ - return -} - -// Below (c) The Go Authors, 2009-2014 -// Subject to the BSD-style license found at http://golang.org -// -// see: encoding/json/encode.go:(*encodeState).stringbytes() -func rwquoted(dst jsWriter, s []byte) (n int, err error) { - var nn int - err = dst.WriteByte('"') - if err != nil { - return - } - n++ - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - nn, err = dst.Write(s[start:i]) - n += nn - if err != nil { - return - } - } - switch b { - case '\\', '"': - err = dst.WriteByte('\\') - if err != nil { - return - } - n++ - err = dst.WriteByte(b) - if err != nil { - return - } - n++ - case '\n': - err = dst.WriteByte('\\') - if err != nil { - return - } - n++ - err = dst.WriteByte('n') - if err != nil { - return - } - n++ - case '\r': - err = dst.WriteByte('\\') - if err != nil { - return - } - n++ - err = dst.WriteByte('r') - if err != nil { - return - } - n++ - default: - nn, err = dst.WriteString(`\u00`) - n += nn - if err != nil { - return - } - err = dst.WriteByte(hex[b>>4]) - if err != nil { - return - } - n++ - err = dst.WriteByte(hex[b&0xF]) - if err != nil { - return - } - n++ - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - 
if start < i { - nn, err = dst.Write(s[start:i]) - n += nn - if err != nil { - return - } - nn, err = dst.WriteString(`\ufffd`) - n += nn - if err != nil { - return - } - i += size - start = i - continue - } - } - if c == '\u2028' || c == '\u2029' { - if start < i { - nn, err = dst.Write(s[start:i]) - n += nn - if err != nil { - return - } - nn, err = dst.WriteString(`\u202`) - n += nn - if err != nil { - return - } - err = dst.WriteByte(hex[c&0xF]) - if err != nil { - return - } - n++ - } - } - i += size - } - if start < len(s) { - nn, err = dst.Write(s[start:]) - n += nn - if err != nil { - return - } - } - err = dst.WriteByte('"') - if err != nil { - return - } - n++ - return -} diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go deleted file mode 100644 index 438caf539..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go +++ /dev/null @@ -1,363 +0,0 @@ -package msgp - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "io" - "strconv" - "time" -) - -var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error) - -func init() { - - // NOTE(pmh): this is best expressed as a jump table, - // but gc doesn't do that yet. revisit post-go1.5. - unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){ - StrType: rwStringBytes, - BinType: rwBytesBytes, - MapType: rwMapBytes, - ArrayType: rwArrayBytes, - Float64Type: rwFloat64Bytes, - Float32Type: rwFloat32Bytes, - BoolType: rwBoolBytes, - IntType: rwIntBytes, - UintType: rwUintBytes, - NilType: rwNullBytes, - ExtensionType: rwExtensionBytes, - Complex64Type: rwExtensionBytes, - Complex128Type: rwExtensionBytes, - TimeType: rwTimeBytes, - } -} - -// UnmarshalAsJSON takes raw messagepack and writes -// it as JSON to 'w'. If an error is returned, the -// bytes not translated will also be returned. If -// no errors are encountered, the length of the returned -// slice will be zero. 
-func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { - var ( - scratch []byte - cast bool - dst jsWriter - err error - ) - if jsw, ok := w.(jsWriter); ok { - dst = jsw - cast = true - } else { - dst = bufio.NewWriterSize(w, 512) - } - for len(msg) > 0 && err == nil { - msg, scratch, err = writeNext(dst, msg, scratch) - } - if !cast && err == nil { - err = dst.(*bufio.Writer).Flush() - } - return msg, err -} - -func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - if len(msg) < 1 { - return msg, scratch, ErrShortBytes - } - t := getType(msg[0]) - if t == InvalidType { - return msg, scratch, InvalidPrefixError(msg[0]) - } - if t == ExtensionType { - et, err := peekExtension(msg) - if err != nil { - return nil, scratch, err - } - if et == TimeExtension { - t = TimeType - } - } - return unfuns[t](w, msg, scratch) -} - -func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - sz, msg, err := ReadArrayHeaderBytes(msg) - if err != nil { - return msg, scratch, err - } - err = w.WriteByte('[') - if err != nil { - return msg, scratch, err - } - for i := uint32(0); i < sz; i++ { - if i != 0 { - err = w.WriteByte(',') - if err != nil { - return msg, scratch, err - } - } - msg, scratch, err = writeNext(w, msg, scratch) - if err != nil { - return msg, scratch, err - } - } - err = w.WriteByte(']') - return msg, scratch, err -} - -func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - sz, msg, err := ReadMapHeaderBytes(msg) - if err != nil { - return msg, scratch, err - } - err = w.WriteByte('{') - if err != nil { - return msg, scratch, err - } - for i := uint32(0); i < sz; i++ { - if i != 0 { - err = w.WriteByte(',') - if err != nil { - return msg, scratch, err - } - } - msg, scratch, err = rwMapKeyBytes(w, msg, scratch) - if err != nil { - return msg, scratch, err - } - err = w.WriteByte(':') - if err != nil { - return msg, scratch, err - } - msg, scratch, err = writeNext(w, msg, scratch) - if err != nil { - return msg, scratch, err - } - } - err = w.WriteByte('}') - return msg, scratch, err -} - -func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - msg, scratch, err := rwStringBytes(w, msg, scratch) - if err != nil { - if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { - return rwBytesBytes(w, msg, scratch) - } - } - return msg, scratch, err -} - -func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - str, msg, err := ReadStringZC(msg) - if err != nil { - return msg, scratch, err - } - _, err = rwquoted(w, str) - return msg, scratch, err -} - -func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - bts, msg, err := ReadBytesZC(msg) - if err != nil { - return msg, scratch, err - } - l := base64.StdEncoding.EncodedLen(len(bts)) - if cap(scratch) >= l { - scratch = scratch[0:l] - } else { - scratch = make([]byte, l) - } - base64.StdEncoding.Encode(scratch, bts) - err = w.WriteByte('"') - if err != nil { - return msg, scratch, err - } - _, err = w.Write(scratch) - if err != nil { - return msg, scratch, err - } - err = w.WriteByte('"') - return msg, scratch, err -} - -func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - msg, err := ReadNilBytes(msg) - if err != nil { - return msg, scratch, err - } - _, err = w.Write(null) - return msg, scratch, err -} - -func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - b, msg, err := 
ReadBoolBytes(msg) - if err != nil { - return msg, scratch, err - } - if b { - _, err = w.WriteString("true") - return msg, scratch, err - } - _, err = w.WriteString("false") - return msg, scratch, err -} - -func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - i, msg, err := ReadInt64Bytes(msg) - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendInt(scratch[0:0], i, 10) - _, err = w.Write(scratch) - return msg, scratch, err -} - -func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - u, msg, err := ReadUint64Bytes(msg) - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendUint(scratch[0:0], u, 10) - _, err = w.Write(scratch) - return msg, scratch, err -} - -func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) { - var f float64 - var err error - var sz int - if f64 { - sz = 64 - f, msg, err = ReadFloat64Bytes(msg) - } else { - sz = 32 - var v float32 - v, msg, err = ReadFloat32Bytes(msg) - f = float64(v) - } - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendFloat(scratch, f, 'f', -1, sz) - _, err = w.Write(scratch) - return msg, scratch, err -} - -func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - var f float32 - var err error - f, msg, err = ReadFloat32Bytes(msg) - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32) - _, err = w.Write(scratch) - return msg, scratch, err -} - -func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - var f float64 - var err error - f, msg, err = ReadFloat64Bytes(msg) - if err != nil { - return msg, scratch, err - } - scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64) - _, err = w.Write(scratch) - return msg, scratch, err -} - -func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - var t time.Time - var err error - t, msg, err = ReadTimeBytes(msg) - if err != nil { - return msg, scratch, err - } - bts, err := t.MarshalJSON() - if err != nil { - return msg, scratch, err - } - _, err = w.Write(bts) - return msg, scratch, err -} - -func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - var err error - var et int8 - et, err = peekExtension(msg) - if err != nil { - return msg, scratch, err - } - - // if it's time.Time - if et == TimeExtension { - var tm time.Time - tm, msg, err = ReadTimeBytes(msg) - if err != nil { - return msg, scratch, err - } - bts, err := tm.MarshalJSON() - if err != nil { - return msg, scratch, err - } - _, err = w.Write(bts) - return msg, scratch, err - } - - // if the extension is registered, - // use its canonical JSON form - if f, ok := extensionReg[et]; ok { - e := f() - msg, err = ReadExtensionBytes(msg, e) - if err != nil { - return msg, scratch, err - } - bts, err := json.Marshal(e) - if err != nil { - return msg, scratch, err - } - _, err = w.Write(bts) - return msg, scratch, err - } - - // otherwise, write `{"type": , "data": ""}` - r := RawExtension{} - r.Type = et - msg, err = ReadExtensionBytes(msg, &r) - if err != nil { - return msg, scratch, err - } - scratch, err = writeExt(w, r, scratch) - return msg, scratch, err -} - -func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) { - _, err := w.WriteString(`{"type":`) - if err != nil { - return scratch, err - } - scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10) - _, err = 
w.Write(scratch) - if err != nil { - return scratch, err - } - _, err = w.WriteString(`,"data":"`) - if err != nil { - return scratch, err - } - l := base64.StdEncoding.EncodedLen(len(r.Data)) - if cap(scratch) >= l { - scratch = scratch[0:l] - } else { - scratch = make([]byte, l) - } - base64.StdEncoding.Encode(scratch, r.Data) - _, err = w.Write(scratch) - if err != nil { - return scratch, err - } - _, err = w.WriteString(`"}`) - return scratch, err -} diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go deleted file mode 100644 index ad07ef995..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/number.go +++ /dev/null @@ -1,267 +0,0 @@ -package msgp - -import ( - "math" - "strconv" -) - -// The portable parts of the Number implementation - -// Number can be -// an int64, uint64, float32, -// or float64 internally. -// It can decode itself -// from any of the native -// messagepack number types. -// The zero-value of Number -// is Int(0). Using the equality -// operator with Number compares -// both the type and the value -// of the number. -type Number struct { - // internally, this - // is just a tagged union. - // the raw bits of the number - // are stored the same way regardless. - bits uint64 - typ Type -} - -// AsInt sets the number to an int64. -func (n *Number) AsInt(i int64) { - - // we always store int(0) - // as {0, InvalidType} in - // order to preserve - // the behavior of the == operator - if i == 0 { - n.typ = InvalidType - n.bits = 0 - return - } - - n.typ = IntType - n.bits = uint64(i) -} - -// AsUint sets the number to a uint64. -func (n *Number) AsUint(u uint64) { - n.typ = UintType - n.bits = u -} - -// AsFloat32 sets the value of the number -// to a float32. -func (n *Number) AsFloat32(f float32) { - n.typ = Float32Type - n.bits = uint64(math.Float32bits(f)) -} - -// AsFloat64 sets the value of the -// number to a float64. -func (n *Number) AsFloat64(f float64) { - n.typ = Float64Type - n.bits = math.Float64bits(f) -} - -// Int casts the number as an int64, and -// returns whether or not that was the -// underlying type. -func (n *Number) Int() (int64, bool) { - return int64(n.bits), n.typ == IntType || n.typ == InvalidType -} - -// Uint casts the number as a uint64, and returns -// whether or not that was the underlying type. -func (n *Number) Uint() (uint64, bool) { - return n.bits, n.typ == UintType -} - -// Float casts the number to a float64, and -// returns whether or not that was the underlying -// type (either a float64 or a float32). -func (n *Number) Float() (float64, bool) { - switch n.typ { - case Float32Type: - return float64(math.Float32frombits(uint32(n.bits))), true - case Float64Type: - return math.Float64frombits(n.bits), true - default: - return 0.0, false - } -} - -// Type will return one of: -// Float64Type, Float32Type, UintType, or IntType. 
-func (n *Number) Type() Type { - if n.typ == InvalidType { - return IntType - } - return n.typ -} - -// DecodeMsg implements msgp.Decodable -func (n *Number) DecodeMsg(r *Reader) error { - typ, err := r.NextType() - if err != nil { - return err - } - switch typ { - case Float32Type: - f, err := r.ReadFloat32() - if err != nil { - return err - } - n.AsFloat32(f) - return nil - case Float64Type: - f, err := r.ReadFloat64() - if err != nil { - return err - } - n.AsFloat64(f) - return nil - case IntType: - i, err := r.ReadInt64() - if err != nil { - return err - } - n.AsInt(i) - return nil - case UintType: - u, err := r.ReadUint64() - if err != nil { - return err - } - n.AsUint(u) - return nil - default: - return TypeError{Encoded: typ, Method: IntType} - } -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) { - typ := NextType(b) - switch typ { - case IntType: - i, o, err := ReadInt64Bytes(b) - if err != nil { - return b, err - } - n.AsInt(i) - return o, nil - case UintType: - u, o, err := ReadUint64Bytes(b) - if err != nil { - return b, err - } - n.AsUint(u) - return o, nil - case Float64Type: - f, o, err := ReadFloat64Bytes(b) - if err != nil { - return b, err - } - n.AsFloat64(f) - return o, nil - case Float32Type: - f, o, err := ReadFloat32Bytes(b) - if err != nil { - return b, err - } - n.AsFloat32(f) - return o, nil - default: - return b, TypeError{Method: IntType, Encoded: typ} - } -} - -// MarshalMsg implements msgp.Marshaler -func (n *Number) MarshalMsg(b []byte) ([]byte, error) { - switch n.typ { - case IntType: - return AppendInt64(b, int64(n.bits)), nil - case UintType: - return AppendUint64(b, uint64(n.bits)), nil - case Float64Type: - return AppendFloat64(b, math.Float64frombits(n.bits)), nil - case Float32Type: - return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil - default: - return AppendInt64(b, 0), nil - } -} - -// EncodeMsg implements msgp.Encodable -func (n *Number) EncodeMsg(w *Writer) error { - switch n.typ { - case IntType: - return w.WriteInt64(int64(n.bits)) - case UintType: - return w.WriteUint64(n.bits) - case Float64Type: - return w.WriteFloat64(math.Float64frombits(n.bits)) - case Float32Type: - return w.WriteFloat32(math.Float32frombits(uint32(n.bits))) - default: - return w.WriteInt64(0) - } -} - -// Msgsize implements msgp.Sizer -func (n *Number) Msgsize() int { - switch n.typ { - case Float32Type: - return Float32Size - case Float64Type: - return Float64Size - case IntType: - return Int64Size - case UintType: - return Uint64Size - default: - return 1 // fixint(0) - } -} - -// MarshalJSON implements json.Marshaler -func (n *Number) MarshalJSON() ([]byte, error) { - t := n.Type() - if t == InvalidType { - return []byte{'0'}, nil - } - out := make([]byte, 0, 32) - switch t { - case Float32Type, Float64Type: - f, _ := n.Float() - return strconv.AppendFloat(out, f, 'f', -1, 64), nil - case IntType: - i, _ := n.Int() - return strconv.AppendInt(out, i, 10), nil - case UintType: - u, _ := n.Uint() - return strconv.AppendUint(out, u, 10), nil - default: - panic("(*Number).typ is invalid") - } -} - -// String implements fmt.Stringer -func (n *Number) String() string { - switch n.typ { - case InvalidType: - return "0" - case Float32Type, Float64Type: - f, _ := n.Float() - return strconv.FormatFloat(f, 'f', -1, 64) - case IntType: - i, _ := n.Int() - return strconv.FormatInt(i, 10) - case UintType: - u, _ := n.Uint() - return strconv.FormatUint(u, 10) - default: - panic("(*Number).typ is invalid") - } 
-} diff --git a/vendor/github.com/tinylib/msgp/msgp/purego.go b/vendor/github.com/tinylib/msgp/msgp/purego.go deleted file mode 100644 index c828f7eca..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/purego.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build purego appengine - -package msgp - -// let's just assume appengine -// uses 64-bit hardware... -const smallint = false - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go deleted file mode 100644 index aa668c573..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/read.go +++ /dev/null @@ -1,1358 +0,0 @@ -package msgp - -import ( - "io" - "math" - "sync" - "time" - - "github.com/philhofer/fwd" -) - -// where we keep old *Readers -var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }} - -// Type is a MessagePack wire type, -// including this package's built-in -// extension types. -type Type byte - -// MessagePack Types -// -// The zero value of Type -// is InvalidType. -const ( - InvalidType Type = iota - - // MessagePack built-in types - - StrType - BinType - MapType - ArrayType - Float64Type - Float32Type - BoolType - IntType - UintType - NilType - ExtensionType - - // pseudo-types provided - // by extensions - - Complex64Type - Complex128Type - TimeType - - _maxtype -) - -// String implements fmt.Stringer -func (t Type) String() string { - switch t { - case StrType: - return "str" - case BinType: - return "bin" - case MapType: - return "map" - case ArrayType: - return "array" - case Float64Type: - return "float64" - case Float32Type: - return "float32" - case BoolType: - return "bool" - case UintType: - return "uint" - case IntType: - return "int" - case ExtensionType: - return "ext" - case NilType: - return "nil" - default: - return "" - } -} - -func freeR(m *Reader) { - readerPool.Put(m) -} - -// Unmarshaler is the interface fulfilled -// by objects that know how to unmarshal -// themselves from MessagePack. -// UnmarshalMsg unmarshals the object -// from binary, returing any leftover -// bytes and any errors encountered. -type Unmarshaler interface { - UnmarshalMsg([]byte) ([]byte, error) -} - -// Decodable is the interface fulfilled -// by objects that know how to read -// themselves from a *Reader. -type Decodable interface { - DecodeMsg(*Reader) error -} - -// Decode decodes 'd' from 'r'. -func Decode(r io.Reader, d Decodable) error { - rd := NewReader(r) - err := d.DecodeMsg(rd) - freeR(rd) - return err -} - -// NewReader returns a *Reader that -// reads from the provided reader. The -// reader will be buffered. -func NewReader(r io.Reader) *Reader { - p := readerPool.Get().(*Reader) - if p.R == nil { - p.R = fwd.NewReader(r) - } else { - p.R.Reset(r) - } - return p -} - -// NewReaderSize returns a *Reader with a buffer of the given size. -// (This is vastly preferable to passing the decoder a reader that is already buffered.) -func NewReaderSize(r io.Reader, sz int) *Reader { - return &Reader{R: fwd.NewReaderSize(r, sz)} -} - -// Reader wraps an io.Reader and provides -// methods to read MessagePack-encoded values -// from it. Readers are buffered. -type Reader struct { - // R is the buffered reader - // that the Reader uses - // to decode MessagePack. - // The Reader itself - // is stateless; all the - // buffering is done - // within R. 
- R *fwd.Reader - scratch []byte -} - -// Read implements `io.Reader` -func (m *Reader) Read(p []byte) (int, error) { - return m.R.Read(p) -} - -// CopyNext reads the next object from m without decoding it and writes it to w. -// It avoids unnecessary copies internally. -func (m *Reader) CopyNext(w io.Writer) (int64, error) { - sz, o, err := getNextSize(m.R) - if err != nil { - return 0, err - } - - var n int64 - // Opportunistic optimization: if we can fit the whole thing in the m.R - // buffer, then just get a pointer to that, and pass it to w.Write, - // avoiding an allocation. - if int(sz) <= m.R.BufferSize() { - var nn int - var buf []byte - buf, err = m.R.Next(int(sz)) - if err != nil { - if err == io.ErrUnexpectedEOF { - err = ErrShortBytes - } - return 0, err - } - nn, err = w.Write(buf) - n += int64(nn) - } else { - // Fall back to io.CopyN. - // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer) - n, err = io.CopyN(w, m.R, int64(sz)) - if err == io.ErrUnexpectedEOF { - err = ErrShortBytes - } - } - if err != nil { - return n, err - } else if n < int64(sz) { - return n, io.ErrShortWrite - } - - // for maps and slices, read elements - for x := uintptr(0); x < o; x++ { - var n2 int64 - n2, err = m.CopyNext(w) - if err != nil { - return n, err - } - n += n2 - } - return n, nil -} - -// ReadFull implements `io.ReadFull` -func (m *Reader) ReadFull(p []byte) (int, error) { - return m.R.ReadFull(p) -} - -// Reset resets the underlying reader. -func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) } - -// Buffered returns the number of bytes currently in the read buffer. -func (m *Reader) Buffered() int { return m.R.Buffered() } - -// BufferSize returns the capacity of the read buffer. -func (m *Reader) BufferSize() int { return m.R.BufferSize() } - -// NextType returns the next object type to be decoded. -func (m *Reader) NextType() (Type, error) { - p, err := m.R.Peek(1) - if err != nil { - return InvalidType, err - } - t := getType(p[0]) - if t == InvalidType { - return t, InvalidPrefixError(p[0]) - } - if t == ExtensionType { - v, err := m.peekExtensionType() - if err != nil { - return InvalidType, err - } - switch v { - case Complex64Extension: - return Complex64Type, nil - case Complex128Extension: - return Complex128Type, nil - case TimeExtension: - return TimeType, nil - } - } - return t, nil -} - -// IsNil returns whether or not -// the next byte is a null messagepack byte -func (m *Reader) IsNil() bool { - p, err := m.R.Peek(1) - return err == nil && p[0] == mnil -} - -// getNextSize returns the size of the next object on the wire. -// returns (obj size, obj elements, error) -// only maps and arrays have non-zero obj elements -// for maps and arrays, obj size does not include elements -// -// use uintptr b/c it's guaranteed to be large enough -// to hold whatever we can fit in memory. 
-func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) { - b, err := r.Peek(1) - if err != nil { - return 0, 0, err - } - lead := b[0] - spec := &sizes[lead] - size, mode := spec.size, spec.extra - if size == 0 { - return 0, 0, InvalidPrefixError(lead) - } - if mode >= 0 { - return uintptr(size), uintptr(mode), nil - } - b, err = r.Peek(int(size)) - if err != nil { - return 0, 0, err - } - switch mode { - case extra8: - return uintptr(size) + uintptr(b[1]), 0, nil - case extra16: - return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil - case extra32: - return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil - case map16v: - return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil - case map32v: - return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil - case array16v: - return uintptr(size), uintptr(big.Uint16(b[1:])), nil - case array32v: - return uintptr(size), uintptr(big.Uint32(b[1:])), nil - default: - return 0, 0, fatal - } -} - -// Skip skips over the next object, regardless of -// its type. If it is an array or map, the whole array -// or map will be skipped. -func (m *Reader) Skip() error { - var ( - v uintptr // bytes - o uintptr // objects - err error - p []byte - ) - - // we can use the faster - // method if we have enough - // buffered data - if m.R.Buffered() >= 5 { - p, err = m.R.Peek(5) - if err != nil { - return err - } - v, o, err = getSize(p) - if err != nil { - return err - } - } else { - v, o, err = getNextSize(m.R) - if err != nil { - return err - } - } - - // 'v' is always non-zero - // if err == nil - _, err = m.R.Skip(int(v)) - if err != nil { - return err - } - - // for maps and slices, skip elements - for x := uintptr(0); x < o; x++ { - err = m.Skip() - if err != nil { - return err - } - } - return nil -} - -// ReadMapHeader reads the next object -// as a map header and returns the size -// of the map and the number of bytes written. -// It will return a TypeError{} if the next -// object is not a map. -func (m *Reader) ReadMapHeader() (sz uint32, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - if isfixmap(lead) { - sz = uint32(rfixmap(lead)) - _, err = m.R.Skip(1) - return - } - switch lead { - case mmap16: - p, err = m.R.Next(3) - if err != nil { - return - } - sz = uint32(big.Uint16(p[1:])) - return - case mmap32: - p, err = m.R.Next(5) - if err != nil { - return - } - sz = big.Uint32(p[1:]) - return - default: - err = badPrefix(MapType, lead) - return - } -} - -// ReadMapKey reads either a 'str' or 'bin' field from -// the reader and returns the value as a []byte. It uses -// scratch for storage if it is large enough. -func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) { - out, err := m.ReadStringAsBytes(scratch) - if err != nil { - if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { - return m.ReadBytes(scratch) - } - return nil, err - } - return out, nil -} - -// MapKeyPtr returns a []byte pointing to the contents -// of a valid map key. The key cannot be empty, and it -// must be shorter than the total buffer size of the -// *Reader. Additionally, the returned slice is only -// valid until the next *Reader method call. Users -// should exercise extreme care when using this -// method; writing into the returned slice may -// corrupt future reads. 
-func (m *Reader) ReadMapKeyPtr() ([]byte, error) { - p, err := m.R.Peek(1) - if err != nil { - return nil, err - } - lead := p[0] - var read int - if isfixstr(lead) { - read = int(rfixstr(lead)) - m.R.Skip(1) - goto fill - } - switch lead { - case mstr8, mbin8: - p, err = m.R.Next(2) - if err != nil { - return nil, err - } - read = int(p[1]) - case mstr16, mbin16: - p, err = m.R.Next(3) - if err != nil { - return nil, err - } - read = int(big.Uint16(p[1:])) - case mstr32, mbin32: - p, err = m.R.Next(5) - if err != nil { - return nil, err - } - read = int(big.Uint32(p[1:])) - default: - return nil, badPrefix(StrType, lead) - } -fill: - if read == 0 { - return nil, ErrShortBytes - } - return m.R.Next(read) -} - -// ReadArrayHeader reads the next object as an -// array header and returns the size of the array -// and the number of bytes read. -func (m *Reader) ReadArrayHeader() (sz uint32, err error) { - var lead byte - var p []byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - if isfixarray(lead) { - sz = uint32(rfixarray(lead)) - _, err = m.R.Skip(1) - return - } - switch lead { - case marray16: - p, err = m.R.Next(3) - if err != nil { - return - } - sz = uint32(big.Uint16(p[1:])) - return - - case marray32: - p, err = m.R.Next(5) - if err != nil { - return - } - sz = big.Uint32(p[1:]) - return - - default: - err = badPrefix(ArrayType, lead) - return - } -} - -// ReadNil reads a 'nil' MessagePack byte from the reader -func (m *Reader) ReadNil() error { - p, err := m.R.Peek(1) - if err != nil { - return err - } - if p[0] != mnil { - return badPrefix(NilType, p[0]) - } - _, err = m.R.Skip(1) - return err -} - -// ReadFloat64 reads a float64 from the reader. -// (If the value on the wire is encoded as a float32, -// it will be up-cast to a float64.) 
-func (m *Reader) ReadFloat64() (f float64, err error) { - var p []byte - p, err = m.R.Peek(9) - if err != nil { - // we'll allow a coversion from float32 to float64, - // since we don't lose any precision - if err == io.EOF && len(p) > 0 && p[0] == mfloat32 { - ef, err := m.ReadFloat32() - return float64(ef), err - } - return - } - if p[0] != mfloat64 { - // see above - if p[0] == mfloat32 { - ef, err := m.ReadFloat32() - return float64(ef), err - } - err = badPrefix(Float64Type, p[0]) - return - } - f = math.Float64frombits(getMuint64(p)) - _, err = m.R.Skip(9) - return -} - -// ReadFloat32 reads a float32 from the reader -func (m *Reader) ReadFloat32() (f float32, err error) { - var p []byte - p, err = m.R.Peek(5) - if err != nil { - return - } - if p[0] != mfloat32 { - err = badPrefix(Float32Type, p[0]) - return - } - f = math.Float32frombits(getMuint32(p)) - _, err = m.R.Skip(5) - return -} - -// ReadBool reads a bool from the reader -func (m *Reader) ReadBool() (b bool, err error) { - var p []byte - p, err = m.R.Peek(1) - if err != nil { - return - } - switch p[0] { - case mtrue: - b = true - case mfalse: - default: - err = badPrefix(BoolType, p[0]) - return - } - _, err = m.R.Skip(1) - return -} - -// ReadInt64 reads an int64 from the reader -func (m *Reader) ReadInt64() (i int64, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - - if isfixint(lead) { - i = int64(rfixint(lead)) - _, err = m.R.Skip(1) - return - } else if isnfixint(lead) { - i = int64(rnfixint(lead)) - _, err = m.R.Skip(1) - return - } - - switch lead { - case mint8: - p, err = m.R.Next(2) - if err != nil { - return - } - i = int64(getMint8(p)) - return - - case muint8: - p, err = m.R.Next(2) - if err != nil { - return - } - i = int64(getMuint8(p)) - return - - case mint16: - p, err = m.R.Next(3) - if err != nil { - return - } - i = int64(getMint16(p)) - return - - case muint16: - p, err = m.R.Next(3) - if err != nil { - return - } - i = int64(getMuint16(p)) - return - - case mint32: - p, err = m.R.Next(5) - if err != nil { - return - } - i = int64(getMint32(p)) - return - - case muint32: - p, err = m.R.Next(5) - if err != nil { - return - } - i = int64(getMuint32(p)) - return - - case mint64: - p, err = m.R.Next(9) - if err != nil { - return - } - i = getMint64(p) - return - - case muint64: - p, err = m.R.Next(9) - if err != nil { - return - } - u := getMuint64(p) - if u > math.MaxInt64 { - err = UintOverflow{Value: u, FailedBitsize: 64} - return - } - i = int64(u) - return - - default: - err = badPrefix(IntType, lead) - return - } -} - -// ReadInt32 reads an int32 from the reader -func (m *Reader) ReadInt32() (i int32, err error) { - var in int64 - in, err = m.ReadInt64() - if in > math.MaxInt32 || in < math.MinInt32 { - err = IntOverflow{Value: in, FailedBitsize: 32} - return - } - i = int32(in) - return -} - -// ReadInt16 reads an int16 from the reader -func (m *Reader) ReadInt16() (i int16, err error) { - var in int64 - in, err = m.ReadInt64() - if in > math.MaxInt16 || in < math.MinInt16 { - err = IntOverflow{Value: in, FailedBitsize: 16} - return - } - i = int16(in) - return -} - -// ReadInt8 reads an int8 from the reader -func (m *Reader) ReadInt8() (i int8, err error) { - var in int64 - in, err = m.ReadInt64() - if in > math.MaxInt8 || in < math.MinInt8 { - err = IntOverflow{Value: in, FailedBitsize: 8} - return - } - i = int8(in) - return -} - -// ReadInt reads an int from the reader -func (m *Reader) ReadInt() (i int, err error) { - if 
smallint { - var in int32 - in, err = m.ReadInt32() - i = int(in) - return - } - var in int64 - in, err = m.ReadInt64() - i = int(in) - return -} - -// ReadUint64 reads a uint64 from the reader -func (m *Reader) ReadUint64() (u uint64, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - if isfixint(lead) { - u = uint64(rfixint(lead)) - _, err = m.R.Skip(1) - return - } - switch lead { - case mint8: - p, err = m.R.Next(2) - if err != nil { - return - } - v := int64(getMint8(p)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - return - - case muint8: - p, err = m.R.Next(2) - if err != nil { - return - } - u = uint64(getMuint8(p)) - return - - case mint16: - p, err = m.R.Next(3) - if err != nil { - return - } - v := int64(getMint16(p)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - return - - case muint16: - p, err = m.R.Next(3) - if err != nil { - return - } - u = uint64(getMuint16(p)) - return - - case mint32: - p, err = m.R.Next(5) - if err != nil { - return - } - v := int64(getMint32(p)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - return - - case muint32: - p, err = m.R.Next(5) - if err != nil { - return - } - u = uint64(getMuint32(p)) - return - - case mint64: - p, err = m.R.Next(9) - if err != nil { - return - } - v := int64(getMint64(p)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - return - - case muint64: - p, err = m.R.Next(9) - if err != nil { - return - } - u = getMuint64(p) - return - - default: - if isnfixint(lead) { - err = UintBelowZero{Value: int64(rnfixint(lead))} - } else { - err = badPrefix(UintType, lead) - } - return - - } -} - -// ReadUint32 reads a uint32 from the reader -func (m *Reader) ReadUint32() (u uint32, err error) { - var in uint64 - in, err = m.ReadUint64() - if in > math.MaxUint32 { - err = UintOverflow{Value: in, FailedBitsize: 32} - return - } - u = uint32(in) - return -} - -// ReadUint16 reads a uint16 from the reader -func (m *Reader) ReadUint16() (u uint16, err error) { - var in uint64 - in, err = m.ReadUint64() - if in > math.MaxUint16 { - err = UintOverflow{Value: in, FailedBitsize: 16} - return - } - u = uint16(in) - return -} - -// ReadUint8 reads a uint8 from the reader -func (m *Reader) ReadUint8() (u uint8, err error) { - var in uint64 - in, err = m.ReadUint64() - if in > math.MaxUint8 { - err = UintOverflow{Value: in, FailedBitsize: 8} - return - } - u = uint8(in) - return -} - -// ReadUint reads a uint from the reader -func (m *Reader) ReadUint() (u uint, err error) { - if smallint { - var un uint32 - un, err = m.ReadUint32() - u = uint(un) - return - } - var un uint64 - un, err = m.ReadUint64() - u = uint(un) - return -} - -// ReadByte is analogous to ReadUint8. -// -// NOTE: this is *not* an implementation -// of io.ByteReader. -func (m *Reader) ReadByte() (b byte, err error) { - var in uint64 - in, err = m.ReadUint64() - if in > math.MaxUint8 { - err = UintOverflow{Value: in, FailedBitsize: 8} - return - } - b = byte(in) - return -} - -// ReadBytes reads a MessagePack 'bin' object -// from the reader and returns its value. It may -// use 'scratch' for storage if it is non-nil. 
-func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(2) - if err != nil { - return - } - lead = p[0] - var read int64 - switch lead { - case mbin8: - read = int64(p[1]) - m.R.Skip(2) - case mbin16: - p, err = m.R.Next(3) - if err != nil { - return - } - read = int64(big.Uint16(p[1:])) - case mbin32: - p, err = m.R.Next(5) - if err != nil { - return - } - read = int64(big.Uint32(p[1:])) - default: - err = badPrefix(BinType, lead) - return - } - if int64(cap(scratch)) < read { - b = make([]byte, read) - } else { - b = scratch[0:read] - } - _, err = m.R.ReadFull(b) - return -} - -// ReadBytesHeader reads the size header -// of a MessagePack 'bin' object. The user -// is responsible for dealing with the next -// 'sz' bytes from the reader in an application-specific -// way. -func (m *Reader) ReadBytesHeader() (sz uint32, err error) { - var p []byte - p, err = m.R.Peek(1) - if err != nil { - return - } - switch p[0] { - case mbin8: - p, err = m.R.Next(2) - if err != nil { - return - } - sz = uint32(p[1]) - return - case mbin16: - p, err = m.R.Next(3) - if err != nil { - return - } - sz = uint32(big.Uint16(p[1:])) - return - case mbin32: - p, err = m.R.Next(5) - if err != nil { - return - } - sz = uint32(big.Uint32(p[1:])) - return - default: - err = badPrefix(BinType, p[0]) - return - } -} - -// ReadExactBytes reads a MessagePack 'bin'-encoded -// object off of the wire into the provided slice. An -// ArrayError will be returned if the object is not -// exactly the length of the input slice. -func (m *Reader) ReadExactBytes(into []byte) error { - p, err := m.R.Peek(2) - if err != nil { - return err - } - lead := p[0] - var read int64 // bytes to read - var skip int // prefix size to skip - switch lead { - case mbin8: - read = int64(p[1]) - skip = 2 - case mbin16: - p, err = m.R.Peek(3) - if err != nil { - return err - } - read = int64(big.Uint16(p[1:])) - skip = 3 - case mbin32: - p, err = m.R.Peek(5) - if err != nil { - return err - } - read = int64(big.Uint32(p[1:])) - skip = 5 - default: - return badPrefix(BinType, lead) - } - if read != int64(len(into)) { - return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)} - } - m.R.Skip(skip) - _, err = m.R.ReadFull(into) - return err -} - -// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string -// and returns its value as bytes. It may use 'scratch' for storage -// if it is non-nil. -func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) { - var p []byte - var lead byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - var read int64 - - if isfixstr(lead) { - read = int64(rfixstr(lead)) - m.R.Skip(1) - goto fill - } - - switch lead { - case mstr8: - p, err = m.R.Next(2) - if err != nil { - return - } - read = int64(uint8(p[1])) - case mstr16: - p, err = m.R.Next(3) - if err != nil { - return - } - read = int64(big.Uint16(p[1:])) - case mstr32: - p, err = m.R.Next(5) - if err != nil { - return - } - read = int64(big.Uint32(p[1:])) - default: - err = badPrefix(StrType, lead) - return - } -fill: - if int64(cap(scratch)) < read { - b = make([]byte, read) - } else { - b = scratch[0:read] - } - _, err = m.R.ReadFull(b) - return -} - -// ReadStringHeader reads a string header -// off of the wire. The user is then responsible -// for dealing with the next 'sz' bytes from -// the reader in an application-specific manner. 
-func (m *Reader) ReadStringHeader() (sz uint32, err error) { - var p []byte - p, err = m.R.Peek(1) - if err != nil { - return - } - lead := p[0] - if isfixstr(lead) { - sz = uint32(rfixstr(lead)) - m.R.Skip(1) - return - } - switch lead { - case mstr8: - p, err = m.R.Next(2) - if err != nil { - return - } - sz = uint32(p[1]) - return - case mstr16: - p, err = m.R.Next(3) - if err != nil { - return - } - sz = uint32(big.Uint16(p[1:])) - return - case mstr32: - p, err = m.R.Next(5) - if err != nil { - return - } - sz = big.Uint32(p[1:]) - return - default: - err = badPrefix(StrType, lead) - return - } -} - -// ReadString reads a utf-8 string from the reader -func (m *Reader) ReadString() (s string, err error) { - var p []byte - var lead byte - var read int64 - p, err = m.R.Peek(1) - if err != nil { - return - } - lead = p[0] - - if isfixstr(lead) { - read = int64(rfixstr(lead)) - m.R.Skip(1) - goto fill - } - - switch lead { - case mstr8: - p, err = m.R.Next(2) - if err != nil { - return - } - read = int64(uint8(p[1])) - case mstr16: - p, err = m.R.Next(3) - if err != nil { - return - } - read = int64(big.Uint16(p[1:])) - case mstr32: - p, err = m.R.Next(5) - if err != nil { - return - } - read = int64(big.Uint32(p[1:])) - default: - err = badPrefix(StrType, lead) - return - } -fill: - if read == 0 { - s, err = "", nil - return - } - // reading into the memory - // that will become the string - // itself has vastly superior - // worst-case performance, because - // the reader buffer doesn't have - // to be large enough to hold the string. - // the idea here is to make it more - // difficult for someone malicious - // to cause the system to run out of - // memory by sending very large strings. - // - // NOTE: this works because the argument - // passed to (*fwd.Reader).ReadFull escapes - // to the heap; its argument may, in turn, - // be passed to the underlying reader, and - // thus escape analysis *must* conclude that - // 'out' escapes. - out := make([]byte, read) - _, err = m.R.ReadFull(out) - if err != nil { - return - } - s = UnsafeString(out) - return -} - -// ReadComplex64 reads a complex64 from the reader -func (m *Reader) ReadComplex64() (f complex64, err error) { - var p []byte - p, err = m.R.Peek(10) - if err != nil { - return - } - if p[0] != mfixext8 { - err = badPrefix(Complex64Type, p[0]) - return - } - if int8(p[1]) != Complex64Extension { - err = errExt(int8(p[1]), Complex64Extension) - return - } - f = complex(math.Float32frombits(big.Uint32(p[2:])), - math.Float32frombits(big.Uint32(p[6:]))) - _, err = m.R.Skip(10) - return -} - -// ReadComplex128 reads a complex128 from the reader -func (m *Reader) ReadComplex128() (f complex128, err error) { - var p []byte - p, err = m.R.Peek(18) - if err != nil { - return - } - if p[0] != mfixext16 { - err = badPrefix(Complex128Type, p[0]) - return - } - if int8(p[1]) != Complex128Extension { - err = errExt(int8(p[1]), Complex128Extension) - return - } - f = complex(math.Float64frombits(big.Uint64(p[2:])), - math.Float64frombits(big.Uint64(p[10:]))) - _, err = m.R.Skip(18) - return -} - -// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}. -// (You must pass a non-nil map into the function.) 
-func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) { - var sz uint32 - sz, err = m.ReadMapHeader() - if err != nil { - return - } - for key := range mp { - delete(mp, key) - } - for i := uint32(0); i < sz; i++ { - var key string - var val interface{} - key, err = m.ReadString() - if err != nil { - return - } - val, err = m.ReadIntf() - if err != nil { - return - } - mp[key] = val - } - return -} - -// ReadTime reads a time.Time object from the reader. -// The returned time's location will be set to time.Local. -func (m *Reader) ReadTime() (t time.Time, err error) { - var p []byte - p, err = m.R.Peek(15) - if err != nil { - return - } - if p[0] != mext8 || p[1] != 12 { - err = badPrefix(TimeType, p[0]) - return - } - if int8(p[2]) != TimeExtension { - err = errExt(int8(p[2]), TimeExtension) - return - } - sec, nsec := getUnix(p[3:]) - t = time.Unix(sec, int64(nsec)).Local() - _, err = m.R.Skip(15) - return -} - -// ReadIntf reads out the next object as a raw interface{}. -// Arrays are decoded as []interface{}, and maps are decoded -// as map[string]interface{}. Integers are decoded as int64 -// and unsigned integers are decoded as uint64. -func (m *Reader) ReadIntf() (i interface{}, err error) { - var t Type - t, err = m.NextType() - if err != nil { - return - } - switch t { - case BoolType: - i, err = m.ReadBool() - return - - case IntType: - i, err = m.ReadInt64() - return - - case UintType: - i, err = m.ReadUint64() - return - - case BinType: - i, err = m.ReadBytes(nil) - return - - case StrType: - i, err = m.ReadString() - return - - case Complex64Type: - i, err = m.ReadComplex64() - return - - case Complex128Type: - i, err = m.ReadComplex128() - return - - case TimeType: - i, err = m.ReadTime() - return - - case ExtensionType: - var t int8 - t, err = m.peekExtensionType() - if err != nil { - return - } - f, ok := extensionReg[t] - if ok { - e := f() - err = m.ReadExtension(e) - i = e - return - } - var e RawExtension - e.Type = t - err = m.ReadExtension(&e) - i = &e - return - - case MapType: - mp := make(map[string]interface{}) - err = m.ReadMapStrIntf(mp) - i = mp - return - - case NilType: - err = m.ReadNil() - i = nil - return - - case Float32Type: - i, err = m.ReadFloat32() - return - - case Float64Type: - i, err = m.ReadFloat64() - return - - case ArrayType: - var sz uint32 - sz, err = m.ReadArrayHeader() - - if err != nil { - return - } - out := make([]interface{}, int(sz)) - for j := range out { - out[j], err = m.ReadIntf() - if err != nil { - return - } - } - i = out - return - - default: - return nil, fatal // unreachable - } -} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go deleted file mode 100644 index f53f84d01..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go +++ /dev/null @@ -1,1197 +0,0 @@ -package msgp - -import ( - "bytes" - "encoding/binary" - "math" - "time" -) - -var big = binary.BigEndian - -// NextType returns the type of the next -// object in the slice. If the length -// of the input is zero, it returns -// InvalidType. 
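The ReadMapStrIntf and ReadIntf methods removed above round-trip against the append-style encoders deleted further down in this diff. A small sketch of that round trip, again assuming msgp.NewReader from the earlier, removed part of read.go:

```go
package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode a map[string]interface{} with the append-style helper.
	src := map[string]interface{}{
		"name":  "volume-01",
		"size":  int64(1 << 30),
		"since": time.Now().UTC(),
	}
	buf, err := msgp.AppendMapStrIntf(nil, src)
	if err != nil {
		panic(err)
	}

	// Decode it back through the streaming Reader. ReadMapStrIntf
	// requires a non-nil destination map and clears it first.
	r := msgp.NewReader(bytes.NewReader(buf))
	out := make(map[string]interface{})
	if err := r.ReadMapStrIntf(out); err != nil {
		panic(err)
	}
	fmt.Println(out["name"], out["size"])
}
```

Integers come back as int64 and the time value as time.Time, matching the ReadIntf type mapping described in the removed comments.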
-func NextType(b []byte) Type { - if len(b) == 0 { - return InvalidType - } - spec := sizes[b[0]] - t := spec.typ - if t == ExtensionType && len(b) > int(spec.size) { - var tp int8 - if spec.extra == constsize { - tp = int8(b[1]) - } else { - tp = int8(b[spec.size-1]) - } - switch tp { - case TimeExtension: - return TimeType - case Complex128Extension: - return Complex128Type - case Complex64Extension: - return Complex64Type - default: - return ExtensionType - } - } - return t -} - -// IsNil returns true if len(b)>0 and -// the leading byte is a 'nil' MessagePack -// byte; false otherwise -func IsNil(b []byte) bool { - if len(b) != 0 && b[0] == mnil { - return true - } - return false -} - -// Raw is raw MessagePack. -// Raw allows you to read and write -// data without interpreting its contents. -type Raw []byte - -// MarshalMsg implements msgp.Marshaler. -// It appends the raw contents of 'raw' -// to the provided byte slice. If 'raw' -// is 0 bytes, 'nil' will be appended instead. -func (r Raw) MarshalMsg(b []byte) ([]byte, error) { - i := len(r) - if i == 0 { - return AppendNil(b), nil - } - o, l := ensure(b, i) - copy(o[l:], []byte(r)) - return o, nil -} - -// UnmarshalMsg implements msgp.Unmarshaler. -// It sets the contents of *Raw to be the next -// object in the provided byte slice. -func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) { - l := len(b) - out, err := Skip(b) - if err != nil { - return b, err - } - rlen := l - len(out) - if IsNil(b[:rlen]) { - rlen = 0 - } - if cap(*r) < rlen { - *r = make(Raw, rlen) - } else { - *r = (*r)[0:rlen] - } - copy(*r, b[:rlen]) - return out, nil -} - -// EncodeMsg implements msgp.Encodable. -// It writes the raw bytes to the writer. -// If r is empty, it writes 'nil' instead. -func (r Raw) EncodeMsg(w *Writer) error { - if len(r) == 0 { - return w.WriteNil() - } - _, err := w.Write([]byte(r)) - return err -} - -// DecodeMsg implements msgp.Decodable. -// It sets the value of *Raw to be the -// next object on the wire. -func (r *Raw) DecodeMsg(f *Reader) error { - *r = (*r)[:0] - err := appendNext(f, (*[]byte)(r)) - if IsNil(*r) { - *r = (*r)[:0] - } - return err -} - -// Msgsize implements msgp.Sizer -func (r Raw) Msgsize() int { - l := len(r) - if l == 0 { - return 1 // for 'nil' - } - return l -} - -func appendNext(f *Reader, d *[]byte) error { - amt, o, err := getNextSize(f.R) - if err != nil { - return err - } - var i int - *d, i = ensure(*d, int(amt)) - _, err = f.R.ReadFull((*d)[i:]) - if err != nil { - return err - } - for o > 0 { - err = appendNext(f, d) - if err != nil { - return err - } - o-- - } - return nil -} - -// MarshalJSON implements json.Marshaler -func (r *Raw) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - _, err := UnmarshalAsJSON(&buf, []byte(*r)) - return buf.Bytes(), err -} - -// ReadMapHeaderBytes reads a map header size -// from 'b' and returns the remaining bytes. 
-// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a map) -func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) { - l := len(b) - if l < 1 { - err = ErrShortBytes - return - } - - lead := b[0] - if isfixmap(lead) { - sz = uint32(rfixmap(lead)) - o = b[1:] - return - } - - switch lead { - case mmap16: - if l < 3 { - err = ErrShortBytes - return - } - sz = uint32(big.Uint16(b[1:])) - o = b[3:] - return - - case mmap32: - if l < 5 { - err = ErrShortBytes - return - } - sz = big.Uint32(b[1:]) - o = b[5:] - return - - default: - err = badPrefix(MapType, lead) - return - } -} - -// ReadMapKeyZC attempts to read a map key -// from 'b' and returns the key bytes and the remaining bytes -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a str or bin) -func ReadMapKeyZC(b []byte) ([]byte, []byte, error) { - o, b, err := ReadStringZC(b) - if err != nil { - if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { - return ReadBytesZC(b) - } - return nil, b, err - } - return o, b, nil -} - -// ReadArrayHeaderBytes attempts to read -// the array header size off of 'b' and return -// the size and remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not an array) -func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) { - if len(b) < 1 { - return 0, nil, ErrShortBytes - } - lead := b[0] - if isfixarray(lead) { - sz = uint32(rfixarray(lead)) - o = b[1:] - return - } - - switch lead { - case marray16: - if len(b) < 3 { - err = ErrShortBytes - return - } - sz = uint32(big.Uint16(b[1:])) - o = b[3:] - return - - case marray32: - if len(b) < 5 { - err = ErrShortBytes - return - } - sz = big.Uint32(b[1:]) - o = b[5:] - return - - default: - err = badPrefix(ArrayType, lead) - return - } -} - -// ReadNilBytes tries to read a "nil" byte -// off of 'b' and return the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a 'nil') -// - InvalidPrefixError -func ReadNilBytes(b []byte) ([]byte, error) { - if len(b) < 1 { - return nil, ErrShortBytes - } - if b[0] != mnil { - return b, badPrefix(NilType, b[0]) - } - return b[1:], nil -} - -// ReadFloat64Bytes tries to read a float64 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a float64) -func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) { - if len(b) < 9 { - if len(b) >= 5 && b[0] == mfloat32 { - var tf float32 - tf, o, err = ReadFloat32Bytes(b) - f = float64(tf) - return - } - err = ErrShortBytes - return - } - - if b[0] != mfloat64 { - if b[0] == mfloat32 { - var tf float32 - tf, o, err = ReadFloat32Bytes(b) - f = float64(tf) - return - } - err = badPrefix(Float64Type, b[0]) - return - } - - f = math.Float64frombits(getMuint64(b)) - o = b[9:] - return -} - -// ReadFloat32Bytes tries to read a float64 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a float32) -func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) { - if len(b) < 5 { - err = ErrShortBytes - return - } - - if b[0] != mfloat32 { - err = TypeError{Method: Float32Type, Encoded: getType(b[0])} - return - } - - f = math.Float32frombits(getMuint32(b)) - o = b[5:] - return -} - -// ReadBoolBytes tries to read a float64 -// from 'b' and return the value and the remaining bytes. 
-// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a bool) -func ReadBoolBytes(b []byte) (bool, []byte, error) { - if len(b) < 1 { - return false, b, ErrShortBytes - } - switch b[0] { - case mtrue: - return true, b[1:], nil - case mfalse: - return false, b[1:], nil - default: - return false, b, badPrefix(BoolType, b[0]) - } -} - -// ReadInt64Bytes tries to read an int64 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError (not a int) -func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) { - l := len(b) - if l < 1 { - return 0, nil, ErrShortBytes - } - - lead := b[0] - if isfixint(lead) { - i = int64(rfixint(lead)) - o = b[1:] - return - } - if isnfixint(lead) { - i = int64(rnfixint(lead)) - o = b[1:] - return - } - - switch lead { - case mint8: - if l < 2 { - err = ErrShortBytes - return - } - i = int64(getMint8(b)) - o = b[2:] - return - - case muint8: - if l < 2 { - err = ErrShortBytes - return - } - i = int64(getMuint8(b)) - o = b[2:] - return - - case mint16: - if l < 3 { - err = ErrShortBytes - return - } - i = int64(getMint16(b)) - o = b[3:] - return - - case muint16: - if l < 3 { - err = ErrShortBytes - return - } - i = int64(getMuint16(b)) - o = b[3:] - return - - case mint32: - if l < 5 { - err = ErrShortBytes - return - } - i = int64(getMint32(b)) - o = b[5:] - return - - case muint32: - if l < 5 { - err = ErrShortBytes - return - } - i = int64(getMuint32(b)) - o = b[5:] - return - - case mint64: - if l < 9 { - err = ErrShortBytes - return - } - i = int64(getMint64(b)) - o = b[9:] - return - - case muint64: - if l < 9 { - err = ErrShortBytes - return - } - u := getMuint64(b) - if u > math.MaxInt64 { - err = UintOverflow{Value: u, FailedBitsize: 64} - return - } - i = int64(u) - o = b[9:] - return - - default: - err = badPrefix(IntType, lead) - return - } -} - -// ReadInt32Bytes tries to read an int32 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a int) -// - IntOverflow{} (value doesn't fit in int32) -func ReadInt32Bytes(b []byte) (int32, []byte, error) { - i, o, err := ReadInt64Bytes(b) - if i > math.MaxInt32 || i < math.MinInt32 { - return 0, o, IntOverflow{Value: i, FailedBitsize: 32} - } - return int32(i), o, err -} - -// ReadInt16Bytes tries to read an int16 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a int) -// - IntOverflow{} (value doesn't fit in int16) -func ReadInt16Bytes(b []byte) (int16, []byte, error) { - i, o, err := ReadInt64Bytes(b) - if i > math.MaxInt16 || i < math.MinInt16 { - return 0, o, IntOverflow{Value: i, FailedBitsize: 16} - } - return int16(i), o, err -} - -// ReadInt8Bytes tries to read an int16 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a int) -// - IntOverflow{} (value doesn't fit in int8) -func ReadInt8Bytes(b []byte) (int8, []byte, error) { - i, o, err := ReadInt64Bytes(b) - if i > math.MaxInt8 || i < math.MinInt8 { - return 0, o, IntOverflow{Value: i, FailedBitsize: 8} - } - return int8(i), o, err -} - -// ReadIntBytes tries to read an int -// from 'b' and return the value and the remaining bytes. 
-// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a int) -// - IntOverflow{} (value doesn't fit in int; 32-bit platforms only) -func ReadIntBytes(b []byte) (int, []byte, error) { - if smallint { - i, b, err := ReadInt32Bytes(b) - return int(i), b, err - } - i, b, err := ReadInt64Bytes(b) - return int(i), b, err -} - -// ReadUint64Bytes tries to read a uint64 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) { - l := len(b) - if l < 1 { - return 0, nil, ErrShortBytes - } - - lead := b[0] - if isfixint(lead) { - u = uint64(rfixint(lead)) - o = b[1:] - return - } - - switch lead { - case mint8: - if l < 2 { - err = ErrShortBytes - return - } - v := int64(getMint8(b)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - o = b[2:] - return - - case muint8: - if l < 2 { - err = ErrShortBytes - return - } - u = uint64(getMuint8(b)) - o = b[2:] - return - - case mint16: - if l < 3 { - err = ErrShortBytes - return - } - v := int64(getMint16(b)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - o = b[3:] - return - - case muint16: - if l < 3 { - err = ErrShortBytes - return - } - u = uint64(getMuint16(b)) - o = b[3:] - return - - case mint32: - if l < 5 { - err = ErrShortBytes - return - } - v := int64(getMint32(b)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - o = b[5:] - return - - case muint32: - if l < 5 { - err = ErrShortBytes - return - } - u = uint64(getMuint32(b)) - o = b[5:] - return - - case mint64: - if l < 9 { - err = ErrShortBytes - return - } - v := int64(getMint64(b)) - if v < 0 { - err = UintBelowZero{Value: v} - return - } - u = uint64(v) - o = b[9:] - return - - case muint64: - if l < 9 { - err = ErrShortBytes - return - } - u = getMuint64(b) - o = b[9:] - return - - default: - if isnfixint(lead) { - err = UintBelowZero{Value: int64(rnfixint(lead))} - } else { - err = badPrefix(UintType, lead) - } - return - } -} - -// ReadUint32Bytes tries to read a uint32 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -// - UintOverflow{} (value too large for uint32) -func ReadUint32Bytes(b []byte) (uint32, []byte, error) { - v, o, err := ReadUint64Bytes(b) - if v > math.MaxUint32 { - return 0, nil, UintOverflow{Value: v, FailedBitsize: 32} - } - return uint32(v), o, err -} - -// ReadUint16Bytes tries to read a uint16 -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -// - UintOverflow{} (value too large for uint16) -func ReadUint16Bytes(b []byte) (uint16, []byte, error) { - v, o, err := ReadUint64Bytes(b) - if v > math.MaxUint16 { - return 0, nil, UintOverflow{Value: v, FailedBitsize: 16} - } - return uint16(v), o, err -} - -// ReadUint8Bytes tries to read a uint8 -// from 'b' and return the value and the remaining bytes. 
-// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -// - UintOverflow{} (value too large for uint8) -func ReadUint8Bytes(b []byte) (uint8, []byte, error) { - v, o, err := ReadUint64Bytes(b) - if v > math.MaxUint8 { - return 0, nil, UintOverflow{Value: v, FailedBitsize: 8} - } - return uint8(v), o, err -} - -// ReadUintBytes tries to read a uint -// from 'b' and return the value and the remaining bytes. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a uint) -// - UintOverflow{} (value too large for uint; 32-bit platforms only) -func ReadUintBytes(b []byte) (uint, []byte, error) { - if smallint { - u, b, err := ReadUint32Bytes(b) - return uint(u), b, err - } - u, b, err := ReadUint64Bytes(b) - return uint(u), b, err -} - -// ReadByteBytes is analogous to ReadUint8Bytes -func ReadByteBytes(b []byte) (byte, []byte, error) { - return ReadUint8Bytes(b) -} - -// ReadBytesBytes reads a 'bin' object -// from 'b' and returns its vaue and -// the remaining bytes in 'b'. -// Possible errors: -// - ErrShortBytes (too few bytes) -// - TypeError{} (not a 'bin' object) -func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { - return readBytesBytes(b, scratch, false) -} - -func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) { - l := len(b) - if l < 1 { - return nil, nil, ErrShortBytes - } - - lead := b[0] - var read int - switch lead { - case mbin8: - if l < 2 { - err = ErrShortBytes - return - } - - read = int(b[1]) - b = b[2:] - - case mbin16: - if l < 3 { - err = ErrShortBytes - return - } - read = int(big.Uint16(b[1:])) - b = b[3:] - - case mbin32: - if l < 5 { - err = ErrShortBytes - return - } - read = int(big.Uint32(b[1:])) - b = b[5:] - - default: - err = badPrefix(BinType, lead) - return - } - - if len(b) < read { - err = ErrShortBytes - return - } - - // zero-copy - if zc { - v = b[0:read] - o = b[read:] - return - } - - if cap(scratch) >= read { - v = scratch[0:read] - } else { - v = make([]byte, read) - } - - o = b[copy(v, b):] - return -} - -// ReadBytesZC extracts the messagepack-encoded -// binary field without copying. The returned []byte -// points to the same memory as the input slice. -// Possible errors: -// - ErrShortBytes (b not long enough) -// - TypeError{} (object not 'bin') -func ReadBytesZC(b []byte) (v []byte, o []byte, err error) { - return readBytesBytes(b, nil, true) -} - -func ReadExactBytes(b []byte, into []byte) (o []byte, err error) { - l := len(b) - if l < 1 { - err = ErrShortBytes - return - } - - lead := b[0] - var read uint32 - var skip int - switch lead { - case mbin8: - if l < 2 { - err = ErrShortBytes - return - } - - read = uint32(b[1]) - skip = 2 - - case mbin16: - if l < 3 { - err = ErrShortBytes - return - } - read = uint32(big.Uint16(b[1:])) - skip = 3 - - case mbin32: - if l < 5 { - err = ErrShortBytes - return - } - read = uint32(big.Uint32(b[1:])) - skip = 5 - - default: - err = badPrefix(BinType, lead) - return - } - - if read != uint32(len(into)) { - err = ArrayError{Wanted: uint32(len(into)), Got: read} - return - } - - o = b[skip+copy(into, b[skip:]):] - return -} - -// ReadStringZC reads a messagepack string field -// without copying. The returned []byte points -// to the same memory as the input slice. 
-// Possible errors: -// - ErrShortBytes (b not long enough) -// - TypeError{} (object not 'str') -func ReadStringZC(b []byte) (v []byte, o []byte, err error) { - l := len(b) - if l < 1 { - return nil, nil, ErrShortBytes - } - - lead := b[0] - var read int - - if isfixstr(lead) { - read = int(rfixstr(lead)) - b = b[1:] - } else { - switch lead { - case mstr8: - if l < 2 { - err = ErrShortBytes - return - } - read = int(b[1]) - b = b[2:] - - case mstr16: - if l < 3 { - err = ErrShortBytes - return - } - read = int(big.Uint16(b[1:])) - b = b[3:] - - case mstr32: - if l < 5 { - err = ErrShortBytes - return - } - read = int(big.Uint32(b[1:])) - b = b[5:] - - default: - err = TypeError{Method: StrType, Encoded: getType(lead)} - return - } - } - - if len(b) < read { - err = ErrShortBytes - return - } - - v = b[0:read] - o = b[read:] - return -} - -// ReadStringBytes reads a 'str' object -// from 'b' and returns its value and the -// remaining bytes in 'b'. -// Possible errors: -// - ErrShortBytes (b not long enough) -// - TypeError{} (not 'str' type) -// - InvalidPrefixError -func ReadStringBytes(b []byte) (string, []byte, error) { - v, o, err := ReadStringZC(b) - return string(v), o, err -} - -// ReadStringAsBytes reads a 'str' object -// into a slice of bytes. 'v' is the value of -// the 'str' object, which may reside in memory -// pointed to by 'scratch.' 'o' is the remaining bytes -// in 'b.'' -// Possible errors: -// - ErrShortBytes (b not long enough) -// - TypeError{} (not 'str' type) -// - InvalidPrefixError (unknown type marker) -func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { - var tmp []byte - tmp, o, err = ReadStringZC(b) - v = append(scratch[:0], tmp...) - return -} - -// ReadComplex128Bytes reads a complex128 -// extension object from 'b' and returns the -// remaining bytes. -// Possible errors: -// - ErrShortBytes (not enough bytes in 'b') -// - TypeError{} (object not a complex128) -// - InvalidPrefixError -// - ExtensionTypeError{} (object an extension of the correct size, but not a complex128) -func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) { - if len(b) < 18 { - err = ErrShortBytes - return - } - if b[0] != mfixext16 { - err = badPrefix(Complex128Type, b[0]) - return - } - if int8(b[1]) != Complex128Extension { - err = errExt(int8(b[1]), Complex128Extension) - return - } - c = complex(math.Float64frombits(big.Uint64(b[2:])), - math.Float64frombits(big.Uint64(b[10:]))) - o = b[18:] - return -} - -// ReadComplex64Bytes reads a complex64 -// extension object from 'b' and returns the -// remaining bytes. -// Possible errors: -// - ErrShortBytes (not enough bytes in 'b') -// - TypeError{} (object not a complex64) -// - ExtensionTypeError{} (object an extension of the correct size, but not a complex64) -func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) { - if len(b) < 10 { - err = ErrShortBytes - return - } - if b[0] != mfixext8 { - err = badPrefix(Complex64Type, b[0]) - return - } - if b[1] != Complex64Extension { - err = errExt(int8(b[1]), Complex64Extension) - return - } - c = complex(math.Float32frombits(big.Uint32(b[2:])), - math.Float32frombits(big.Uint32(b[6:]))) - o = b[10:] - return -} - -// ReadTimeBytes reads a time.Time -// extension object from 'b' and returns the -// remaining bytes. 
-// Possible errors: -// - ErrShortBytes (not enough bytes in 'b') -// - TypeError{} (object not a complex64) -// - ExtensionTypeError{} (object an extension of the correct size, but not a time.Time) -func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) { - if len(b) < 15 { - err = ErrShortBytes - return - } - if b[0] != mext8 || b[1] != 12 { - err = badPrefix(TimeType, b[0]) - return - } - if int8(b[2]) != TimeExtension { - err = errExt(int8(b[2]), TimeExtension) - return - } - sec, nsec := getUnix(b[3:]) - t = time.Unix(sec, int64(nsec)).Local() - o = b[15:] - return -} - -// ReadMapStrIntfBytes reads a map[string]interface{} -// out of 'b' and returns the map and remaining bytes. -// If 'old' is non-nil, the values will be read into that map. -func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) { - var sz uint32 - o = b - sz, o, err = ReadMapHeaderBytes(o) - - if err != nil { - return - } - - if old != nil { - for key := range old { - delete(old, key) - } - v = old - } else { - v = make(map[string]interface{}, int(sz)) - } - - for z := uint32(0); z < sz; z++ { - if len(o) < 1 { - err = ErrShortBytes - return - } - var key []byte - key, o, err = ReadMapKeyZC(o) - if err != nil { - return - } - var val interface{} - val, o, err = ReadIntfBytes(o) - if err != nil { - return - } - v[string(key)] = val - } - return -} - -// ReadIntfBytes attempts to read -// the next object out of 'b' as a raw interface{} and -// return the remaining bytes. -func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { - if len(b) < 1 { - err = ErrShortBytes - return - } - - k := NextType(b) - - switch k { - case MapType: - i, o, err = ReadMapStrIntfBytes(b, nil) - return - - case ArrayType: - var sz uint32 - sz, o, err = ReadArrayHeaderBytes(b) - if err != nil { - return - } - j := make([]interface{}, int(sz)) - i = j - for d := range j { - j[d], o, err = ReadIntfBytes(o) - if err != nil { - return - } - } - return - - case Float32Type: - i, o, err = ReadFloat32Bytes(b) - return - - case Float64Type: - i, o, err = ReadFloat64Bytes(b) - return - - case IntType: - i, o, err = ReadInt64Bytes(b) - return - - case UintType: - i, o, err = ReadUint64Bytes(b) - return - - case BoolType: - i, o, err = ReadBoolBytes(b) - return - - case TimeType: - i, o, err = ReadTimeBytes(b) - return - - case Complex64Type: - i, o, err = ReadComplex64Bytes(b) - return - - case Complex128Type: - i, o, err = ReadComplex128Bytes(b) - return - - case ExtensionType: - var t int8 - t, err = peekExtension(b) - if err != nil { - return - } - // use a user-defined extension, - // if it's been registered - f, ok := extensionReg[t] - if ok { - e := f() - o, err = ReadExtensionBytes(b, e) - i = e - return - } - // last resort is a raw extension - e := RawExtension{} - e.Type = int8(t) - o, err = ReadExtensionBytes(b, &e) - i = &e - return - - case NilType: - o, err = ReadNilBytes(b) - return - - case BinType: - i, o, err = ReadBytesBytes(b, nil) - return - - case StrType: - i, o, err = ReadStringBytes(b) - return - - default: - err = InvalidPrefixError(b[0]) - return - } -} - -// Skip skips the next object in 'b' and -// returns the remaining bytes. If the object -// is a map or array, all of its elements -// will be skipped. 
-// Possible Errors: -// - ErrShortBytes (not enough bytes in b) -// - InvalidPrefixError (bad encoding) -func Skip(b []byte) ([]byte, error) { - sz, asz, err := getSize(b) - if err != nil { - return b, err - } - if uintptr(len(b)) < sz { - return b, ErrShortBytes - } - b = b[sz:] - for asz > 0 { - b, err = Skip(b) - if err != nil { - return b, err - } - asz-- - } - return b, nil -} - -// returns (skip N bytes, skip M objects, error) -func getSize(b []byte) (uintptr, uintptr, error) { - l := len(b) - if l == 0 { - return 0, 0, ErrShortBytes - } - lead := b[0] - spec := &sizes[lead] // get type information - size, mode := spec.size, spec.extra - if size == 0 { - return 0, 0, InvalidPrefixError(lead) - } - if mode >= 0 { // fixed composites - return uintptr(size), uintptr(mode), nil - } - if l < int(size) { - return 0, 0, ErrShortBytes - } - switch mode { - case extra8: - return uintptr(size) + uintptr(b[1]), 0, nil - case extra16: - return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil - case extra32: - return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil - case map16v: - return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil - case map32v: - return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil - case array16v: - return uintptr(size), uintptr(big.Uint16(b[1:])), nil - case array32v: - return uintptr(size), uintptr(big.Uint32(b[1:])), nil - default: - return 0, 0, fatal - } -} diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go deleted file mode 100644 index ce2f8b16f..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/size.go +++ /dev/null @@ -1,38 +0,0 @@ -package msgp - -// The sizes provided -// are the worst-case -// encoded sizes for -// each type. For variable- -// length types ([]byte, string), -// the total encoded size is -// the prefix size plus the -// length of the object. -const ( - Int64Size = 9 - IntSize = Int64Size - UintSize = Int64Size - Int8Size = 2 - Int16Size = 3 - Int32Size = 5 - Uint8Size = 2 - ByteSize = Uint8Size - Uint16Size = 3 - Uint32Size = 5 - Uint64Size = Int64Size - Float64Size = 9 - Float32Size = 5 - Complex64Size = 10 - Complex128Size = 18 - - TimeSize = 15 - BoolSize = 1 - NilSize = 1 - - MapHeaderSize = 5 - ArrayHeaderSize = 5 - - BytesPrefixSize = 5 - StringPrefixSize = 5 - ExtensionPrefixSize = 6 -) diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go deleted file mode 100644 index 3978b6ff6..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/unsafe.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !purego,!appengine - -package msgp - -import ( - "reflect" - "unsafe" -) - -// NOTE: -// all of the definition in this file -// should be repeated in appengine.go, -// but without using unsafe - -const ( - // spec says int and uint are always - // the same size, but that int/uint - // size may not be machine word size - smallint = unsafe.Sizeof(int(0)) == 4 -) - -// UnsafeString returns the byte slice as a volatile string -// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. -// THIS IS EVIL CODE. -// YOU HAVE BEEN WARNED. -func UnsafeString(b []byte) string { - sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: sh.Data, Len: sh.Len})) -} - -// UnsafeBytes returns the string as a byte slice -// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. -// THIS IS EVIL CODE. -// YOU HAVE BEEN WARNED. 
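The zero-copy []byte helpers deleted in the read_bytes.go hunk above (ReadMapHeaderBytes, ReadStringBytes, ReadInt64Bytes, Skip) were typically combined to walk an encoded map without constructing a Reader. A rough sketch using only functions that appear in the removed file:

```go
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Build a 2-element map: {"id": 42, "note": "not interesting here"}.
	buf := msgp.AppendMapHeader(nil, 2)
	buf = msgp.AppendString(buf, "id")
	buf = msgp.AppendInt64(buf, 42)
	buf = msgp.AppendString(buf, "note")
	buf = msgp.AppendString(buf, "not interesting here")

	// Walk the encoding directly, skipping any value we do not care about.
	sz, rest, err := msgp.ReadMapHeaderBytes(buf)
	if err != nil {
		panic(err)
	}
	for i := uint32(0); i < sz; i++ {
		var key string
		key, rest, err = msgp.ReadStringBytes(rest)
		if err != nil {
			panic(err)
		}
		if key == "id" {
			var id int64
			id, rest, err = msgp.ReadInt64Bytes(rest)
			if err != nil {
				panic(err)
			}
			fmt.Println("id =", id)
			continue
		}
		// Skip walks past one whole object, including nested maps and arrays.
		rest, err = msgp.Skip(rest)
		if err != nil {
			panic(err)
		}
	}
}
```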
-func UnsafeBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Len: len(s), - Cap: len(s), - Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data, - })) -} diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go deleted file mode 100644 index fb1947c57..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/write.go +++ /dev/null @@ -1,845 +0,0 @@ -package msgp - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "sync" - "time" -) - -// Sizer is an interface implemented -// by types that can estimate their -// size when MessagePack encoded. -// This interface is optional, but -// encoding/marshaling implementations -// may use this as a way to pre-allocate -// memory for serialization. -type Sizer interface { - Msgsize() int -} - -var ( - // Nowhere is an io.Writer to nowhere - Nowhere io.Writer = nwhere{} - - btsType = reflect.TypeOf(([]byte)(nil)) - writerPool = sync.Pool{ - New: func() interface{} { - return &Writer{buf: make([]byte, 2048)} - }, - } -) - -func popWriter(w io.Writer) *Writer { - wr := writerPool.Get().(*Writer) - wr.Reset(w) - return wr -} - -func pushWriter(wr *Writer) { - wr.w = nil - wr.wloc = 0 - writerPool.Put(wr) -} - -// freeW frees a writer for use -// by other processes. It is not necessary -// to call freeW on a writer. However, maintaining -// a reference to a *Writer after calling freeW on -// it will cause undefined behavior. -func freeW(w *Writer) { pushWriter(w) } - -// Require ensures that cap(old)-len(old) >= extra. -func Require(old []byte, extra int) []byte { - l := len(old) - c := cap(old) - r := l + extra - if c >= r { - return old - } else if l == 0 { - return make([]byte, 0, extra) - } - // the new size is the greater - // of double the old capacity - // and the sum of the old length - // and the number of new bytes - // necessary. - c <<= 1 - if c < r { - c = r - } - n := make([]byte, l, c) - copy(n, old) - return n -} - -// nowhere writer -type nwhere struct{} - -func (n nwhere) Write(p []byte) (int, error) { return len(p), nil } - -// Marshaler is the interface implemented -// by types that know how to marshal themselves -// as MessagePack. MarshalMsg appends the marshalled -// form of the object to the provided -// byte slice, returning the extended -// slice and any errors encountered. -type Marshaler interface { - MarshalMsg([]byte) ([]byte, error) -} - -// Encodable is the interface implemented -// by types that know how to write themselves -// as MessagePack using a *msgp.Writer. -type Encodable interface { - EncodeMsg(*Writer) error -} - -// Writer is a buffered writer -// that can be used to write -// MessagePack objects to an io.Writer. -// You must call *Writer.Flush() in order -// to flush all of the buffered data -// to the underlying writer. -type Writer struct { - w io.Writer - buf []byte - wloc int -} - -// NewWriter returns a new *Writer. -func NewWriter(w io.Writer) *Writer { - if wr, ok := w.(*Writer); ok { - return wr - } - return popWriter(w) -} - -// NewWriterSize returns a writer with a custom buffer size. -func NewWriterSize(w io.Writer, sz int) *Writer { - // we must be able to require() 18 - // contiguous bytes, so that is the - // practical minimum buffer size - if sz < 18 { - sz = 18 - } - - return &Writer{ - w: w, - buf: make([]byte, sz), - } -} - -// Encode encodes an Encodable to an io.Writer. 
-func Encode(w io.Writer, e Encodable) error { - wr := NewWriter(w) - err := e.EncodeMsg(wr) - if err == nil { - err = wr.Flush() - } - freeW(wr) - return err -} - -func (mw *Writer) flush() error { - if mw.wloc == 0 { - return nil - } - n, err := mw.w.Write(mw.buf[:mw.wloc]) - if err != nil { - if n > 0 { - mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc]) - } - return err - } - mw.wloc = 0 - return nil -} - -// Flush flushes all of the buffered -// data to the underlying writer. -func (mw *Writer) Flush() error { return mw.flush() } - -// Buffered returns the number bytes in the write buffer -func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc } - -func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc } - -func (mw *Writer) bufsize() int { return len(mw.buf) } - -// NOTE: this should only be called with -// a number that is guaranteed to be less than -// len(mw.buf). typically, it is called with a constant. -// -// NOTE: this is a hot code path -func (mw *Writer) require(n int) (int, error) { - c := len(mw.buf) - wl := mw.wloc - if c-wl < n { - if err := mw.flush(); err != nil { - return 0, err - } - wl = mw.wloc - } - mw.wloc += n - return wl, nil -} - -func (mw *Writer) Append(b ...byte) error { - if mw.avail() < len(b) { - err := mw.flush() - if err != nil { - return err - } - } - mw.wloc += copy(mw.buf[mw.wloc:], b) - return nil -} - -// push one byte onto the buffer -// -// NOTE: this is a hot code path -func (mw *Writer) push(b byte) error { - if mw.wloc == len(mw.buf) { - if err := mw.flush(); err != nil { - return err - } - } - mw.buf[mw.wloc] = b - mw.wloc++ - return nil -} - -func (mw *Writer) prefix8(b byte, u uint8) error { - const need = 2 - if len(mw.buf)-mw.wloc < need { - if err := mw.flush(); err != nil { - return err - } - } - prefixu8(mw.buf[mw.wloc:], b, u) - mw.wloc += need - return nil -} - -func (mw *Writer) prefix16(b byte, u uint16) error { - const need = 3 - if len(mw.buf)-mw.wloc < need { - if err := mw.flush(); err != nil { - return err - } - } - prefixu16(mw.buf[mw.wloc:], b, u) - mw.wloc += need - return nil -} - -func (mw *Writer) prefix32(b byte, u uint32) error { - const need = 5 - if len(mw.buf)-mw.wloc < need { - if err := mw.flush(); err != nil { - return err - } - } - prefixu32(mw.buf[mw.wloc:], b, u) - mw.wloc += need - return nil -} - -func (mw *Writer) prefix64(b byte, u uint64) error { - const need = 9 - if len(mw.buf)-mw.wloc < need { - if err := mw.flush(); err != nil { - return err - } - } - prefixu64(mw.buf[mw.wloc:], b, u) - mw.wloc += need - return nil -} - -// Write implements io.Writer, and writes -// data directly to the buffer. 
-func (mw *Writer) Write(p []byte) (int, error) { - l := len(p) - if mw.avail() < l { - if err := mw.flush(); err != nil { - return 0, err - } - if l > len(mw.buf) { - return mw.w.Write(p) - } - } - mw.wloc += copy(mw.buf[mw.wloc:], p) - return l, nil -} - -// implements io.WriteString -func (mw *Writer) writeString(s string) error { - l := len(s) - if mw.avail() < l { - if err := mw.flush(); err != nil { - return err - } - if l > len(mw.buf) { - _, err := io.WriteString(mw.w, s) - return err - } - } - mw.wloc += copy(mw.buf[mw.wloc:], s) - return nil -} - -// Reset changes the underlying writer used by the Writer -func (mw *Writer) Reset(w io.Writer) { - mw.buf = mw.buf[:cap(mw.buf)] - mw.w = w - mw.wloc = 0 -} - -// WriteMapHeader writes a map header of the given -// size to the writer -func (mw *Writer) WriteMapHeader(sz uint32) error { - switch { - case sz <= 15: - return mw.push(wfixmap(uint8(sz))) - case sz <= math.MaxUint16: - return mw.prefix16(mmap16, uint16(sz)) - default: - return mw.prefix32(mmap32, sz) - } -} - -// WriteArrayHeader writes an array header of the -// given size to the writer -func (mw *Writer) WriteArrayHeader(sz uint32) error { - switch { - case sz <= 15: - return mw.push(wfixarray(uint8(sz))) - case sz <= math.MaxUint16: - return mw.prefix16(marray16, uint16(sz)) - default: - return mw.prefix32(marray32, sz) - } -} - -// WriteNil writes a nil byte to the buffer -func (mw *Writer) WriteNil() error { - return mw.push(mnil) -} - -// WriteFloat64 writes a float64 to the writer -func (mw *Writer) WriteFloat64(f float64) error { - return mw.prefix64(mfloat64, math.Float64bits(f)) -} - -// WriteFloat32 writes a float32 to the writer -func (mw *Writer) WriteFloat32(f float32) error { - return mw.prefix32(mfloat32, math.Float32bits(f)) -} - -// WriteInt64 writes an int64 to the writer -func (mw *Writer) WriteInt64(i int64) error { - if i >= 0 { - switch { - case i <= math.MaxInt8: - return mw.push(wfixint(uint8(i))) - case i <= math.MaxInt16: - return mw.prefix16(mint16, uint16(i)) - case i <= math.MaxInt32: - return mw.prefix32(mint32, uint32(i)) - default: - return mw.prefix64(mint64, uint64(i)) - } - } - switch { - case i >= -32: - return mw.push(wnfixint(int8(i))) - case i >= math.MinInt8: - return mw.prefix8(mint8, uint8(i)) - case i >= math.MinInt16: - return mw.prefix16(mint16, uint16(i)) - case i >= math.MinInt32: - return mw.prefix32(mint32, uint32(i)) - default: - return mw.prefix64(mint64, uint64(i)) - } -} - -// WriteInt8 writes an int8 to the writer -func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) } - -// WriteInt16 writes an int16 to the writer -func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) } - -// WriteInt32 writes an int32 to the writer -func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) } - -// WriteInt writes an int to the writer -func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) } - -// WriteUint64 writes a uint64 to the writer -func (mw *Writer) WriteUint64(u uint64) error { - switch { - case u <= (1<<7)-1: - return mw.push(wfixint(uint8(u))) - case u <= math.MaxUint8: - return mw.prefix8(muint8, uint8(u)) - case u <= math.MaxUint16: - return mw.prefix16(muint16, uint16(u)) - case u <= math.MaxUint32: - return mw.prefix32(muint32, uint32(u)) - default: - return mw.prefix64(muint64, u) - } -} - -// WriteByte is analogous to WriteUint8 -func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) } - -// WriteUint8 writes a uint8 to 
the writer -func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) } - -// WriteUint16 writes a uint16 to the writer -func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) } - -// WriteUint32 writes a uint32 to the writer -func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) } - -// WriteUint writes a uint to the writer -func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) } - -// WriteBytes writes binary as 'bin' to the writer -func (mw *Writer) WriteBytes(b []byte) error { - sz := uint32(len(b)) - var err error - switch { - case sz <= math.MaxUint8: - err = mw.prefix8(mbin8, uint8(sz)) - case sz <= math.MaxUint16: - err = mw.prefix16(mbin16, uint16(sz)) - default: - err = mw.prefix32(mbin32, sz) - } - if err != nil { - return err - } - _, err = mw.Write(b) - return err -} - -// WriteBytesHeader writes just the size header -// of a MessagePack 'bin' object. The user is responsible -// for then writing 'sz' more bytes into the stream. -func (mw *Writer) WriteBytesHeader(sz uint32) error { - switch { - case sz <= math.MaxUint8: - return mw.prefix8(mbin8, uint8(sz)) - case sz <= math.MaxUint16: - return mw.prefix16(mbin16, uint16(sz)) - default: - return mw.prefix32(mbin32, sz) - } -} - -// WriteBool writes a bool to the writer -func (mw *Writer) WriteBool(b bool) error { - if b { - return mw.push(mtrue) - } - return mw.push(mfalse) -} - -// WriteString writes a messagepack string to the writer. -// (This is NOT an implementation of io.StringWriter) -func (mw *Writer) WriteString(s string) error { - sz := uint32(len(s)) - var err error - switch { - case sz <= 31: - err = mw.push(wfixstr(uint8(sz))) - case sz <= math.MaxUint8: - err = mw.prefix8(mstr8, uint8(sz)) - case sz <= math.MaxUint16: - err = mw.prefix16(mstr16, uint16(sz)) - default: - err = mw.prefix32(mstr32, sz) - } - if err != nil { - return err - } - return mw.writeString(s) -} - -// WriteStringHeader writes just the string size -// header of a MessagePack 'str' object. The user -// is responsible for writing 'sz' more valid UTF-8 -// bytes to the stream. -func (mw *Writer) WriteStringHeader(sz uint32) error { - switch { - case sz <= 31: - return mw.push(wfixstr(uint8(sz))) - case sz <= math.MaxUint8: - return mw.prefix8(mstr8, uint8(sz)) - case sz <= math.MaxUint16: - return mw.prefix16(mstr16, uint16(sz)) - default: - return mw.prefix32(mstr32, sz) - } -} - -// WriteStringFromBytes writes a 'str' object -// from a []byte. 
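The buffered Writer methods above (WriteMapHeader, WriteString, WriteInt64, Flush) form the streaming half of the removed package. A minimal encode sketch; nothing reaches the underlying io.Writer until Flush is called or the internal buffer fills:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	var out bytes.Buffer

	// NewWriter wraps 'out' in a buffered MessagePack writer.
	w := msgp.NewWriter(&out)
	if err := w.WriteMapHeader(2); err != nil {
		panic(err)
	}
	// Intermediate error checks elided for brevity; a bytes.Buffer
	// destination cannot fail, and errors would surface on Flush.
	w.WriteString("name")
	w.WriteString("pool-a")
	w.WriteString("replicas")
	w.WriteInt64(3)
	if err := w.Flush(); err != nil {
		panic(err)
	}

	fmt.Printf("% x\n", out.Bytes())
}
```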
-func (mw *Writer) WriteStringFromBytes(str []byte) error { - sz := uint32(len(str)) - var err error - switch { - case sz <= 31: - err = mw.push(wfixstr(uint8(sz))) - case sz <= math.MaxUint8: - err = mw.prefix8(mstr8, uint8(sz)) - case sz <= math.MaxUint16: - err = mw.prefix16(mstr16, uint16(sz)) - default: - err = mw.prefix32(mstr32, sz) - } - if err != nil { - return err - } - _, err = mw.Write(str) - return err -} - -// WriteComplex64 writes a complex64 to the writer -func (mw *Writer) WriteComplex64(f complex64) error { - o, err := mw.require(10) - if err != nil { - return err - } - mw.buf[o] = mfixext8 - mw.buf[o+1] = Complex64Extension - big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f))) - big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f))) - return nil -} - -// WriteComplex128 writes a complex128 to the writer -func (mw *Writer) WriteComplex128(f complex128) error { - o, err := mw.require(18) - if err != nil { - return err - } - mw.buf[o] = mfixext16 - mw.buf[o+1] = Complex128Extension - big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f))) - big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f))) - return nil -} - -// WriteMapStrStr writes a map[string]string to the writer -func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) { - err = mw.WriteMapHeader(uint32(len(mp))) - if err != nil { - return - } - for key, val := range mp { - err = mw.WriteString(key) - if err != nil { - return - } - err = mw.WriteString(val) - if err != nil { - return - } - } - return nil -} - -// WriteMapStrIntf writes a map[string]interface to the writer -func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) { - err = mw.WriteMapHeader(uint32(len(mp))) - if err != nil { - return - } - for key, val := range mp { - err = mw.WriteString(key) - if err != nil { - return - } - err = mw.WriteIntf(val) - if err != nil { - return - } - } - return -} - -// WriteTime writes a time.Time object to the wire. -// -// Time is encoded as Unix time, which means that -// location (time zone) data is removed from the object. -// The encoded object itself is 12 bytes: 8 bytes for -// a big-endian 64-bit integer denoting seconds -// elapsed since "zero" Unix time, followed by 4 bytes -// for a big-endian 32-bit signed integer denoting -// the nanosecond offset of the time. This encoding -// is intended to ease portability across languages. -// (Note that this is *not* the standard time.Time -// binary encoding, because its implementation relies -// heavily on the internal representation used by the -// time package.) -func (mw *Writer) WriteTime(t time.Time) error { - t = t.UTC() - o, err := mw.require(15) - if err != nil { - return err - } - mw.buf[o] = mext8 - mw.buf[o+1] = 12 - mw.buf[o+2] = TimeExtension - putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond())) - return nil -} - -// WriteIntf writes the concrete type of 'v'. 
-// WriteIntf will error if 'v' is not one of the following: -// - A bool, float, string, []byte, int, uint, or complex -// - A map of supported types (with string keys) -// - An array or slice of supported types -// - A pointer to a supported type -// - A type that satisfies the msgp.Encodable interface -// - A type that satisfies the msgp.Extension interface -func (mw *Writer) WriteIntf(v interface{}) error { - if v == nil { - return mw.WriteNil() - } - switch v := v.(type) { - - // preferred interfaces - - case Encodable: - return v.EncodeMsg(mw) - case Extension: - return mw.WriteExtension(v) - - // concrete types - - case bool: - return mw.WriteBool(v) - case float32: - return mw.WriteFloat32(v) - case float64: - return mw.WriteFloat64(v) - case complex64: - return mw.WriteComplex64(v) - case complex128: - return mw.WriteComplex128(v) - case uint8: - return mw.WriteUint8(v) - case uint16: - return mw.WriteUint16(v) - case uint32: - return mw.WriteUint32(v) - case uint64: - return mw.WriteUint64(v) - case uint: - return mw.WriteUint(v) - case int8: - return mw.WriteInt8(v) - case int16: - return mw.WriteInt16(v) - case int32: - return mw.WriteInt32(v) - case int64: - return mw.WriteInt64(v) - case int: - return mw.WriteInt(v) - case string: - return mw.WriteString(v) - case []byte: - return mw.WriteBytes(v) - case map[string]string: - return mw.WriteMapStrStr(v) - case map[string]interface{}: - return mw.WriteMapStrIntf(v) - case time.Time: - return mw.WriteTime(v) - } - - val := reflect.ValueOf(v) - if !isSupported(val.Kind()) || !val.IsValid() { - return fmt.Errorf("msgp: type %s not supported", val) - } - - switch val.Kind() { - case reflect.Ptr: - if val.IsNil() { - return mw.WriteNil() - } - return mw.WriteIntf(val.Elem().Interface()) - case reflect.Slice: - return mw.writeSlice(val) - case reflect.Map: - return mw.writeMap(val) - } - return &ErrUnsupportedType{T: val.Type()} -} - -func (mw *Writer) writeMap(v reflect.Value) (err error) { - if v.Type().Key().Kind() != reflect.String { - return errors.New("msgp: map keys must be strings") - } - ks := v.MapKeys() - err = mw.WriteMapHeader(uint32(len(ks))) - if err != nil { - return - } - for _, key := range ks { - val := v.MapIndex(key) - err = mw.WriteString(key.String()) - if err != nil { - return - } - err = mw.WriteIntf(val.Interface()) - if err != nil { - return - } - } - return -} - -func (mw *Writer) writeSlice(v reflect.Value) (err error) { - // is []byte - if v.Type().ConvertibleTo(btsType) { - return mw.WriteBytes(v.Bytes()) - } - - sz := uint32(v.Len()) - err = mw.WriteArrayHeader(sz) - if err != nil { - return - } - for i := uint32(0); i < sz; i++ { - err = mw.WriteIntf(v.Index(int(i)).Interface()) - if err != nil { - return - } - } - return -} - -func (mw *Writer) writeStruct(v reflect.Value) error { - if enc, ok := v.Interface().(Encodable); ok { - return enc.EncodeMsg(mw) - } - return fmt.Errorf("msgp: unsupported type: %s", v.Type()) -} - -func (mw *Writer) writeVal(v reflect.Value) error { - if !isSupported(v.Kind()) { - return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) - } - - // shortcut for nil values - if v.IsNil() { - return mw.WriteNil() - } - switch v.Kind() { - case reflect.Bool: - return mw.WriteBool(v.Bool()) - - case reflect.Float32, reflect.Float64: - return mw.WriteFloat64(v.Float()) - - case reflect.Complex64, reflect.Complex128: - return mw.WriteComplex128(v.Complex()) - - case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8: - return mw.WriteInt64(v.Int()) - 
- case reflect.Interface, reflect.Ptr: - if v.IsNil() { - mw.WriteNil() - } - return mw.writeVal(v.Elem()) - - case reflect.Map: - return mw.writeMap(v) - - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8: - return mw.WriteUint64(v.Uint()) - - case reflect.String: - return mw.WriteString(v.String()) - - case reflect.Slice, reflect.Array: - return mw.writeSlice(v) - - case reflect.Struct: - return mw.writeStruct(v) - - } - return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) -} - -// is the reflect.Kind encodable? -func isSupported(k reflect.Kind) bool { - switch k { - case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer: - return false - default: - return true - } -} - -// GuessSize guesses the size of the underlying -// value of 'i'. If the underlying value is not -// a simple builtin (or []byte), GuessSize defaults -// to 512. -func GuessSize(i interface{}) int { - if i == nil { - return NilSize - } - - switch i := i.(type) { - case Sizer: - return i.Msgsize() - case Extension: - return ExtensionPrefixSize + i.Len() - case float64: - return Float64Size - case float32: - return Float32Size - case uint8, uint16, uint32, uint64, uint: - return UintSize - case int8, int16, int32, int64, int: - return IntSize - case []byte: - return BytesPrefixSize + len(i) - case string: - return StringPrefixSize + len(i) - case complex64: - return Complex64Size - case complex128: - return Complex128Size - case bool: - return BoolSize - case map[string]interface{}: - s := MapHeaderSize - for key, val := range i { - s += StringPrefixSize + len(key) + GuessSize(val) - } - return s - case map[string]string: - s := MapHeaderSize - for key, val := range i { - s += 2*StringPrefixSize + len(key) + len(val) - } - return s - default: - return 512 - } -} diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go deleted file mode 100644 index eaa03c46e..000000000 --- a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go +++ /dev/null @@ -1,411 +0,0 @@ -package msgp - -import ( - "math" - "reflect" - "time" -) - -// ensure 'sz' extra bytes in 'b' btw len(b) and cap(b) -func ensure(b []byte, sz int) ([]byte, int) { - l := len(b) - c := cap(b) - if c-l < sz { - o := make([]byte, (2*c)+sz) // exponential growth - n := copy(o, b) - return o[:n+sz], n - } - return b[:l+sz], l -} - -// AppendMapHeader appends a map header with the -// given size to the slice -func AppendMapHeader(b []byte, sz uint32) []byte { - switch { - case sz <= 15: - return append(b, wfixmap(uint8(sz))) - - case sz <= math.MaxUint16: - o, n := ensure(b, 3) - prefixu16(o[n:], mmap16, uint16(sz)) - return o - - default: - o, n := ensure(b, 5) - prefixu32(o[n:], mmap32, sz) - return o - } -} - -// AppendArrayHeader appends an array header with -// the given size to the slice -func AppendArrayHeader(b []byte, sz uint32) []byte { - switch { - case sz <= 15: - return append(b, wfixarray(uint8(sz))) - - case sz <= math.MaxUint16: - o, n := ensure(b, 3) - prefixu16(o[n:], marray16, uint16(sz)) - return o - - default: - o, n := ensure(b, 5) - prefixu32(o[n:], marray32, sz) - return o - } -} - -// AppendNil appends a 'nil' byte to the slice -func AppendNil(b []byte) []byte { return append(b, mnil) } - -// AppendFloat64 appends a float64 to the slice -func AppendFloat64(b []byte, f float64) []byte { - o, n := ensure(b, Float64Size) - prefixu64(o[n:], mfloat64, math.Float64bits(f)) - return o -} - -// AppendFloat32 appends a float32 to the 
slice -func AppendFloat32(b []byte, f float32) []byte { - o, n := ensure(b, Float32Size) - prefixu32(o[n:], mfloat32, math.Float32bits(f)) - return o -} - -// AppendInt64 appends an int64 to the slice -func AppendInt64(b []byte, i int64) []byte { - if i >= 0 { - switch { - case i <= math.MaxInt8: - return append(b, wfixint(uint8(i))) - case i <= math.MaxInt16: - o, n := ensure(b, 3) - putMint16(o[n:], int16(i)) - return o - case i <= math.MaxInt32: - o, n := ensure(b, 5) - putMint32(o[n:], int32(i)) - return o - default: - o, n := ensure(b, 9) - putMint64(o[n:], i) - return o - } - } - switch { - case i >= -32: - return append(b, wnfixint(int8(i))) - case i >= math.MinInt8: - o, n := ensure(b, 2) - putMint8(o[n:], int8(i)) - return o - case i >= math.MinInt16: - o, n := ensure(b, 3) - putMint16(o[n:], int16(i)) - return o - case i >= math.MinInt32: - o, n := ensure(b, 5) - putMint32(o[n:], int32(i)) - return o - default: - o, n := ensure(b, 9) - putMint64(o[n:], i) - return o - } -} - -// AppendInt appends an int to the slice -func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) } - -// AppendInt8 appends an int8 to the slice -func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) } - -// AppendInt16 appends an int16 to the slice -func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) } - -// AppendInt32 appends an int32 to the slice -func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) } - -// AppendUint64 appends a uint64 to the slice -func AppendUint64(b []byte, u uint64) []byte { - switch { - case u <= (1<<7)-1: - return append(b, wfixint(uint8(u))) - - case u <= math.MaxUint8: - o, n := ensure(b, 2) - putMuint8(o[n:], uint8(u)) - return o - - case u <= math.MaxUint16: - o, n := ensure(b, 3) - putMuint16(o[n:], uint16(u)) - return o - - case u <= math.MaxUint32: - o, n := ensure(b, 5) - putMuint32(o[n:], uint32(u)) - return o - - default: - o, n := ensure(b, 9) - putMuint64(o[n:], u) - return o - - } -} - -// AppendUint appends a uint to the slice -func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) } - -// AppendUint8 appends a uint8 to the slice -func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) } - -// AppendByte is analogous to AppendUint8 -func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) } - -// AppendUint16 appends a uint16 to the slice -func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) } - -// AppendUint32 appends a uint32 to the slice -func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) } - -// AppendBytes appends bytes to the slice as MessagePack 'bin' data -func AppendBytes(b []byte, bts []byte) []byte { - sz := len(bts) - var o []byte - var n int - switch { - case sz <= math.MaxUint8: - o, n = ensure(b, 2+sz) - prefixu8(o[n:], mbin8, uint8(sz)) - n += 2 - case sz <= math.MaxUint16: - o, n = ensure(b, 3+sz) - prefixu16(o[n:], mbin16, uint16(sz)) - n += 3 - default: - o, n = ensure(b, 5+sz) - prefixu32(o[n:], mbin32, uint32(sz)) - n += 5 - } - return o[:n+copy(o[n:], bts)] -} - -// AppendBool appends a bool to the slice -func AppendBool(b []byte, t bool) []byte { - if t { - return append(b, mtrue) - } - return append(b, mfalse) -} - -// AppendString appends a string as a MessagePack 'str' to the slice -func AppendString(b []byte, s string) []byte { - sz := len(s) - var n int - var o []byte - switch { - case sz <= 31: - o, n = ensure(b, 1+sz) - o[n] = 
wfixstr(uint8(sz)) - n++ - case sz <= math.MaxUint8: - o, n = ensure(b, 2+sz) - prefixu8(o[n:], mstr8, uint8(sz)) - n += 2 - case sz <= math.MaxUint16: - o, n = ensure(b, 3+sz) - prefixu16(o[n:], mstr16, uint16(sz)) - n += 3 - default: - o, n = ensure(b, 5+sz) - prefixu32(o[n:], mstr32, uint32(sz)) - n += 5 - } - return o[:n+copy(o[n:], s)] -} - -// AppendStringFromBytes appends a []byte -// as a MessagePack 'str' to the slice 'b.' -func AppendStringFromBytes(b []byte, str []byte) []byte { - sz := len(str) - var n int - var o []byte - switch { - case sz <= 31: - o, n = ensure(b, 1+sz) - o[n] = wfixstr(uint8(sz)) - n++ - case sz <= math.MaxUint8: - o, n = ensure(b, 2+sz) - prefixu8(o[n:], mstr8, uint8(sz)) - n += 2 - case sz <= math.MaxUint16: - o, n = ensure(b, 3+sz) - prefixu16(o[n:], mstr16, uint16(sz)) - n += 3 - default: - o, n = ensure(b, 5+sz) - prefixu32(o[n:], mstr32, uint32(sz)) - n += 5 - } - return o[:n+copy(o[n:], str)] -} - -// AppendComplex64 appends a complex64 to the slice as a MessagePack extension -func AppendComplex64(b []byte, c complex64) []byte { - o, n := ensure(b, Complex64Size) - o[n] = mfixext8 - o[n+1] = Complex64Extension - big.PutUint32(o[n+2:], math.Float32bits(real(c))) - big.PutUint32(o[n+6:], math.Float32bits(imag(c))) - return o -} - -// AppendComplex128 appends a complex128 to the slice as a MessagePack extension -func AppendComplex128(b []byte, c complex128) []byte { - o, n := ensure(b, Complex128Size) - o[n] = mfixext16 - o[n+1] = Complex128Extension - big.PutUint64(o[n+2:], math.Float64bits(real(c))) - big.PutUint64(o[n+10:], math.Float64bits(imag(c))) - return o -} - -// AppendTime appends a time.Time to the slice as a MessagePack extension -func AppendTime(b []byte, t time.Time) []byte { - o, n := ensure(b, TimeSize) - t = t.UTC() - o[n] = mext8 - o[n+1] = 12 - o[n+2] = TimeExtension - putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond())) - return o -} - -// AppendMapStrStr appends a map[string]string to the slice -// as a MessagePack map with 'str'-type keys and values -func AppendMapStrStr(b []byte, m map[string]string) []byte { - sz := uint32(len(m)) - b = AppendMapHeader(b, sz) - for key, val := range m { - b = AppendString(b, key) - b = AppendString(b, val) - } - return b -} - -// AppendMapStrIntf appends a map[string]interface{} to the slice -// as a MessagePack map with 'str'-type keys. -func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) { - sz := uint32(len(m)) - b = AppendMapHeader(b, sz) - var err error - for key, val := range m { - b = AppendString(b, key) - b, err = AppendIntf(b, val) - if err != nil { - return b, err - } - } - return b, nil -} - -// AppendIntf appends the concrete type of 'i' to the -// provided []byte. 
'i' must be one of the following: -// - 'nil' -// - A bool, float, string, []byte, int, uint, or complex -// - A map[string]interface{} or map[string]string -// - A []T, where T is another supported type -// - A *T, where T is another supported type -// - A type that satisfieds the msgp.Marshaler interface -// - A type that satisfies the msgp.Extension interface -func AppendIntf(b []byte, i interface{}) ([]byte, error) { - if i == nil { - return AppendNil(b), nil - } - - // all the concrete types - // for which we have methods - switch i := i.(type) { - case Marshaler: - return i.MarshalMsg(b) - case Extension: - return AppendExtension(b, i) - case bool: - return AppendBool(b, i), nil - case float32: - return AppendFloat32(b, i), nil - case float64: - return AppendFloat64(b, i), nil - case complex64: - return AppendComplex64(b, i), nil - case complex128: - return AppendComplex128(b, i), nil - case string: - return AppendString(b, i), nil - case []byte: - return AppendBytes(b, i), nil - case int8: - return AppendInt8(b, i), nil - case int16: - return AppendInt16(b, i), nil - case int32: - return AppendInt32(b, i), nil - case int64: - return AppendInt64(b, i), nil - case int: - return AppendInt64(b, int64(i)), nil - case uint: - return AppendUint64(b, uint64(i)), nil - case uint8: - return AppendUint8(b, i), nil - case uint16: - return AppendUint16(b, i), nil - case uint32: - return AppendUint32(b, i), nil - case uint64: - return AppendUint64(b, i), nil - case time.Time: - return AppendTime(b, i), nil - case map[string]interface{}: - return AppendMapStrIntf(b, i) - case map[string]string: - return AppendMapStrStr(b, i), nil - case []interface{}: - b = AppendArrayHeader(b, uint32(len(i))) - var err error - for _, k := range i { - b, err = AppendIntf(b, k) - if err != nil { - return b, err - } - } - return b, nil - } - - var err error - v := reflect.ValueOf(i) - switch v.Kind() { - case reflect.Array, reflect.Slice: - l := v.Len() - b = AppendArrayHeader(b, uint32(l)) - for i := 0; i < l; i++ { - b, err = AppendIntf(b, v.Index(i).Interface()) - if err != nil { - return b, err - } - } - return b, nil - case reflect.Ptr: - if v.IsNil() { - return AppendNil(b), err - } - b, err = AppendIntf(b, v.Elem().Interface()) - return b, err - default: - return b, &ErrUnsupportedType{T: v.Type()} - } -} diff --git a/vendor/github.com/willf/bitset/.gitignore b/vendor/github.com/willf/bitset/.gitignore deleted file mode 100644 index 5c204d28b..000000000 --- a/vendor/github.com/willf/bitset/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -target diff --git a/vendor/github.com/willf/bitset/.travis.yml b/vendor/github.com/willf/bitset/.travis.yml deleted file mode 100644 index 094aa5ce0..000000000 --- a/vendor/github.com/willf/bitset/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -language: go - -sudo: false - -branches: - except: - - release - -branches: - only: - - master - - travis - -go: - - "1.11.x" - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; - - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; - - go get github.com/mattn/goveralls - -before_script: - - make deps - -script: - - make qa - 
-after_failure: - - cat ./target/test/report.xml - -after_success: - - if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/willf/bitset/LICENSE b/vendor/github.com/willf/bitset/LICENSE deleted file mode 100644 index 59cab8a93..000000000 --- a/vendor/github.com/willf/bitset/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014 Will Fitzgerald. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/willf/bitset/Makefile b/vendor/github.com/willf/bitset/Makefile deleted file mode 100644 index ad71f6a4a..000000000 --- a/vendor/github.com/willf/bitset/Makefile +++ /dev/null @@ -1,197 +0,0 @@ -# MAKEFILE -# -# @author Nicola Asuni -# @link https://github.com/willf/bitset -# ------------------------------------------------------------------------------ - -# List special make targets that are not associated with files -.PHONY: help all test format fmtcheck vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan qa deps clean nuke - -# Use bash as shell (Note: Ubuntu now uses dash which doesn't support PIPESTATUS). -SHELL=/bin/bash - -# CVS path (path to the parent dir containing the project) -CVSPATH=github.com/willf - -# Project owner -OWNER=willf - -# Project vendor -VENDOR=willf - -# Project name -PROJECT=bitset - -# Project version -VERSION=$(shell cat VERSION) - -# Name of RPM or DEB package -PKGNAME=${VENDOR}-${PROJECT} - -# Current directory -CURRENTDIR=$(shell pwd) - -# GO lang path -ifneq ($(GOPATH),) - ifeq ($(findstring $(GOPATH),$(CURRENTDIR)),) - # the defined GOPATH is not valid - GOPATH= - endif -endif -ifeq ($(GOPATH),) - # extract the GOPATH - GOPATH=$(firstword $(subst /src/, ,$(CURRENTDIR))) -endif - -# --- MAKE TARGETS --- - -# Display general help about this command -help: - @echo "" - @echo "$(PROJECT) Makefile." 
- @echo "GOPATH=$(GOPATH)" - @echo "The following commands are available:" - @echo "" - @echo " make qa : Run all the tests" - @echo " make test : Run the unit tests" - @echo "" - @echo " make format : Format the source code" - @echo " make fmtcheck : Check if the source code has been formatted" - @echo " make vet : Check for suspicious constructs" - @echo " make lint : Check for style errors" - @echo " make coverage : Generate the coverage report" - @echo " make cyclo : Generate the cyclomatic complexity report" - @echo " make ineffassign : Detect ineffectual assignments" - @echo " make misspell : Detect commonly misspelled words in source files" - @echo " make structcheck : Find unused struct fields" - @echo " make varcheck : Find unused global variables and constants" - @echo " make errcheck : Check that error return values are used" - @echo " make gosimple : Suggest code simplifications" - @echo " make astscan : GO AST scanner" - @echo "" - @echo " make docs : Generate source code documentation" - @echo "" - @echo " make deps : Get the dependencies" - @echo " make clean : Remove any build artifact" - @echo " make nuke : Deletes any intermediate file" - @echo "" - -# Alias for help target -all: help - -# Run the unit tests -test: - @mkdir -p target/test - @mkdir -p target/report - GOPATH=$(GOPATH) \ - go test \ - -covermode=atomic \ - -bench=. \ - -race \ - -cpuprofile=target/report/cpu.out \ - -memprofile=target/report/mem.out \ - -mutexprofile=target/report/mutex.out \ - -coverprofile=target/report/coverage.out \ - -v ./... | \ - tee >(PATH=$(GOPATH)/bin:$(PATH) go-junit-report > target/test/report.xml); \ - test $${PIPESTATUS[0]} -eq 0 - -# Format the source code -format: - @find . -type f -name "*.go" -exec gofmt -s -w {} \; - -# Check if the source code has been formatted -fmtcheck: - @mkdir -p target - @find . -type f -name "*.go" -exec gofmt -s -d {} \; | tee target/format.diff - @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; } - -# Check for syntax errors -vet: - GOPATH=$(GOPATH) go vet . - -# Check for style errors -lint: - GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint . 
- -# Generate the coverage report -coverage: - @mkdir -p target/report - GOPATH=$(GOPATH) \ - go tool cover -html=target/report/coverage.out -o target/report/coverage.html - -# Report cyclomatic complexity -cyclo: - @mkdir -p target/report - GOPATH=$(GOPATH) gocyclo -avg ./ | tee target/report/cyclo.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Detect ineffectual assignments -ineffassign: - @mkdir -p target/report - GOPATH=$(GOPATH) ineffassign ./ | tee target/report/ineffassign.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Detect commonly misspelled words in source files -misspell: - @mkdir -p target/report - GOPATH=$(GOPATH) misspell -error ./ | tee target/report/misspell.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Find unused struct fields -structcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) structcheck -a ./ | tee target/report/structcheck.txt - -# Find unused global variables and constants -varcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) varcheck -e ./ | tee target/report/varcheck.txt - -# Check that error return values are used -errcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) errcheck ./ | tee target/report/errcheck.txt - -# Suggest code simplifications -gosimple: - @mkdir -p target/report - GOPATH=$(GOPATH) gosimple ./ | tee target/report/gosimple.txt - -# AST scanner -astscan: - @mkdir -p target/report - GOPATH=$(GOPATH) gosec . | tee target/report/astscan.txt ; test $${PIPESTATUS[0]} -eq 0 || true - -# Generate source docs -docs: - @mkdir -p target/docs - nohup sh -c 'GOPATH=$(GOPATH) godoc -http=127.0.0.1:6060' > target/godoc_server.log 2>&1 & - wget --directory-prefix=target/docs/ --execute robots=off --retry-connrefused --recursive --no-parent --adjust-extension --page-requisites --convert-links http://127.0.0.1:6060/pkg/github.com/${VENDOR}/${PROJECT}/ ; kill -9 `lsof -ti :6060` - @echo ''${PKGNAME}' Documentation ...' > target/docs/index.html - -# Alias to run all quality-assurance checks -qa: fmtcheck test vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan - -# --- INSTALL --- - -# Get the dependencies -deps: - GOPATH=$(GOPATH) go get ./... - GOPATH=$(GOPATH) go get golang.org/x/lint/golint - GOPATH=$(GOPATH) go get github.com/jstemmer/go-junit-report - GOPATH=$(GOPATH) go get github.com/axw/gocov/gocov - GOPATH=$(GOPATH) go get github.com/fzipp/gocyclo - GOPATH=$(GOPATH) go get github.com/gordonklaus/ineffassign - GOPATH=$(GOPATH) go get github.com/client9/misspell/cmd/misspell - GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/structcheck - GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/varcheck - GOPATH=$(GOPATH) go get github.com/kisielk/errcheck - GOPATH=$(GOPATH) go get honnef.co/go/tools/cmd/gosimple - GOPATH=$(GOPATH) go get github.com/securego/gosec/cmd/gosec/... - -# Remove any build artifact -clean: - GOPATH=$(GOPATH) go clean ./... - -# Deletes any intermediate file -nuke: - rm -rf ./target - GOPATH=$(GOPATH) go clean -i ./... 
diff --git a/vendor/github.com/willf/bitset/README.md b/vendor/github.com/willf/bitset/README.md deleted file mode 100644 index 6c62b20c6..000000000 --- a/vendor/github.com/willf/bitset/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# bitset - -*Go language library to map between non-negative integers and boolean values* - -[![Master Build Status](https://secure.travis-ci.org/willf/bitset.png?branch=master)](https://travis-ci.org/willf/bitset?branch=master) -[![Master Coverage Status](https://coveralls.io/repos/willf/bitset/badge.svg?branch=master&service=github)](https://coveralls.io/github/willf/bitset?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) -[![GoDoc](https://godoc.org/github.com/willf/bitset?status.svg)](http://godoc.org/github.com/willf/bitset) - - -## Description - -Package bitset implements bitsets, a mapping between non-negative integers and boolean values. -It should be more efficient than map[uint] bool. - -It provides methods for setting, clearing, flipping, and testing individual integers. - -But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits. - -BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used. - -Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining. - -### Example use: - -```go -package main - -import ( - "fmt" - "math/rand" - - "github.com/willf/bitset" -) - -func main() { - fmt.Printf("Hello from BitSet!\n") - var b bitset.BitSet - // play some Go Fish - for i := 0; i < 100; i++ { - card1 := uint(rand.Intn(52)) - card2 := uint(rand.Intn(52)) - b.Set(card1) - if b.Test(card2) { - fmt.Println("Go Fish!") - } - b.Clear(card1) - } - - // Chaining - b.Set(10).Set(11) - - for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) { - fmt.Println("The following bit is set:", i) - } - if b.Intersection(bitset.New(100).Set(10)).Count() == 1 { - fmt.Println("Intersection works.") - } else { - fmt.Println("Intersection doesn't work???") - } -} -``` - -As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. - -Godoc documentation is at: https://godoc.org/github.com/willf/bitset - - -## Implementation Note - -Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed. - -It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped. - -## Installation - -```bash -go get github.com/willf/bitset -``` - -## Contributing - -If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)") - -This project include a Makefile that allows you to test and build the project with simple commands. 
-To see all available options: -```bash -make help -``` - -## Running all tests - -Before committing the code, please check if it passes all tests using (note: this will install some dependencies): -```bash -make qa -``` diff --git a/vendor/github.com/willf/bitset/bitset.go b/vendor/github.com/willf/bitset/bitset.go deleted file mode 100644 index 32044f5c8..000000000 --- a/vendor/github.com/willf/bitset/bitset.go +++ /dev/null @@ -1,877 +0,0 @@ -/* -Package bitset implements bitsets, a mapping -between non-negative integers and boolean values. It should be more -efficient than map[uint] bool. - -It provides methods for setting, clearing, flipping, and testing -individual integers. - -But it also provides set intersection, union, difference, -complement, and symmetric operations, as well as tests to -check whether any, all, or no bits are set, and querying a -bitset's current length and number of positive bits. - -BitSets are expanded to the size of the largest set bit; the -memory allocation is approximately Max bits, where Max is -the largest set bit. BitSets are never shrunk. On creation, -a hint can be given for the number of bits that will be used. - -Many of the methods, including Set,Clear, and Flip, return -a BitSet pointer, which allows for chaining. - -Example use: - - import "bitset" - var b BitSet - b.Set(10).Set(11) - if b.Test(1000) { - b.Clear(1000) - } - if B.Intersection(bitset.New(100).Set(10)).Count() > 1 { - fmt.Println("Intersection works.") - } - -As an alternative to BitSets, one should check out the 'big' package, -which provides a (less set-theoretical) view of bitsets. - -*/ -package bitset - -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// the wordSize of a bit set -const wordSize = uint(64) - -// log2WordSize is lg(wordSize) -const log2WordSize = uint(6) - -// allBits has every bit set -const allBits uint64 = 0xffffffffffffffff - -// default binary BigEndian -var binaryOrder binary.ByteOrder = binary.BigEndian - -// default json encoding base64.URLEncoding -var base64Encoding = base64.URLEncoding - -// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding) -func Base64StdEncoding() { base64Encoding = base64.StdEncoding } - -// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian) -func LittleEndian() { binaryOrder = binary.LittleEndian } - -// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0. -type BitSet struct { - length uint - set []uint64 -} - -// Error is used to distinguish errors (panics) generated in this package. 
-type Error string - -// safeSet will fixup b.set to be non-nil and return the field value -func (b *BitSet) safeSet() []uint64 { - if b.set == nil { - b.set = make([]uint64, wordsNeeded(0)) - } - return b.set -} - -// From is a constructor used to create a BitSet from an array of integers -func From(buf []uint64) *BitSet { - return &BitSet{uint(len(buf)) * 64, buf} -} - -// Bytes returns the bitset as array of integers -func (b *BitSet) Bytes() []uint64 { - return b.set -} - -// wordsNeeded calculates the number of words needed for i bits -func wordsNeeded(i uint) int { - if i > (Cap() - wordSize + 1) { - return int(Cap() >> log2WordSize) - } - return int((i + (wordSize - 1)) >> log2WordSize) -} - -// New creates a new BitSet with a hint that length bits will be required -func New(length uint) (bset *BitSet) { - defer func() { - if r := recover(); r != nil { - bset = &BitSet{ - 0, - make([]uint64, 0), - } - } - }() - - bset = &BitSet{ - length, - make([]uint64, wordsNeeded(length)), - } - - return bset -} - -// Cap returns the total possible capacity, or number of bits -func Cap() uint { - return ^uint(0) -} - -// Len returns the length of the BitSet in words -func (b *BitSet) Len() uint { - return b.length -} - -// extendSetMaybe adds additional words to incorporate new bits if needed -func (b *BitSet) extendSetMaybe(i uint) { - if i >= b.length { // if we need more bits, make 'em - nsize := wordsNeeded(i + 1) - if b.set == nil { - b.set = make([]uint64, nsize) - } else if cap(b.set) >= nsize { - b.set = b.set[:nsize] // fast resize - } else if len(b.set) < nsize { - newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x - copy(newset, b.set) - b.set = newset - } - b.length = i + 1 - } -} - -// Test whether bit i is set. -func (b *BitSet) Test(i uint) bool { - if i >= b.length { - return false - } - return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0 -} - -// Set bit i to 1 -func (b *BitSet) Set(i uint) *BitSet { - b.extendSetMaybe(i) - b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1)) - return b -} - -// Clear bit i to 0 -func (b *BitSet) Clear(i uint) *BitSet { - if i >= b.length { - return b - } - b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1)) - return b -} - -// SetTo sets bit i to value -func (b *BitSet) SetTo(i uint, value bool) *BitSet { - if value { - return b.Set(i) - } - return b.Clear(i) -} - -// Flip bit at i -func (b *BitSet) Flip(i uint) *BitSet { - if i >= b.length { - return b.Set(i) - } - b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1)) - return b -} - -// Shrink shrinks BitSet to desired length in bits. It clears all bits > length -// and reduces the size and length of the set. -// -// A new slice is allocated to store the new bits, so you may see an increase in -// memory usage until the GC runs. Normally this should not be a problem, but if you -// have an extremely large BitSet its important to understand that the old BitSet will -// remain in memory until the GC frees it. -func (b *BitSet) Shrink(length uint) *BitSet { - idx := wordsNeeded(length + 1) - if idx > len(b.set) { - return b - } - shrunk := make([]uint64, idx) - copy(shrunk, b.set[:idx]) - b.set = shrunk - b.length = length + 1 - b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1)) - 1)) - return b -} - -// InsertAt takes an index which indicates where a bit should be -// inserted. Then it shifts all the bits in the set to the left by 1, starting -// from the given index position, and sets the index position to 0. 
-// -// Depending on the size of your BitSet, and where you are inserting the new entry, -// this method could be extremely slow and in some cases might cause the entire BitSet -// to be recopied. -func (b *BitSet) InsertAt(idx uint) *BitSet { - insertAtElement := (idx >> log2WordSize) - - // if length of set is a multiple of wordSize we need to allocate more space first - if b.isLenExactMultiple() { - b.set = append(b.set, uint64(0)) - } - - var i uint - for i = uint(len(b.set) - 1); i > insertAtElement; i-- { - // all elements above the position where we want to insert can simply by shifted - b.set[i] <<= 1 - - // we take the most significant bit of the previous element and set it as - // the least significant bit of the current element - b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63 - } - - // generate a mask to extract the data that we need to shift left - // within the element where we insert a bit - dataMask := ^(uint64(1)< 0x40000 { - buffer.WriteString("...") - break - } - buffer.WriteString(strconv.FormatInt(int64(i), 10)) - i, e = b.NextSet(i + 1) - if e { - buffer.WriteString(",") - } - } - buffer.WriteString("}") - return buffer.String() -} - -// DeleteAt deletes the bit at the given index position from -// within the bitset -// All the bits residing on the left of the deleted bit get -// shifted right by 1 -// The running time of this operation may potentially be -// relatively slow, O(length) -func (b *BitSet) DeleteAt(i uint) *BitSet { - // the index of the slice element where we'll delete a bit - deleteAtElement := i >> log2WordSize - - // generate a mask for the data that needs to be shifted right - // within that slice element that gets modified - dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1) - - // extract the data that we'll shift right from the slice element - data := b.set[deleteAtElement] & dataMask - - // set the masked area to 0 while leaving the rest as it is - b.set[deleteAtElement] &= ^dataMask - - // shift the previously extracted data to the right and then - // set it in the previously masked area - b.set[deleteAtElement] |= (data >> 1) & dataMask - - // loop over all the consecutive slice elements to copy each - // lowest bit into the highest position of the previous element, - // then shift the entire content to the right by 1 - for i := int(deleteAtElement) + 1; i < len(b.set); i++ { - b.set[i-1] |= (b.set[i] & 1) << 63 - b.set[i] >>= 1 - } - - b.length = b.length - 1 - - return b -} - -// NextSet returns the next bit set from the specified index, -// including possibly the current index -// along with an error code (true = valid, false = no set bit found) -// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...} -func (b *BitSet) NextSet(i uint) (uint, bool) { - x := int(i >> log2WordSize) - if x >= len(b.set) { - return 0, false - } - w := b.set[x] - w = w >> (i & (wordSize - 1)) - if w != 0 { - return i + trailingZeroes64(w), true - } - x = x + 1 - for x < len(b.set) { - if b.set[x] != 0 { - return uint(x)*wordSize + trailingZeroes64(b.set[x]), true - } - x = x + 1 - - } - return 0, false -} - -// NextSetMany returns many next bit sets from the specified index, -// including possibly the current index and up to cap(buffer). 
-// If the returned slice has len zero, then no more set bits were found -// -// buffer := make([]uint, 256) // this should be reused -// j := uint(0) -// j, buffer = bitmap.NextSetMany(j, buffer) -// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) { -// for k := range buffer { -// do something with buffer[k] -// } -// j += 1 -// } -// -func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) { - myanswer := buffer - capacity := cap(buffer) - x := int(i >> log2WordSize) - if x >= len(b.set) || capacity == 0 { - return 0, myanswer[:0] - } - skip := i & (wordSize - 1) - word := b.set[x] >> skip - myanswer = myanswer[:capacity] - size := int(0) - for word != 0 { - r := trailingZeroes64(word) - t := word & ((^word) + 1) - myanswer[size] = r + i - size++ - if size == capacity { - goto End - } - word = word ^ t - } - x++ - for idx, word := range b.set[x:] { - for word != 0 { - r := trailingZeroes64(word) - t := word & ((^word) + 1) - myanswer[size] = r + (uint(x+idx) << 6) - size++ - if size == capacity { - goto End - } - word = word ^ t - } - } -End: - if size > 0 { - return myanswer[size-1], myanswer[:size] - } - return 0, myanswer[:0] -} - -// NextClear returns the next clear bit from the specified index, -// including possibly the current index -// along with an error code (true = valid, false = no bit found i.e. all bits are set) -func (b *BitSet) NextClear(i uint) (uint, bool) { - x := int(i >> log2WordSize) - if x >= len(b.set) { - return 0, false - } - w := b.set[x] - w = w >> (i & (wordSize - 1)) - wA := allBits >> (i & (wordSize - 1)) - index := i + trailingZeroes64(^w) - if w != wA && index < b.length { - return index, true - } - x++ - for x < len(b.set) { - index = uint(x)*wordSize + trailingZeroes64(^b.set[x]) - if b.set[x] != allBits && index < b.length { - return index, true - } - x++ - } - return 0, false -} - -// ClearAll clears the entire BitSet -func (b *BitSet) ClearAll() *BitSet { - if b != nil && b.set != nil { - for i := range b.set { - b.set[i] = 0 - } - } - return b -} - -// wordCount returns the number of words used in a bit set -func (b *BitSet) wordCount() int { - return len(b.set) -} - -// Clone this BitSet -func (b *BitSet) Clone() *BitSet { - c := New(b.length) - if b.set != nil { // Clone should not modify current object - copy(c.set, b.set) - } - return c -} - -// Copy into a destination BitSet -// Returning the size of the destination BitSet -// like array copy -func (b *BitSet) Copy(c *BitSet) (count uint) { - if c == nil { - return - } - if b.set != nil { // Copy should not modify current object - copy(c.set, b.set) - } - count = c.length - if b.length < c.length { - count = b.length - } - return -} - -// Count (number of set bits) -func (b *BitSet) Count() uint { - if b != nil && b.set != nil { - return uint(popcntSlice(b.set)) - } - return 0 -} - -// Equal tests the equvalence of two BitSets. 
-// False if they are of different sizes, otherwise true -// only if all the same bits are set -func (b *BitSet) Equal(c *BitSet) bool { - if c == nil { - return false - } - if b.length != c.length { - return false - } - if b.length == 0 { // if they have both length == 0, then could have nil set - return true - } - // testing for equality shoud not transform the bitset (no call to safeSet) - - for p, v := range b.set { - if c.set[p] != v { - return false - } - } - return true -} - -func panicIfNull(b *BitSet) { - if b == nil { - panic(Error("BitSet must not be null")) - } -} - -// Difference of base set and other set -// This is the BitSet equivalent of &^ (and not) -func (b *BitSet) Difference(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - result = b.Clone() // clone b (in case b is bigger than compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - result.set[i] = b.set[i] &^ compare.set[i] - } - return -} - -// DifferenceCardinality computes the cardinality of the differnce -func (b *BitSet) DifferenceCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - cnt := uint64(0) - cnt += popcntMaskSlice(b.set[:l], compare.set[:l]) - cnt += popcntSlice(b.set[l:]) - return uint(cnt) -} - -// InPlaceDifference computes the difference of base set and other set -// This is the BitSet equivalent of &^ (and not) -func (b *BitSet) InPlaceDifference(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - b.set[i] &^= compare.set[i] - } -} - -// Convenience function: return two bitsets ordered by -// increasing length. Note: neither can be nil -func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) { - if a.length <= b.length { - ap, bp = a, b - } else { - ap, bp = b, a - } - return -} - -// Intersection of base set and other set -// This is the BitSet equivalent of & (and) -func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - result = New(b.length) - for i, word := range b.set { - result.set[i] = word & compare.set[i] - } - return -} - -// IntersectionCardinality computes the cardinality of the union -func (b *BitSet) IntersectionCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntAndSlice(b.set, compare.set) - return uint(cnt) -} - -// InPlaceIntersection destructively computes the intersection of -// base set and the compare set. 
-// This is the BitSet equivalent of & (and) -func (b *BitSet) InPlaceIntersection(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - b.set[i] &= compare.set[i] - } - for i := l; i < len(b.set); i++ { - b.set[i] = 0 - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } -} - -// Union of base set and other set -// This is the BitSet equivalent of | (or) -func (b *BitSet) Union(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - result = compare.Clone() - for i, word := range b.set { - result.set[i] = word | compare.set[i] - } - return -} - -// UnionCardinality computes the cardinality of the uniton of the base set -// and the compare set. -func (b *BitSet) UnionCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntOrSlice(b.set, compare.set) - if len(compare.set) > len(b.set) { - cnt += popcntSlice(compare.set[len(b.set):]) - } - return uint(cnt) -} - -// InPlaceUnion creates the destructive union of base set and compare set. -// This is the BitSet equivalent of | (or). -func (b *BitSet) InPlaceUnion(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } - for i := 0; i < l; i++ { - b.set[i] |= compare.set[i] - } - if len(compare.set) > l { - for i := l; i < len(compare.set); i++ { - b.set[i] = compare.set[i] - } - } -} - -// SymmetricDifference of base set and other set -// This is the BitSet equivalent of ^ (xor) -func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - // compare is bigger, so clone it - result = compare.Clone() - for i, word := range b.set { - result.set[i] = word ^ compare.set[i] - } - return -} - -// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference -func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntXorSlice(b.set, compare.set) - if len(compare.set) > len(b.set) { - cnt += popcntSlice(compare.set[len(b.set):]) - } - return uint(cnt) -} - -// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set -// This is the BitSet equivalent of ^ (xor) -func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } - for i := 0; i < l; i++ { - b.set[i] ^= compare.set[i] - } - if len(compare.set) > l { - for i := l; i < len(compare.set); i++ { - b.set[i] = compare.set[i] - } - } -} - -// Is the length an exact multiple of word sizes? 
-func (b *BitSet) isLenExactMultiple() bool { - return b.length%wordSize == 0 -} - -// Clean last word by setting unused bits to 0 -func (b *BitSet) cleanLastWord() { - if !b.isLenExactMultiple() { - b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize) - } -} - -// Complement computes the (local) complement of a biset (up to length bits) -func (b *BitSet) Complement() (result *BitSet) { - panicIfNull(b) - result = New(b.length) - for i, word := range b.set { - result.set[i] = ^word - } - result.cleanLastWord() - return -} - -// All returns true if all bits are set, false otherwise. Returns true for -// empty sets. -func (b *BitSet) All() bool { - panicIfNull(b) - return b.Count() == b.length -} - -// None returns true if no bit is set, false otherwise. Retursn true for -// empty sets. -func (b *BitSet) None() bool { - panicIfNull(b) - if b != nil && b.set != nil { - for _, word := range b.set { - if word > 0 { - return false - } - } - return true - } - return true -} - -// Any returns true if any bit is set, false otherwise -func (b *BitSet) Any() bool { - panicIfNull(b) - return !b.None() -} - -// IsSuperSet returns true if this is a superset of the other set -func (b *BitSet) IsSuperSet(other *BitSet) bool { - for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) { - if !b.Test(i) { - return false - } - } - return true -} - -// IsStrictSuperSet returns true if this is a strict superset of the other set -func (b *BitSet) IsStrictSuperSet(other *BitSet) bool { - return b.Count() > other.Count() && b.IsSuperSet(other) -} - -// DumpAsBits dumps a bit set as a string of bits -func (b *BitSet) DumpAsBits() string { - if b.set == nil { - return "." - } - buffer := bytes.NewBufferString("") - i := len(b.set) - 1 - for ; i >= 0; i-- { - fmt.Fprintf(buffer, "%064b.", b.set[i]) - } - return buffer.String() -} - -// BinaryStorageSize returns the binary storage requirements -func (b *BitSet) BinaryStorageSize() int { - return binary.Size(uint64(0)) + binary.Size(b.set) -} - -// WriteTo writes a BitSet to a stream -func (b *BitSet) WriteTo(stream io.Writer) (int64, error) { - length := uint64(b.length) - - // Write length - err := binary.Write(stream, binaryOrder, length) - if err != nil { - return 0, err - } - - // Write set - err = binary.Write(stream, binaryOrder, b.set) - return int64(b.BinaryStorageSize()), err -} - -// ReadFrom reads a BitSet from a stream written using WriteTo -func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) { - var length uint64 - - // Read length first - err := binary.Read(stream, binaryOrder, &length) - if err != nil { - return 0, err - } - newset := New(uint(length)) - - if uint64(newset.length) != length { - return 0, errors.New("Unmarshalling error: type mismatch") - } - - // Read remaining bytes as set - err = binary.Read(stream, binaryOrder, newset.set) - if err != nil { - return 0, err - } - - *b = *newset - return int64(b.BinaryStorageSize()), nil -} - -// MarshalBinary encodes a BitSet into a binary form and returns the result. -func (b *BitSet) MarshalBinary() ([]byte, error) { - var buf bytes.Buffer - writer := bufio.NewWriter(&buf) - - _, err := b.WriteTo(writer) - if err != nil { - return []byte{}, err - } - - err = writer.Flush() - - return buf.Bytes(), err -} - -// UnmarshalBinary decodes the binary form generated by MarshalBinary. 
-func (b *BitSet) UnmarshalBinary(data []byte) error { - buf := bytes.NewReader(data) - reader := bufio.NewReader(buf) - - _, err := b.ReadFrom(reader) - - return err -} - -// MarshalJSON marshals a BitSet as a JSON structure -func (b *BitSet) MarshalJSON() ([]byte, error) { - buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize())) - _, err := b.WriteTo(buffer) - if err != nil { - return nil, err - } - - // URLEncode all bytes - return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes())) -} - -// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON -func (b *BitSet) UnmarshalJSON(data []byte) error { - // Unmarshal as string - var s string - err := json.Unmarshal(data, &s) - if err != nil { - return err - } - - // URLDecode string - buf, err := base64Encoding.DecodeString(s) - if err != nil { - return err - } - - _, err = b.ReadFrom(bytes.NewReader(buf)) - return err -} diff --git a/vendor/github.com/willf/bitset/popcnt.go b/vendor/github.com/willf/bitset/popcnt.go deleted file mode 100644 index 76577a838..000000000 --- a/vendor/github.com/willf/bitset/popcnt.go +++ /dev/null @@ -1,53 +0,0 @@ -package bitset - -// bit population count, take from -// https://code.google.com/p/go/issues/detail?id=4988#c11 -// credit: https://code.google.com/u/arnehormann/ -func popcount(x uint64) (n uint64) { - x -= (x >> 1) & 0x5555555555555555 - x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 - x += x >> 4 - x &= 0x0f0f0f0f0f0f0f0f - x *= 0x0101010101010101 - return x >> 56 -} - -func popcntSliceGo(s []uint64) uint64 { - cnt := uint64(0) - for _, x := range s { - cnt += popcount(x) - } - return cnt -} - -func popcntMaskSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] &^ m[i]) - } - return cnt -} - -func popcntAndSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] & m[i]) - } - return cnt -} - -func popcntOrSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] | m[i]) - } - return cnt -} - -func popcntXorSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] ^ m[i]) - } - return cnt -} diff --git a/vendor/github.com/willf/bitset/popcnt_19.go b/vendor/github.com/willf/bitset/popcnt_19.go deleted file mode 100644 index fc8ff4f36..000000000 --- a/vendor/github.com/willf/bitset/popcnt_19.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build go1.9 - -package bitset - -import "math/bits" - -func popcntSlice(s []uint64) uint64 { - var cnt int - for _, x := range s { - cnt += bits.OnesCount64(x) - } - return uint64(cnt) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] &^ m[i]) - } - return uint64(cnt) -} - -func popcntAndSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] & m[i]) - } - return uint64(cnt) -} - -func popcntOrSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] | m[i]) - } - return uint64(cnt) -} - -func popcntXorSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] ^ m[i]) - } - return uint64(cnt) -} diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.go b/vendor/github.com/willf/bitset/popcnt_amd64.go deleted file mode 100644 index 4cf64f24a..000000000 --- a/vendor/github.com/willf/bitset/popcnt_amd64.go +++ /dev/null @@ -1,68 +0,0 @@ -// +build !go1.9 -// +build amd64,!appengine - -package bitset - -// 
*** the following functions are defined in popcnt_amd64.s - -//go:noescape - -func hasAsm() bool - -// useAsm is a flag used to select the GO or ASM implementation of the popcnt function -var useAsm = hasAsm() - -//go:noescape - -func popcntSliceAsm(s []uint64) uint64 - -//go:noescape - -func popcntMaskSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntAndSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntOrSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntXorSliceAsm(s, m []uint64) uint64 - -func popcntSlice(s []uint64) uint64 { - if useAsm { - return popcntSliceAsm(s) - } - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - if useAsm { - return popcntMaskSliceAsm(s, m) - } - return popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - if useAsm { - return popcntAndSliceAsm(s, m) - } - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - if useAsm { - return popcntOrSliceAsm(s, m) - } - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - if useAsm { - return popcntXorSliceAsm(s, m) - } - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.s b/vendor/github.com/willf/bitset/popcnt_amd64.s deleted file mode 100644 index 666c0dcc1..000000000 --- a/vendor/github.com/willf/bitset/popcnt_amd64.s +++ /dev/null @@ -1,104 +0,0 @@ -// +build !go1.9 -// +build amd64,!appengine - -TEXT ·hasAsm(SB),4,$0-1 -MOVQ $1, AX -CPUID -SHRQ $23, CX -ANDQ $1, CX -MOVB CX, ret+0(FP) -RET - -#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2 - -TEXT ·popcntSliceAsm(SB),4,$0-32 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntSliceEnd -popcntSliceLoop: -BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX -ADDQ DX, AX -ADDQ $8, SI -LOOP popcntSliceLoop -popcntSliceEnd: -MOVQ AX, ret+24(FP) -RET - -TEXT ·popcntMaskSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntMaskSliceEnd -MOVQ m+24(FP), DI -popcntMaskSliceLoop: -MOVQ (DI), DX -NOTQ DX -ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntMaskSliceLoop -popcntMaskSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntAndSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntAndSliceEnd -MOVQ m+24(FP), DI -popcntAndSliceLoop: -MOVQ (DI), DX -ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntAndSliceLoop -popcntAndSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntOrSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntOrSliceEnd -MOVQ m+24(FP), DI -popcntOrSliceLoop: -MOVQ (DI), DX -ORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntOrSliceLoop -popcntOrSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntXorSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntXorSliceEnd -MOVQ m+24(FP), DI -popcntXorSliceLoop: -MOVQ (DI), DX -XORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntXorSliceLoop -popcntXorSliceEnd: -MOVQ AX, ret+48(FP) -RET diff --git a/vendor/github.com/willf/bitset/popcnt_generic.go b/vendor/github.com/willf/bitset/popcnt_generic.go deleted file mode 100644 index 21e0ff7b4..000000000 --- a/vendor/github.com/willf/bitset/popcnt_generic.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !go1.9 -// +build !amd64 
appengine - -package bitset - -func popcntSlice(s []uint64) uint64 { - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - return popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/willf/bitset/trailing_zeros_18.go b/vendor/github.com/willf/bitset/trailing_zeros_18.go deleted file mode 100644 index c52b61be9..000000000 --- a/vendor/github.com/willf/bitset/trailing_zeros_18.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - -package bitset - -var deBruijn = [...]byte{ - 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, - 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, - 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, - 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, -} - -func trailingZeroes64(v uint64) uint { - return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58]) -} diff --git a/vendor/github.com/willf/bitset/trailing_zeros_19.go b/vendor/github.com/willf/bitset/trailing_zeros_19.go deleted file mode 100644 index 36a988e71..000000000 --- a/vendor/github.com/willf/bitset/trailing_zeros_19.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.9 - -package bitset - -import "math/bits" - -func trailingZeroes64(v uint64) uint { - return uint(bits.TrailingZeros64(v)) -} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 2b00ddba0..000000000 --- a/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976..000000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go deleted file mode 100644 index 00ee95550..000000000 --- a/vendor/golang.org/x/crypto/acme/acme.go +++ /dev/null @@ -1,949 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package acme provides an implementation of the -// Automatic Certificate Management Environment (ACME) spec. -// See https://tools.ietf.org/html/draft-ietf-acme-acme-02 for details. -// -// Most common scenarios will want to use autocert subdirectory instead, -// which provides automatic access to certificates from Let's Encrypt -// and any other ACME-based CA. -// -// This package is a work in progress and makes no API stability promises. -package acme - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "io/ioutil" - "math/big" - "net/http" - "strings" - "sync" - "time" -) - -const ( - // LetsEncryptURL is the Directory endpoint of Let's Encrypt CA. - LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory" - - // ALPNProto is the ALPN protocol name used by a CA server when validating - // tls-alpn-01 challenges. 
- // - // Package users must ensure their servers can negotiate the ACME ALPN in - // order for tls-alpn-01 challenge verifications to succeed. - // See the crypto/tls package's Config.NextProtos field. - ALPNProto = "acme-tls/1" -) - -// idPeACMEIdentifierV1 is the OID for the ACME extension for the TLS-ALPN challenge. -var idPeACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1} - -const ( - maxChainLen = 5 // max depth and breadth of a certificate chain - maxCertSize = 1 << 20 // max size of a certificate, in bytes - - // Max number of collected nonces kept in memory. - // Expect usual peak of 1 or 2. - maxNonces = 100 -) - -// Client is an ACME client. -// The only required field is Key. An example of creating a client with a new key -// is as follows: -// -// key, err := rsa.GenerateKey(rand.Reader, 2048) -// if err != nil { -// log.Fatal(err) -// } -// client := &Client{Key: key} -// -type Client struct { - // Key is the account key used to register with a CA and sign requests. - // Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey. - // - // The following algorithms are supported: - // RS256, ES256, ES384 and ES512. - // See RFC7518 for more details about the algorithms. - Key crypto.Signer - - // HTTPClient optionally specifies an HTTP client to use - // instead of http.DefaultClient. - HTTPClient *http.Client - - // DirectoryURL points to the CA directory endpoint. - // If empty, LetsEncryptURL is used. - // Mutating this value after a successful call of Client's Discover method - // will have no effect. - DirectoryURL string - - // RetryBackoff computes the duration after which the nth retry of a failed request - // should occur. The value of n for the first call on failure is 1. - // The values of r and resp are the request and response of the last failed attempt. - // If the returned value is negative or zero, no more retries are done and an error - // is returned to the caller of the original method. - // - // Requests which result in a 4xx client error are not retried, - // except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests. - // - // If RetryBackoff is nil, a truncated exponential backoff algorithm - // with the ceiling of 10 seconds is used, where each subsequent retry n - // is done after either ("Retry-After" + jitter) or (2^n seconds + jitter), - // preferring the former if "Retry-After" header is found in the resp. - // The jitter is a random value up to 1 second. - RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration - - dirMu sync.Mutex // guards writes to dir - dir *Directory // cached result of Client's Discover method - - noncesMu sync.Mutex - nonces map[string]struct{} // nonces collected from previous responses -} - -// Discover performs ACME server discovery using c.DirectoryURL. -// -// It caches successful result. So, subsequent calls will not result in -// a network round-trip. This also means mutating c.DirectoryURL after successful call -// of this method will have no effect. 
-func (c *Client) Discover(ctx context.Context) (Directory, error) { - c.dirMu.Lock() - defer c.dirMu.Unlock() - if c.dir != nil { - return *c.dir, nil - } - - res, err := c.get(ctx, c.directoryURL(), wantStatus(http.StatusOK)) - if err != nil { - return Directory{}, err - } - defer res.Body.Close() - c.addNonce(res.Header) - - var v struct { - Reg string `json:"new-reg"` - Authz string `json:"new-authz"` - Cert string `json:"new-cert"` - Revoke string `json:"revoke-cert"` - Meta struct { - Terms string `json:"terms-of-service"` - Website string `json:"website"` - CAA []string `json:"caa-identities"` - } - } - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return Directory{}, err - } - c.dir = &Directory{ - RegURL: v.Reg, - AuthzURL: v.Authz, - CertURL: v.Cert, - RevokeURL: v.Revoke, - Terms: v.Meta.Terms, - Website: v.Meta.Website, - CAA: v.Meta.CAA, - } - return *c.dir, nil -} - -func (c *Client) directoryURL() string { - if c.DirectoryURL != "" { - return c.DirectoryURL - } - return LetsEncryptURL -} - -// CreateCert requests a new certificate using the Certificate Signing Request csr encoded in DER format. -// The exp argument indicates the desired certificate validity duration. CA may issue a certificate -// with a different duration. -// If the bundle argument is true, the returned value will also contain the CA (issuer) certificate chain. -// -// In the case where CA server does not provide the issued certificate in the response, -// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips. -// In such a scenario, the caller can cancel the polling with ctx. -// -// CreateCert returns an error if the CA's response or chain was unreasonably large. -// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. -func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) { - if _, err := c.Discover(ctx); err != nil { - return nil, "", err - } - - req := struct { - Resource string `json:"resource"` - CSR string `json:"csr"` - NotBefore string `json:"notBefore,omitempty"` - NotAfter string `json:"notAfter,omitempty"` - }{ - Resource: "new-cert", - CSR: base64.RawURLEncoding.EncodeToString(csr), - } - now := timeNow() - req.NotBefore = now.Format(time.RFC3339) - if exp > 0 { - req.NotAfter = now.Add(exp).Format(time.RFC3339) - } - - res, err := c.post(ctx, c.Key, c.dir.CertURL, req, wantStatus(http.StatusCreated)) - if err != nil { - return nil, "", err - } - defer res.Body.Close() - - curl := res.Header.Get("Location") // cert permanent URL - if res.ContentLength == 0 { - // no cert in the body; poll until we get it - cert, err := c.FetchCert(ctx, curl, bundle) - return cert, curl, err - } - // slurp issued cert and CA chain, if requested - cert, err := c.responseCert(ctx, res, bundle) - return cert, curl, err -} - -// FetchCert retrieves already issued certificate from the given url, in DER format. -// It retries the request until the certificate is successfully retrieved, -// context is cancelled by the caller or an error response is received. -// -// The returned value will also contain the CA (issuer) certificate if the bundle argument is true. -// -// FetchCert returns an error if the CA's response or chain was unreasonably large. -// Callers are encouraged to parse the returned value to ensure the certificate is valid -// and has expected features. 
-func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) { - res, err := c.get(ctx, url, wantStatus(http.StatusOK)) - if err != nil { - return nil, err - } - return c.responseCert(ctx, res, bundle) -} - -// RevokeCert revokes a previously issued certificate cert, provided in DER format. -// -// The key argument, used to sign the request, must be authorized -// to revoke the certificate. It's up to the CA to decide which keys are authorized. -// For instance, the key pair of the certificate may be authorized. -// If the key is nil, c.Key is used instead. -func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { - if _, err := c.Discover(ctx); err != nil { - return err - } - - body := &struct { - Resource string `json:"resource"` - Cert string `json:"certificate"` - Reason int `json:"reason"` - }{ - Resource: "revoke-cert", - Cert: base64.RawURLEncoding.EncodeToString(cert), - Reason: int(reason), - } - if key == nil { - key = c.Key - } - res, err := c.post(ctx, key, c.dir.RevokeURL, body, wantStatus(http.StatusOK)) - if err != nil { - return err - } - defer res.Body.Close() - return nil -} - -// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service -// during account registration. See Register method of Client for more details. -func AcceptTOS(tosURL string) bool { return true } - -// Register creates a new account registration by following the "new-reg" flow. -// It returns the registered account. The account is not modified. -// -// The registration may require the caller to agree to the CA's Terms of Service (TOS). -// If so, and the account has not indicated the acceptance of the terms (see Account for details), -// Register calls prompt with a TOS URL provided by the CA. Prompt should report -// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS. -func (c *Client) Register(ctx context.Context, a *Account, prompt func(tosURL string) bool) (*Account, error) { - if _, err := c.Discover(ctx); err != nil { - return nil, err - } - - var err error - if a, err = c.doReg(ctx, c.dir.RegURL, "new-reg", a); err != nil { - return nil, err - } - var accept bool - if a.CurrentTerms != "" && a.CurrentTerms != a.AgreedTerms { - accept = prompt(a.CurrentTerms) - } - if accept { - a.AgreedTerms = a.CurrentTerms - a, err = c.UpdateReg(ctx, a) - } - return a, err -} - -// GetReg retrieves an existing registration. -// The url argument is an Account URI. -func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) { - a, err := c.doReg(ctx, url, "reg", nil) - if err != nil { - return nil, err - } - a.URI = url - return a, nil -} - -// UpdateReg updates an existing registration. -// It returns an updated account copy. The provided account is not modified. -func (c *Client) UpdateReg(ctx context.Context, a *Account) (*Account, error) { - uri := a.URI - a, err := c.doReg(ctx, uri, "reg", a) - if err != nil { - return nil, err - } - a.URI = uri - return a, nil -} - -// Authorize performs the initial step in an authorization flow. -// The caller will then need to choose from and perform a set of returned -// challenges using c.Accept in order to successfully complete authorization. -// -// If an authorization has been previously granted, the CA may return -// a valid authorization (Authorization.Status is StatusValid). If so, the caller -// need not fulfill any challenge and can proceed to requesting a certificate. 
-func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) { - return c.authorize(ctx, "dns", domain) -} - -// AuthorizeIP is the same as Authorize but requests IP address authorization. -// Clients which successfully obtain such authorization may request to issue -// a certificate for IP addresses. -// -// See the ACME spec extension for more details about IP address identifiers: -// https://tools.ietf.org/html/draft-ietf-acme-ip. -func (c *Client) AuthorizeIP(ctx context.Context, ipaddr string) (*Authorization, error) { - return c.authorize(ctx, "ip", ipaddr) -} - -func (c *Client) authorize(ctx context.Context, typ, val string) (*Authorization, error) { - if _, err := c.Discover(ctx); err != nil { - return nil, err - } - - type authzID struct { - Type string `json:"type"` - Value string `json:"value"` - } - req := struct { - Resource string `json:"resource"` - Identifier authzID `json:"identifier"` - }{ - Resource: "new-authz", - Identifier: authzID{Type: typ, Value: val}, - } - res, err := c.post(ctx, c.Key, c.dir.AuthzURL, req, wantStatus(http.StatusCreated)) - if err != nil { - return nil, err - } - defer res.Body.Close() - - var v wireAuthz - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - if v.Status != StatusPending && v.Status != StatusValid { - return nil, fmt.Errorf("acme: unexpected status: %s", v.Status) - } - return v.authorization(res.Header.Get("Location")), nil -} - -// GetAuthorization retrieves an authorization identified by the given URL. -// -// If a caller needs to poll an authorization until its status is final, -// see the WaitAuthorization method. -func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) { - res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) - if err != nil { - return nil, err - } - defer res.Body.Close() - var v wireAuthz - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - return v.authorization(url), nil -} - -// RevokeAuthorization relinquishes an existing authorization identified -// by the given URL. -// The url argument is an Authorization.URI value. -// -// If successful, the caller will be required to obtain a new authorization -// using the Authorize method before being able to request a new certificate -// for the domain associated with the authorization. -// -// It does not revoke existing certificates. -func (c *Client) RevokeAuthorization(ctx context.Context, url string) error { - req := struct { - Resource string `json:"resource"` - Status string `json:"status"` - Delete bool `json:"delete"` - }{ - Resource: "authz", - Status: "deactivated", - Delete: true, - } - res, err := c.post(ctx, c.Key, url, req, wantStatus(http.StatusOK)) - if err != nil { - return err - } - defer res.Body.Close() - return nil -} - -// WaitAuthorization polls an authorization at the given URL -// until it is in one of the final states, StatusValid or StatusInvalid, -// the ACME CA responded with a 4xx error code, or the context is done. -// -// It returns a non-nil Authorization only if its Status is StatusValid. -// In all other cases WaitAuthorization returns an error. -// If the Status is StatusInvalid, the returned error is of type *AuthorizationError. 
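The authorization flow described above can be driven roughly as follows; authorizeDomain and its provision callback are hypothetical names used only for illustration, while Authorize, Accept and WaitAuthorization are the package's own methods.

package example

import (
	"context"
	"fmt"

	"golang.org/x/crypto/acme"
)

// authorizeDomain runs one pre-RFC 8555 ("new-authz") authorization: pick a
// challenge type the caller can fulfil, provision it, accept it, and wait for
// the CA's verdict.
func authorizeDomain(ctx context.Context, client *acme.Client, domain, challengeType string,
	provision func(*acme.Challenge) error) error {

	authz, err := client.Authorize(ctx, domain)
	if err != nil {
		return err
	}
	if authz.Status == acme.StatusValid {
		return nil // a previous authorization is still valid
	}

	var chal *acme.Challenge
	for _, c := range authz.Challenges {
		if c.Type == challengeType {
			chal = c
			break
		}
	}
	if chal == nil {
		return fmt.Errorf("CA offered no %q challenge for %q", challengeType, domain)
	}

	// Make the response available (TXT record, HTTP handler, challenge cert, ...)
	// before asking the CA to verify it.
	if err := provision(chal); err != nil {
		return err
	}
	if _, err := client.Accept(ctx, chal); err != nil {
		return err
	}
	// Poll until the authorization reaches a final state.
	_, err = client.WaitAuthorization(ctx, authz.URI)
	return err
}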
-func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) { - for { - res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) - if err != nil { - return nil, err - } - - var raw wireAuthz - err = json.NewDecoder(res.Body).Decode(&raw) - res.Body.Close() - switch { - case err != nil: - // Skip and retry. - case raw.Status == StatusValid: - return raw.authorization(url), nil - case raw.Status == StatusInvalid: - return nil, raw.error(url) - } - - // Exponential backoff is implemented in c.get above. - // This is just to prevent continuously hitting the CA - // while waiting for a final authorization status. - d := retryAfter(res.Header.Get("Retry-After")) - if d == 0 { - // Given that the fastest challenges TLS-SNI and HTTP-01 - // require a CA to make at least 1 network round trip - // and most likely persist a challenge state, - // this default delay seems reasonable. - d = time.Second - } - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return nil, ctx.Err() - case <-t.C: - // Retry. - } - } -} - -// GetChallenge retrieves the current status of an challenge. -// -// A client typically polls a challenge status using this method. -func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) { - res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) - if err != nil { - return nil, err - } - defer res.Body.Close() - v := wireChallenge{URI: url} - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - return v.challenge(), nil -} - -// Accept informs the server that the client accepts one of its challenges -// previously obtained with c.Authorize. -// -// The server will then perform the validation asynchronously. -func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) { - auth, err := keyAuth(c.Key.Public(), chal.Token) - if err != nil { - return nil, err - } - - req := struct { - Resource string `json:"resource"` - Type string `json:"type"` - Auth string `json:"keyAuthorization"` - }{ - Resource: "challenge", - Type: chal.Type, - Auth: auth, - } - res, err := c.post(ctx, c.Key, chal.URI, req, wantStatus( - http.StatusOK, // according to the spec - http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md) - )) - if err != nil { - return nil, err - } - defer res.Body.Close() - - var v wireChallenge - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - return v.challenge(), nil -} - -// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response. -// A TXT record containing the returned value must be provisioned under -// "_acme-challenge" name of the domain being validated. -// -// The token argument is a Challenge.Token value. -func (c *Client) DNS01ChallengeRecord(token string) (string, error) { - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return "", err - } - b := sha256.Sum256([]byte(ka)) - return base64.RawURLEncoding.EncodeToString(b[:]), nil -} - -// HTTP01ChallengeResponse returns the response for an http-01 challenge. -// Servers should respond with the value to HTTP requests at the URL path -// provided by HTTP01ChallengePath to validate the challenge and prove control -// over a domain name. -// -// The token argument is a Challenge.Token value. 
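For the two lightweight challenge helpers documented here, a provisioning sketch showing where their return values have to end up; provisionChallenge, mux, domain and the log call are placeholders.

package example

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/crypto/acme"
)

// provisionChallenge publishes the response for a dns-01 or http-01 challenge.
func provisionChallenge(client *acme.Client, mux *http.ServeMux, domain string, chal *acme.Challenge) error {
	switch chal.Type {
	case "dns-01":
		txt, err := client.DNS01ChallengeRecord(chal.Token)
		if err != nil {
			return err
		}
		// The operator (or a DNS provider API) must publish this value
		// as a TXT record under _acme-challenge.<domain>.
		log.Printf("set TXT _acme-challenge.%s = %q", domain, txt)
	case "http-01":
		body, err := client.HTTP01ChallengeResponse(chal.Token)
		if err != nil {
			return err
		}
		// Served over plain HTTP on port 80.
		mux.HandleFunc(client.HTTP01ChallengePath(chal.Token), func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, body)
		})
	}
	return nil
}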
-func (c *Client) HTTP01ChallengeResponse(token string) (string, error) { - return keyAuth(c.Key.Public(), token) -} - -// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge -// should be provided by the servers. -// The response value can be obtained with HTTP01ChallengeResponse. -// -// The token argument is a Challenge.Token value. -func (c *Client) HTTP01ChallengePath(token string) string { - return "/.well-known/acme-challenge/" + token -} - -// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response. -// Servers can present the certificate to validate the challenge and prove control -// over a domain name. -// -// The implementation is incomplete in that the returned value is a single certificate, -// computed only for Z0 of the key authorization. ACME CAs are expected to update -// their implementations to use the newer version, TLS-SNI-02. -// For more details on TLS-SNI-01 see https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-7.3. -// -// The token argument is a Challenge.Token value. -// If a WithKey option is provided, its private part signs the returned cert, -// and the public part is used to specify the signee. -// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. -// -// The returned certificate is valid for the next 24 hours and must be presented only when -// the server name of the TLS ClientHello matches exactly the returned name value. -func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return tls.Certificate{}, "", err - } - b := sha256.Sum256([]byte(ka)) - h := hex.EncodeToString(b[:]) - name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:]) - cert, err = tlsChallengeCert([]string{name}, opt) - if err != nil { - return tls.Certificate{}, "", err - } - return cert, name, nil -} - -// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response. -// Servers can present the certificate to validate the challenge and prove control -// over a domain name. For more details on TLS-SNI-02 see -// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-7.3. -// -// The token argument is a Challenge.Token value. -// If a WithKey option is provided, its private part signs the returned cert, -// and the public part is used to specify the signee. -// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. -// -// The returned certificate is valid for the next 24 hours and must be presented only when -// the server name in the TLS ClientHello matches exactly the returned name value. -func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { - b := sha256.Sum256([]byte(token)) - h := hex.EncodeToString(b[:]) - sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:]) - - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return tls.Certificate{}, "", err - } - b = sha256.Sum256([]byte(ka)) - h = hex.EncodeToString(b[:]) - sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:]) - - cert, err = tlsChallengeCert([]string{sanA, sanB}, opt) - if err != nil { - return tls.Certificate{}, "", err - } - return cert, sanA, nil -} - -// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response. -// Servers can present the certificate to validate the challenge and prove control -// over a domain name. 
For more details on TLS-ALPN-01 see -// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3 -// -// The token argument is a Challenge.Token value. -// If a WithKey option is provided, its private part signs the returned cert, -// and the public part is used to specify the signee. -// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. -// -// The returned certificate is valid for the next 24 hours and must be presented only when -// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol -// has been specified. -func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) { - ka, err := keyAuth(c.Key.Public(), token) - if err != nil { - return tls.Certificate{}, err - } - shasum := sha256.Sum256([]byte(ka)) - extValue, err := asn1.Marshal(shasum[:]) - if err != nil { - return tls.Certificate{}, err - } - acmeExtension := pkix.Extension{ - Id: idPeACMEIdentifierV1, - Critical: true, - Value: extValue, - } - - tmpl := defaultTLSChallengeCertTemplate() - - var newOpt []CertOption - for _, o := range opt { - switch o := o.(type) { - case *certOptTemplate: - t := *(*x509.Certificate)(o) // shallow copy is ok - tmpl = &t - default: - newOpt = append(newOpt, o) - } - } - tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension) - newOpt = append(newOpt, WithTemplate(tmpl)) - return tlsChallengeCert([]string{domain}, newOpt) -} - -// doReg sends all types of registration requests. -// The type of request is identified by typ argument, which is a "resource" -// in the ACME spec terms. -// -// A non-nil acct argument indicates whether the intention is to mutate data -// of the Account. Only Contact and Agreement of its fields are used -// in such cases. -func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Account) (*Account, error) { - req := struct { - Resource string `json:"resource"` - Contact []string `json:"contact,omitempty"` - Agreement string `json:"agreement,omitempty"` - }{ - Resource: typ, - } - if acct != nil { - req.Contact = acct.Contact - req.Agreement = acct.AgreedTerms - } - res, err := c.post(ctx, c.Key, url, req, wantStatus( - http.StatusOK, // updates and deletes - http.StatusCreated, // new account creation - http.StatusAccepted, // Let's Encrypt divergent implementation - )) - if err != nil { - return nil, err - } - defer res.Body.Close() - - var v struct { - Contact []string - Agreement string - Authorizations string - Certificates string - } - if err := json.NewDecoder(res.Body).Decode(&v); err != nil { - return nil, fmt.Errorf("acme: invalid response: %v", err) - } - var tos string - if v := linkHeader(res.Header, "terms-of-service"); len(v) > 0 { - tos = v[0] - } - var authz string - if v := linkHeader(res.Header, "next"); len(v) > 0 { - authz = v[0] - } - return &Account{ - URI: res.Header.Get("Location"), - Contact: v.Contact, - AgreedTerms: v.Agreement, - CurrentTerms: tos, - Authz: authz, - Authorizations: v.Authorizations, - Certificates: v.Certificates, - }, nil -} - -// popNonce returns a nonce value previously stored with c.addNonce -// or fetches a fresh one from a URL by issuing a HEAD request. -// It first tries c.directoryURL() and then the provided url if the former fails. 
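Tying the TLSALPN01ChallengeCert helper above to a listener might look like the following sketch; alpnConfig, domain and getCert are hypothetical, and only ALPNProto and the challenge certificate it receives come from the package.

package example

import (
	"crypto/tls"
	"fmt"

	"golang.org/x/crypto/acme"
)

// alpnConfig answers tls-alpn-01 validation requests with the challenge
// certificate and lets ordinary connections fall through to getCert.
func alpnConfig(domain string, challengeCert *tls.Certificate,
	getCert func(*tls.ClientHelloInfo) (*tls.Certificate, error)) *tls.Config {

	return &tls.Config{
		// acme.ALPNProto must be negotiable for the validation to succeed.
		NextProtos: []string{"h2", "http/1.1", acme.ALPNProto},
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			// The CA's validator offers only the ACME ALPN protocol.
			if len(hello.SupportedProtos) == 1 && hello.SupportedProtos[0] == acme.ALPNProto {
				if hello.ServerName == domain {
					return challengeCert, nil
				}
				return nil, fmt.Errorf("no tls-alpn-01 certificate for %q", hello.ServerName)
			}
			return getCert(hello)
		},
	}
}

Here challengeCert would be obtained beforehand with client.TLSALPN01ChallengeCert(chal.Token, domain) and kept only for the duration of the authorization.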
-func (c *Client) popNonce(ctx context.Context, url string) (string, error) { - c.noncesMu.Lock() - defer c.noncesMu.Unlock() - if len(c.nonces) == 0 { - dirURL := c.directoryURL() - v, err := c.fetchNonce(ctx, dirURL) - if err != nil && url != dirURL { - v, err = c.fetchNonce(ctx, url) - } - return v, err - } - var nonce string - for nonce = range c.nonces { - delete(c.nonces, nonce) - break - } - return nonce, nil -} - -// clearNonces clears any stored nonces -func (c *Client) clearNonces() { - c.noncesMu.Lock() - defer c.noncesMu.Unlock() - c.nonces = make(map[string]struct{}) -} - -// addNonce stores a nonce value found in h (if any) for future use. -func (c *Client) addNonce(h http.Header) { - v := nonceFromHeader(h) - if v == "" { - return - } - c.noncesMu.Lock() - defer c.noncesMu.Unlock() - if len(c.nonces) >= maxNonces { - return - } - if c.nonces == nil { - c.nonces = make(map[string]struct{}) - } - c.nonces[v] = struct{}{} -} - -func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) { - r, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return "", err - } - resp, err := c.doNoRetry(ctx, r) - if err != nil { - return "", err - } - defer resp.Body.Close() - nonce := nonceFromHeader(resp.Header) - if nonce == "" { - if resp.StatusCode > 299 { - return "", responseError(resp) - } - return "", errors.New("acme: nonce not found") - } - return nonce, nil -} - -func nonceFromHeader(h http.Header) string { - return h.Get("Replay-Nonce") -} - -func (c *Client) responseCert(ctx context.Context, res *http.Response, bundle bool) ([][]byte, error) { - b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) - if err != nil { - return nil, fmt.Errorf("acme: response stream: %v", err) - } - if len(b) > maxCertSize { - return nil, errors.New("acme: certificate is too big") - } - cert := [][]byte{b} - if !bundle { - return cert, nil - } - - // Append CA chain cert(s). - // At least one is required according to the spec: - // https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-6.3.1 - up := linkHeader(res.Header, "up") - if len(up) == 0 { - return nil, errors.New("acme: rel=up link not found") - } - if len(up) > maxChainLen { - return nil, errors.New("acme: rel=up link is too large") - } - for _, url := range up { - cc, err := c.chainCert(ctx, url, 0) - if err != nil { - return nil, err - } - cert = append(cert, cc...) - } - return cert, nil -} - -// chainCert fetches CA certificate chain recursively by following "up" links. -// Each recursive call increments the depth by 1, resulting in an error -// if the recursion level reaches maxChainLen. -// -// First chainCert call starts with depth of 0. -func (c *Client) chainCert(ctx context.Context, url string, depth int) ([][]byte, error) { - if depth >= maxChainLen { - return nil, errors.New("acme: certificate chain is too deep") - } - - res, err := c.get(ctx, url, wantStatus(http.StatusOK)) - if err != nil { - return nil, err - } - defer res.Body.Close() - b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) - if err != nil { - return nil, err - } - if len(b) > maxCertSize { - return nil, errors.New("acme: certificate is too big") - } - chain := [][]byte{b} - - uplink := linkHeader(res.Header, "up") - if len(uplink) > maxChainLen { - return nil, errors.New("acme: certificate chain is too large") - } - for _, up := range uplink { - cc, err := c.chainCert(ctx, up, depth+1) - if err != nil { - return nil, err - } - chain = append(chain, cc...) 
- } - - return chain, nil -} - -// linkHeader returns URI-Reference values of all Link headers -// with relation-type rel. -// See https://tools.ietf.org/html/rfc5988#section-5 for details. -func linkHeader(h http.Header, rel string) []string { - var links []string - for _, v := range h["Link"] { - parts := strings.Split(v, ";") - for _, p := range parts { - p = strings.TrimSpace(p) - if !strings.HasPrefix(p, "rel=") { - continue - } - if v := strings.Trim(p[4:], `"`); v == rel { - links = append(links, strings.Trim(parts[0], "<>")) - } - } - } - return links -} - -// keyAuth generates a key authorization string for a given token. -func keyAuth(pub crypto.PublicKey, token string) (string, error) { - th, err := JWKThumbprint(pub) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%s", token, th), nil -} - -// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges. -func defaultTLSChallengeCertTemplate() *x509.Certificate { - return &x509.Certificate{ - SerialNumber: big.NewInt(1), - NotBefore: time.Now(), - NotAfter: time.Now().Add(24 * time.Hour), - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - } -} - -// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges -// with the given SANs and auto-generated public/private key pair. -// The Subject Common Name is set to the first SAN to aid debugging. -// To create a cert with a custom key pair, specify WithKey option. -func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { - var key crypto.Signer - tmpl := defaultTLSChallengeCertTemplate() - for _, o := range opt { - switch o := o.(type) { - case *certOptKey: - if key != nil { - return tls.Certificate{}, errors.New("acme: duplicate key option") - } - key = o.key - case *certOptTemplate: - t := *(*x509.Certificate)(o) // shallow copy is ok - tmpl = &t - default: - // package's fault, if we let this happen: - panic(fmt.Sprintf("unsupported option type %T", o)) - } - } - if key == nil { - var err error - if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { - return tls.Certificate{}, err - } - } - tmpl.DNSNames = san - if len(san) > 0 { - tmpl.Subject.CommonName = san[0] - } - - der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) - if err != nil { - return tls.Certificate{}, err - } - return tls.Certificate{ - Certificate: [][]byte{der}, - PrivateKey: key, - }, nil -} - -// encodePEM returns b encoded as PEM with block of type typ. -func encodePEM(typ string, b []byte) []byte { - pb := &pem.Block{Type: typ, Bytes: b} - return pem.EncodeToMemory(pb) -} - -// timeNow is useful for testing for fixed current time. -var timeNow = time.Now diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go deleted file mode 100644 index e562609cc..000000000 --- a/vendor/golang.org/x/crypto/acme/autocert/autocert.go +++ /dev/null @@ -1,1156 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package autocert provides automatic access to certificates from Let's Encrypt -// and any other ACME-based CA. -// -// This package is a work in progress and makes no API stability promises. 
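Typical use of the autocert package being removed here looks roughly like the sketch below; the host names, contact address and cache path are placeholders.

package main

import (
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		Cache:      autocert.DirCache("/var/cache/autocert"), // persists certs across restarts
		HostPolicy: autocert.HostWhitelist("example.com", "www.example.com"),
		Email:      "ops@example.com",
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello, TLS\n"))
	})

	// Port 80: answer http-01 challenges, redirect everything else to HTTPS.
	go func() { log.Fatal(http.ListenAndServe(":http", m.HTTPHandler(nil))) }()

	// Port 443: certificates are obtained and renewed on demand; the returned
	// TLS config also advertises acme.ALPNProto so tls-alpn-01 validation works.
	srv := &http.Server{Addr: ":https", Handler: mux, TLSConfig: m.TLSConfig()}
	log.Fatal(srv.ListenAndServeTLS("", ""))
}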
-package autocert - -import ( - "bytes" - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "io" - mathrand "math/rand" - "net" - "net/http" - "path" - "strings" - "sync" - "time" - - "golang.org/x/crypto/acme" - "golang.org/x/net/idna" -) - -// createCertRetryAfter is how much time to wait before removing a failed state -// entry due to an unsuccessful createCert call. -// This is a variable instead of a const for testing. -// TODO: Consider making it configurable or an exp backoff? -var createCertRetryAfter = time.Minute - -// pseudoRand is safe for concurrent use. -var pseudoRand *lockedMathRand - -func init() { - src := mathrand.NewSource(time.Now().UnixNano()) - pseudoRand = &lockedMathRand{rnd: mathrand.New(src)} -} - -// AcceptTOS is a Manager.Prompt function that always returns true to -// indicate acceptance of the CA's Terms of Service during account -// registration. -func AcceptTOS(tosURL string) bool { return true } - -// HostPolicy specifies which host names the Manager is allowed to respond to. -// It returns a non-nil error if the host should be rejected. -// The returned error is accessible via tls.Conn.Handshake and its callers. -// See Manager's HostPolicy field and GetCertificate method docs for more details. -type HostPolicy func(ctx context.Context, host string) error - -// HostWhitelist returns a policy where only the specified host names are allowed. -// Only exact matches are currently supported. Subdomains, regexp or wildcard -// will not match. -// -// Note that all hosts will be converted to Punycode via idna.Lookup.ToASCII so that -// Manager.GetCertificate can handle the Unicode IDN and mixedcase hosts correctly. -// Invalid hosts will be silently ignored. -func HostWhitelist(hosts ...string) HostPolicy { - whitelist := make(map[string]bool, len(hosts)) - for _, h := range hosts { - if h, err := idna.Lookup.ToASCII(h); err == nil { - whitelist[h] = true - } - } - return func(_ context.Context, host string) error { - if !whitelist[host] { - return fmt.Errorf("acme/autocert: host %q not configured in HostWhitelist", host) - } - return nil - } -} - -// defaultHostPolicy is used when Manager.HostPolicy is not set. -func defaultHostPolicy(context.Context, string) error { - return nil -} - -// Manager is a stateful certificate manager built on top of acme.Client. -// It obtains and refreshes certificates automatically using "tls-alpn-01", -// "tls-sni-01", "tls-sni-02" and "http-01" challenge types, -// as well as providing them to a TLS server via tls.Config. -// -// You must specify a cache implementation, such as DirCache, -// to reuse obtained certificates across program restarts. -// Otherwise your server is very likely to exceed the certificate -// issuer's request rate limits. -type Manager struct { - // Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS). - // The registration may require the caller to agree to the CA's TOS. - // If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report - // whether the caller agrees to the terms. - // - // To always accept the terms, the callers can use AcceptTOS. - Prompt func(tosURL string) bool - - // Cache optionally stores and retrieves previously-obtained certificates - // and other state. If nil, certs will only be cached for the lifetime of - // the Manager. Multiple Managers can share the same Cache. 
- // - // Using a persistent Cache, such as DirCache, is strongly recommended. - Cache Cache - - // HostPolicy controls which domains the Manager will attempt - // to retrieve new certificates for. It does not affect cached certs. - // - // If non-nil, HostPolicy is called before requesting a new cert. - // If nil, all hosts are currently allowed. This is not recommended, - // as it opens a potential attack where clients connect to a server - // by IP address and pretend to be asking for an incorrect host name. - // Manager will attempt to obtain a certificate for that host, incorrectly, - // eventually reaching the CA's rate limit for certificate requests - // and making it impossible to obtain actual certificates. - // - // See GetCertificate for more details. - HostPolicy HostPolicy - - // RenewBefore optionally specifies how early certificates should - // be renewed before they expire. - // - // If zero, they're renewed 30 days before expiration. - RenewBefore time.Duration - - // Client is used to perform low-level operations, such as account registration - // and requesting new certificates. - // - // If Client is nil, a zero-value acme.Client is used with acme.LetsEncryptURL - // as directory endpoint. If the Client.Key is nil, a new ECDSA P-256 key is - // generated and, if Cache is not nil, stored in cache. - // - // Mutating the field after the first call of GetCertificate method will have no effect. - Client *acme.Client - - // Email optionally specifies a contact email address. - // This is used by CAs, such as Let's Encrypt, to notify about problems - // with issued certificates. - // - // If the Client's account key is already registered, Email is not used. - Email string - - // ForceRSA used to make the Manager generate RSA certificates. It is now ignored. - // - // Deprecated: the Manager will request the correct type of certificate based - // on what each client supports. - ForceRSA bool - - // ExtraExtensions are used when generating a new CSR (Certificate Request), - // thus allowing customization of the resulting certificate. - // For instance, TLS Feature Extension (RFC 7633) can be used - // to prevent an OCSP downgrade attack. - // - // The field value is passed to crypto/x509.CreateCertificateRequest - // in the template's ExtraExtensions field as is. - ExtraExtensions []pkix.Extension - - clientMu sync.Mutex - client *acme.Client // initialized by acmeClient method - - stateMu sync.Mutex - state map[certKey]*certState - - // renewal tracks the set of domains currently running renewal timers. - renewalMu sync.Mutex - renewal map[certKey]*domainRenewal - - // tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens. - tokensMu sync.RWMutex - // tryHTTP01 indicates whether the Manager should try "http-01" challenge type - // during the authorization flow. - tryHTTP01 bool - // httpTokens contains response body values for http-01 challenges - // and is keyed by the URL path at which a challenge response is expected - // to be provisioned. - // The entries are stored for the duration of the authorization flow. - httpTokens map[string][]byte - // certTokens contains temporary certificates for tls-sni and tls-alpn challenges - // and is keyed by token domain name, which matches server name of ClientHello. - // Keys always have ".acme.invalid" suffix for tls-sni. Otherwise, they are domain names - // for tls-alpn. - // The entries are stored for the duration of the authorization flow. 
- certTokens map[string]*tls.Certificate - // nowFunc, if not nil, returns the current time. This may be set for - // testing purposes. - nowFunc func() time.Time -} - -// certKey is the key by which certificates are tracked in state, renewal and cache. -type certKey struct { - domain string // without trailing dot - isRSA bool // RSA cert for legacy clients (as opposed to default ECDSA) - isToken bool // tls-based challenge token cert; key type is undefined regardless of isRSA -} - -func (c certKey) String() string { - if c.isToken { - return c.domain + "+token" - } - if c.isRSA { - return c.domain + "+rsa" - } - return c.domain -} - -// TLSConfig creates a new TLS config suitable for net/http.Server servers, -// supporting HTTP/2 and the tls-alpn-01 ACME challenge type. -func (m *Manager) TLSConfig() *tls.Config { - return &tls.Config{ - GetCertificate: m.GetCertificate, - NextProtos: []string{ - "h2", "http/1.1", // enable HTTP/2 - acme.ALPNProto, // enable tls-alpn ACME challenges - }, - } -} - -// GetCertificate implements the tls.Config.GetCertificate hook. -// It provides a TLS certificate for hello.ServerName host, including answering -// tls-alpn-01 and *.acme.invalid (tls-sni-01 and tls-sni-02) challenges. -// All other fields of hello are ignored. -// -// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting -// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation. -// The error is propagated back to the caller of GetCertificate and is user-visible. -// This does not affect cached certs. See HostPolicy field description for more details. -// -// If GetCertificate is used directly, instead of via Manager.TLSConfig, package users will -// also have to add acme.ALPNProto to NextProtos for tls-alpn-01, or use HTTPHandler -// for http-01. (The tls-sni-* challenges have been deprecated by popular ACME providers -// due to security issues in the ecosystem.) -func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { - if m.Prompt == nil { - return nil, errors.New("acme/autocert: Manager.Prompt not set") - } - - name := hello.ServerName - if name == "" { - return nil, errors.New("acme/autocert: missing server name") - } - if !strings.Contains(strings.Trim(name, "."), ".") { - return nil, errors.New("acme/autocert: server name component count invalid") - } - - // Note that this conversion is necessary because some server names in the handshakes - // started by some clients (such as cURL) are not converted to Punycode, which will - // prevent us from obtaining certificates for them. In addition, we should also treat - // example.com and EXAMPLE.COM as equivalent and return the same certificate for them. - // Fortunately, this conversion also helped us deal with this kind of mixedcase problems. - // - // Due to the "σςΣ" problem (see https://unicode.org/faq/idn.html#22), we can't use - // idna.Punycode.ToASCII (or just idna.ToASCII) here. - name, err := idna.Lookup.ToASCII(name) - if err != nil { - return nil, errors.New("acme/autocert: server name contains invalid character") - } - - // In the worst-case scenario, the timeout needs to account for caching, host policy, - // domain ownership verification and certificate issuance. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - // Check whether this is a token cert requested for TLS-SNI or TLS-ALPN challenge. 
- if wantsTokenCert(hello) { - m.tokensMu.RLock() - defer m.tokensMu.RUnlock() - // It's ok to use the same token cert key for both tls-sni and tls-alpn - // because there's always at most 1 token cert per on-going domain authorization. - // See m.verify for details. - if cert := m.certTokens[name]; cert != nil { - return cert, nil - } - if cert, err := m.cacheGet(ctx, certKey{domain: name, isToken: true}); err == nil { - return cert, nil - } - // TODO: cache error results? - return nil, fmt.Errorf("acme/autocert: no token cert for %q", name) - } - - // regular domain - ck := certKey{ - domain: strings.TrimSuffix(name, "."), // golang.org/issue/18114 - isRSA: !supportsECDSA(hello), - } - cert, err := m.cert(ctx, ck) - if err == nil { - return cert, nil - } - if err != ErrCacheMiss { - return nil, err - } - - // first-time - if err := m.hostPolicy()(ctx, name); err != nil { - return nil, err - } - cert, err = m.createCert(ctx, ck) - if err != nil { - return nil, err - } - m.cachePut(ctx, ck, cert) - return cert, nil -} - -// wantsTokenCert reports whether a TLS request with SNI is made by a CA server -// for a challenge verification. -func wantsTokenCert(hello *tls.ClientHelloInfo) bool { - // tls-alpn-01 - if len(hello.SupportedProtos) == 1 && hello.SupportedProtos[0] == acme.ALPNProto { - return true - } - // tls-sni-xx - return strings.HasSuffix(hello.ServerName, ".acme.invalid") -} - -func supportsECDSA(hello *tls.ClientHelloInfo) bool { - // The "signature_algorithms" extension, if present, limits the key exchange - // algorithms allowed by the cipher suites. See RFC 5246, section 7.4.1.4.1. - if hello.SignatureSchemes != nil { - ecdsaOK := false - schemeLoop: - for _, scheme := range hello.SignatureSchemes { - const tlsECDSAWithSHA1 tls.SignatureScheme = 0x0203 // constant added in Go 1.10 - switch scheme { - case tlsECDSAWithSHA1, tls.ECDSAWithP256AndSHA256, - tls.ECDSAWithP384AndSHA384, tls.ECDSAWithP521AndSHA512: - ecdsaOK = true - break schemeLoop - } - } - if !ecdsaOK { - return false - } - } - if hello.SupportedCurves != nil { - ecdsaOK := false - for _, curve := range hello.SupportedCurves { - if curve == tls.CurveP256 { - ecdsaOK = true - break - } - } - if !ecdsaOK { - return false - } - } - for _, suite := range hello.CipherSuites { - switch suite { - case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: - return true - } - } - return false -} - -// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses. -// It returns an http.Handler that responds to the challenges and must be -// running on port 80. If it receives a request that is not an ACME challenge, -// it delegates the request to the optional fallback handler. -// -// If fallback is nil, the returned handler redirects all GET and HEAD requests -// to the default TLS port 443 with 302 Found status code, preserving the original -// request path and query. It responds with 400 Bad Request to all other HTTP methods. -// The fallback is not protected by the optional HostPolicy. -// -// Because the fallback handler is run with unencrypted port 80 requests, -// the fallback should not serve TLS-only requests. -// -// If HTTPHandler is never called, the Manager will only use the "tls-alpn-01" -// challenge for domain verification. 
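If the port-80 fallback described above needs to do more than redirect, a custom handler can be passed to HTTPHandler. plainHTTP and the /healthz path below are illustrative only.

package example

import (
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

// plainHTTP builds a port-80 handler: the Manager answers
// /.well-known/acme-challenge/ requests itself, a /healthz probe stays on
// plain HTTP, and everything else is redirected to HTTPS.
func plainHTTP(m *autocert.Manager) http.Handler {
	fallback := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/healthz" {
			w.WriteHeader(http.StatusOK)
			return
		}
		http.Redirect(w, r, "https://"+r.Host+r.URL.RequestURI(), http.StatusFound)
	})
	return m.HTTPHandler(fallback)
}

It would be wired up the same way as before, e.g. go http.ListenAndServe(":80", plainHTTP(m)).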
-func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - m.tryHTTP01 = true - - if fallback == nil { - fallback = http.HandlerFunc(handleHTTPRedirect) - } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") { - fallback.ServeHTTP(w, r) - return - } - // A reasonable context timeout for cache and host policy only, - // because we don't wait for a new certificate issuance here. - ctx, cancel := context.WithTimeout(r.Context(), time.Minute) - defer cancel() - if err := m.hostPolicy()(ctx, r.Host); err != nil { - http.Error(w, err.Error(), http.StatusForbidden) - return - } - data, err := m.httpToken(ctx, r.URL.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - w.Write(data) - }) -} - -func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" && r.Method != "HEAD" { - http.Error(w, "Use HTTPS", http.StatusBadRequest) - return - } - target := "https://" + stripPort(r.Host) + r.URL.RequestURI() - http.Redirect(w, r, target, http.StatusFound) -} - -func stripPort(hostport string) string { - host, _, err := net.SplitHostPort(hostport) - if err != nil { - return hostport - } - return net.JoinHostPort(host, "443") -} - -// cert returns an existing certificate either from m.state or cache. -// If a certificate is found in cache but not in m.state, the latter will be filled -// with the cached value. -func (m *Manager) cert(ctx context.Context, ck certKey) (*tls.Certificate, error) { - m.stateMu.Lock() - if s, ok := m.state[ck]; ok { - m.stateMu.Unlock() - s.RLock() - defer s.RUnlock() - return s.tlscert() - } - defer m.stateMu.Unlock() - cert, err := m.cacheGet(ctx, ck) - if err != nil { - return nil, err - } - signer, ok := cert.PrivateKey.(crypto.Signer) - if !ok { - return nil, errors.New("acme/autocert: private key cannot sign") - } - if m.state == nil { - m.state = make(map[certKey]*certState) - } - s := &certState{ - key: signer, - cert: cert.Certificate, - leaf: cert.Leaf, - } - m.state[ck] = s - go m.renew(ck, s.key, s.leaf.NotAfter) - return cert, nil -} - -// cacheGet always returns a valid certificate, or an error otherwise. -// If a cached certificate exists but is not valid, ErrCacheMiss is returned. -func (m *Manager) cacheGet(ctx context.Context, ck certKey) (*tls.Certificate, error) { - if m.Cache == nil { - return nil, ErrCacheMiss - } - data, err := m.Cache.Get(ctx, ck.String()) - if err != nil { - return nil, err - } - - // private - priv, pub := pem.Decode(data) - if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { - return nil, ErrCacheMiss - } - privKey, err := parsePrivateKey(priv.Bytes) - if err != nil { - return nil, err - } - - // public - var pubDER [][]byte - for len(pub) > 0 { - var b *pem.Block - b, pub = pem.Decode(pub) - if b == nil { - break - } - pubDER = append(pubDER, b.Bytes) - } - if len(pub) > 0 { - // Leftover content not consumed by pem.Decode. Corrupt. Ignore. 
- return nil, ErrCacheMiss - } - - // verify and create TLS cert - leaf, err := validCert(ck, pubDER, privKey, m.now()) - if err != nil { - return nil, ErrCacheMiss - } - tlscert := &tls.Certificate{ - Certificate: pubDER, - PrivateKey: privKey, - Leaf: leaf, - } - return tlscert, nil -} - -func (m *Manager) cachePut(ctx context.Context, ck certKey, tlscert *tls.Certificate) error { - if m.Cache == nil { - return nil - } - - // contains PEM-encoded data - var buf bytes.Buffer - - // private - switch key := tlscert.PrivateKey.(type) { - case *ecdsa.PrivateKey: - if err := encodeECDSAKey(&buf, key); err != nil { - return err - } - case *rsa.PrivateKey: - b := x509.MarshalPKCS1PrivateKey(key) - pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b} - if err := pem.Encode(&buf, pb); err != nil { - return err - } - default: - return errors.New("acme/autocert: unknown private key type") - } - - // public - for _, b := range tlscert.Certificate { - pb := &pem.Block{Type: "CERTIFICATE", Bytes: b} - if err := pem.Encode(&buf, pb); err != nil { - return err - } - } - - return m.Cache.Put(ctx, ck.String(), buf.Bytes()) -} - -func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error { - b, err := x509.MarshalECPrivateKey(key) - if err != nil { - return err - } - pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - return pem.Encode(w, pb) -} - -// createCert starts the domain ownership verification and returns a certificate -// for that domain upon success. -// -// If the domain is already being verified, it waits for the existing verification to complete. -// Either way, createCert blocks for the duration of the whole process. -func (m *Manager) createCert(ctx context.Context, ck certKey) (*tls.Certificate, error) { - // TODO: maybe rewrite this whole piece using sync.Once - state, err := m.certState(ck) - if err != nil { - return nil, err - } - // state may exist if another goroutine is already working on it - // in which case just wait for it to finish - if !state.locked { - state.RLock() - defer state.RUnlock() - return state.tlscert() - } - - // We are the first; state is locked. - // Unblock the readers when domain ownership is verified - // and we got the cert or the process failed. - defer state.Unlock() - state.locked = false - - der, leaf, err := m.authorizedCert(ctx, state.key, ck) - if err != nil { - // Remove the failed state after some time, - // making the manager call createCert again on the following TLS hello. - time.AfterFunc(createCertRetryAfter, func() { - defer testDidRemoveState(ck) - m.stateMu.Lock() - defer m.stateMu.Unlock() - // Verify the state hasn't changed and it's still invalid - // before deleting. - s, ok := m.state[ck] - if !ok { - return - } - if _, err := validCert(ck, s.cert, s.key, m.now()); err == nil { - return - } - delete(m.state, ck) - }) - return nil, err - } - state.cert = der - state.leaf = leaf - go m.renew(ck, state.key, state.leaf.NotAfter) - return state.tlscert() -} - -// certState returns a new or existing certState. -// If a new certState is returned, state.exist is false and the state is locked. -// The returned error is non-nil only in the case where a new state could not be created. 
-func (m *Manager) certState(ck certKey) (*certState, error) { - m.stateMu.Lock() - defer m.stateMu.Unlock() - if m.state == nil { - m.state = make(map[certKey]*certState) - } - // existing state - if state, ok := m.state[ck]; ok { - return state, nil - } - - // new locked state - var ( - err error - key crypto.Signer - ) - if ck.isRSA { - key, err = rsa.GenerateKey(rand.Reader, 2048) - } else { - key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - } - if err != nil { - return nil, err - } - - state := &certState{ - key: key, - locked: true, - } - state.Lock() // will be unlocked by m.certState caller - m.state[ck] = state - return state, nil -} - -// authorizedCert starts the domain ownership verification process and requests a new cert upon success. -// The key argument is the certificate private key. -func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, ck certKey) (der [][]byte, leaf *x509.Certificate, err error) { - client, err := m.acmeClient(ctx) - if err != nil { - return nil, nil, err - } - - if err := m.verify(ctx, client, ck.domain); err != nil { - return nil, nil, err - } - csr, err := certRequest(key, ck.domain, m.ExtraExtensions) - if err != nil { - return nil, nil, err - } - der, _, err = client.CreateCert(ctx, csr, 0, true) - if err != nil { - return nil, nil, err - } - leaf, err = validCert(ck, der, key, m.now()) - if err != nil { - return nil, nil, err - } - return der, leaf, nil -} - -// revokePendingAuthz revokes all authorizations idenfied by the elements of uri slice. -// It ignores revocation errors. -func (m *Manager) revokePendingAuthz(ctx context.Context, uri []string) { - client, err := m.acmeClient(ctx) - if err != nil { - return - } - for _, u := range uri { - client.RevokeAuthorization(ctx, u) - } -} - -// verify runs the identifier (domain) authorization flow -// using each applicable ACME challenge type. -func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error { - // The list of challenge types we'll try to fulfill - // in this specific order. - challengeTypes := []string{"tls-alpn-01", "tls-sni-02", "tls-sni-01"} - m.tokensMu.RLock() - if m.tryHTTP01 { - challengeTypes = append(challengeTypes, "http-01") - } - m.tokensMu.RUnlock() - - // Keep track of pending authzs and revoke the ones that did not validate. - pendingAuthzs := make(map[string]bool) - defer func() { - var uri []string - for k, pending := range pendingAuthzs { - if pending { - uri = append(uri, k) - } - } - if len(uri) > 0 { - // Use "detached" background context. - // The revocations need not happen in the current verification flow. - go m.revokePendingAuthz(context.Background(), uri) - } - }() - - // errs accumulates challenge failure errors, printed if all fail - errs := make(map[*acme.Challenge]error) - var nextTyp int // challengeType index of the next challenge type to try - for { - // Start domain authorization and get the challenge. - authz, err := client.Authorize(ctx, domain) - if err != nil { - return err - } - // No point in accepting challenges if the authorization status - // is in a final state. - switch authz.Status { - case acme.StatusValid: - return nil // already authorized - case acme.StatusInvalid: - return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI) - } - - pendingAuthzs[authz.URI] = true - - // Pick the next preferred challenge. 
- var chal *acme.Challenge - for chal == nil && nextTyp < len(challengeTypes) { - chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges) - nextTyp++ - } - if chal == nil { - errorMsg := fmt.Sprintf("acme/autocert: unable to authorize %q", domain) - for chal, err := range errs { - errorMsg += fmt.Sprintf("; challenge %q failed with error: %v", chal.Type, err) - } - return errors.New(errorMsg) - } - cleanup, err := m.fulfill(ctx, client, chal, domain) - if err != nil { - errs[chal] = err - continue - } - defer cleanup() - if _, err := client.Accept(ctx, chal); err != nil { - errs[chal] = err - continue - } - - // A challenge is fulfilled and accepted: wait for the CA to validate. - if _, err := client.WaitAuthorization(ctx, authz.URI); err != nil { - errs[chal] = err - continue - } - delete(pendingAuthzs, authz.URI) - return nil - } -} - -// fulfill provisions a response to the challenge chal. -// The cleanup is non-nil only if provisioning succeeded. -func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge, domain string) (cleanup func(), err error) { - switch chal.Type { - case "tls-alpn-01": - cert, err := client.TLSALPN01ChallengeCert(chal.Token, domain) - if err != nil { - return nil, err - } - m.putCertToken(ctx, domain, &cert) - return func() { go m.deleteCertToken(domain) }, nil - case "tls-sni-01": - cert, name, err := client.TLSSNI01ChallengeCert(chal.Token) - if err != nil { - return nil, err - } - m.putCertToken(ctx, name, &cert) - return func() { go m.deleteCertToken(name) }, nil - case "tls-sni-02": - cert, name, err := client.TLSSNI02ChallengeCert(chal.Token) - if err != nil { - return nil, err - } - m.putCertToken(ctx, name, &cert) - return func() { go m.deleteCertToken(name) }, nil - case "http-01": - resp, err := client.HTTP01ChallengeResponse(chal.Token) - if err != nil { - return nil, err - } - p := client.HTTP01ChallengePath(chal.Token) - m.putHTTPToken(ctx, p, resp) - return func() { go m.deleteHTTPToken(p) }, nil - } - return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type) -} - -func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge { - for _, c := range chal { - if c.Type == typ { - return c - } - } - return nil -} - -// putCertToken stores the token certificate with the specified name -// in both m.certTokens map and m.Cache. -func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - if m.certTokens == nil { - m.certTokens = make(map[string]*tls.Certificate) - } - m.certTokens[name] = cert - m.cachePut(ctx, certKey{domain: name, isToken: true}, cert) -} - -// deleteCertToken removes the token certificate with the specified name -// from both m.certTokens map and m.Cache. -func (m *Manager) deleteCertToken(name string) { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - delete(m.certTokens, name) - if m.Cache != nil { - ck := certKey{domain: name, isToken: true} - m.Cache.Delete(context.Background(), ck.String()) - } -} - -// httpToken retrieves an existing http-01 token value from an in-memory map -// or the optional cache. 
-func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) { - m.tokensMu.RLock() - defer m.tokensMu.RUnlock() - if v, ok := m.httpTokens[tokenPath]; ok { - return v, nil - } - if m.Cache == nil { - return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath) - } - return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath)) -} - -// putHTTPToken stores an http-01 token value using tokenPath as key -// in both in-memory map and the optional Cache. -// -// It ignores any error returned from Cache.Put. -func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - if m.httpTokens == nil { - m.httpTokens = make(map[string][]byte) - } - b := []byte(val) - m.httpTokens[tokenPath] = b - if m.Cache != nil { - m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b) - } -} - -// deleteHTTPToken removes an http-01 token value from both in-memory map -// and the optional Cache, ignoring any error returned from the latter. -// -// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout. -func (m *Manager) deleteHTTPToken(tokenPath string) { - m.tokensMu.Lock() - defer m.tokensMu.Unlock() - delete(m.httpTokens, tokenPath) - if m.Cache != nil { - m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath)) - } -} - -// httpTokenCacheKey returns a key at which an http-01 token value may be stored -// in the Manager's optional Cache. -func httpTokenCacheKey(tokenPath string) string { - return path.Base(tokenPath) + "+http-01" -} - -// renew starts a cert renewal timer loop, one per domain. -// -// The loop is scheduled in two cases: -// - a cert was fetched from cache for the first time (wasn't in m.state) -// - a new cert was created by m.createCert -// -// The key argument is a certificate private key. -// The exp argument is the cert expiration time (NotAfter). -func (m *Manager) renew(ck certKey, key crypto.Signer, exp time.Time) { - m.renewalMu.Lock() - defer m.renewalMu.Unlock() - if m.renewal[ck] != nil { - // another goroutine is already on it - return - } - if m.renewal == nil { - m.renewal = make(map[certKey]*domainRenewal) - } - dr := &domainRenewal{m: m, ck: ck, key: key} - m.renewal[ck] = dr - dr.start(exp) -} - -// stopRenew stops all currently running cert renewal timers. -// The timers are not restarted during the lifetime of the Manager. -func (m *Manager) stopRenew() { - m.renewalMu.Lock() - defer m.renewalMu.Unlock() - for name, dr := range m.renewal { - delete(m.renewal, name) - dr.stop() - } -} - -func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) { - const keyName = "acme_account+key" - - // Previous versions of autocert stored the value under a different key. 
- const legacyKeyName = "acme_account.key" - - genKey := func() (*ecdsa.PrivateKey, error) { - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - } - - if m.Cache == nil { - return genKey() - } - - data, err := m.Cache.Get(ctx, keyName) - if err == ErrCacheMiss { - data, err = m.Cache.Get(ctx, legacyKeyName) - } - if err == ErrCacheMiss { - key, err := genKey() - if err != nil { - return nil, err - } - var buf bytes.Buffer - if err := encodeECDSAKey(&buf, key); err != nil { - return nil, err - } - if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil { - return nil, err - } - return key, nil - } - if err != nil { - return nil, err - } - - priv, _ := pem.Decode(data) - if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { - return nil, errors.New("acme/autocert: invalid account key found in cache") - } - return parsePrivateKey(priv.Bytes) -} - -func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) { - m.clientMu.Lock() - defer m.clientMu.Unlock() - if m.client != nil { - return m.client, nil - } - - client := m.Client - if client == nil { - client = &acme.Client{DirectoryURL: acme.LetsEncryptURL} - } - if client.Key == nil { - var err error - client.Key, err = m.accountKey(ctx) - if err != nil { - return nil, err - } - } - var contact []string - if m.Email != "" { - contact = []string{"mailto:" + m.Email} - } - a := &acme.Account{Contact: contact} - _, err := client.Register(ctx, a, m.Prompt) - if ae, ok := err.(*acme.Error); err == nil || ok && ae.StatusCode == http.StatusConflict { - // conflict indicates the key is already registered - m.client = client - err = nil - } - return m.client, err -} - -func (m *Manager) hostPolicy() HostPolicy { - if m.HostPolicy != nil { - return m.HostPolicy - } - return defaultHostPolicy -} - -func (m *Manager) renewBefore() time.Duration { - if m.RenewBefore > renewJitter { - return m.RenewBefore - } - return 720 * time.Hour // 30 days -} - -func (m *Manager) now() time.Time { - if m.nowFunc != nil { - return m.nowFunc() - } - return time.Now() -} - -// certState is ready when its mutex is unlocked for reading. -type certState struct { - sync.RWMutex - locked bool // locked for read/write - key crypto.Signer // private key for cert - cert [][]byte // DER encoding - leaf *x509.Certificate // parsed cert[0]; always non-nil if cert != nil -} - -// tlscert creates a tls.Certificate from s.key and s.cert. -// Callers should wrap it in s.RLock() and s.RUnlock(). -func (s *certState) tlscert() (*tls.Certificate, error) { - if s.key == nil { - return nil, errors.New("acme/autocert: missing signer") - } - if len(s.cert) == 0 { - return nil, errors.New("acme/autocert: missing certificate") - } - return &tls.Certificate{ - PrivateKey: s.key, - Certificate: s.cert, - Leaf: s.leaf, - }, nil -} - -// certRequest generates a CSR for the given common name cn and optional SANs. -func certRequest(key crypto.Signer, cn string, ext []pkix.Extension, san ...string) ([]byte, error) { - req := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: cn}, - DNSNames: san, - ExtraExtensions: ext, - } - return x509.CreateCertificateRequest(rand.Reader, req, key) -} - -// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates -// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. -// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. -// -// Inspired by parsePrivateKey in crypto/tls/tls.go. 
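As a side note on why three decoders are tried below: the same key material can legitimately arrive in different DER encodings. keyEncodings is a hypothetical demonstration using only standard-library calls.

package example

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
)

// keyEncodings marshals one ECDSA key as SEC 1 ("EC PRIVATE KEY") and as
// PKCS#8 ("PRIVATE KEY"); RSA keys may additionally appear as PKCS#1.
func keyEncodings() (sec1PEM, pkcs8PEM []byte, err error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, nil, err
	}
	sec1, err := x509.MarshalECPrivateKey(key) // read back with x509.ParseECPrivateKey
	if err != nil {
		return nil, nil, err
	}
	pkcs8, err := x509.MarshalPKCS8PrivateKey(key) // read back with x509.ParsePKCS8PrivateKey
	if err != nil {
		return nil, nil, err
	}
	sec1PEM = pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: sec1})
	pkcs8PEM = pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: pkcs8})
	return sec1PEM, pkcs8PEM, nil
}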
-func parsePrivateKey(der []byte) (crypto.Signer, error) { - if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { - return key, nil - } - if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { - switch key := key.(type) { - case *rsa.PrivateKey: - return key, nil - case *ecdsa.PrivateKey: - return key, nil - default: - return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping") - } - } - if key, err := x509.ParseECPrivateKey(der); err == nil { - return key, nil - } - - return nil, errors.New("acme/autocert: failed to parse private key") -} - -// validCert parses a cert chain provided as der argument and verifies the leaf and der[0] -// correspond to the private key, the domain and key type match, and expiration dates -// are valid. It doesn't do any revocation checking. -// -// The returned value is the verified leaf cert. -func validCert(ck certKey, der [][]byte, key crypto.Signer, now time.Time) (leaf *x509.Certificate, err error) { - // parse public part(s) - var n int - for _, b := range der { - n += len(b) - } - pub := make([]byte, n) - n = 0 - for _, b := range der { - n += copy(pub[n:], b) - } - x509Cert, err := x509.ParseCertificates(pub) - if err != nil || len(x509Cert) == 0 { - return nil, errors.New("acme/autocert: no public key found") - } - // verify the leaf is not expired and matches the domain name - leaf = x509Cert[0] - if now.Before(leaf.NotBefore) { - return nil, errors.New("acme/autocert: certificate is not valid yet") - } - if now.After(leaf.NotAfter) { - return nil, errors.New("acme/autocert: expired certificate") - } - if err := leaf.VerifyHostname(ck.domain); err != nil { - return nil, err - } - // ensure the leaf corresponds to the private key and matches the certKey type - switch pub := leaf.PublicKey.(type) { - case *rsa.PublicKey: - prv, ok := key.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("acme/autocert: private key type does not match public key type") - } - if pub.N.Cmp(prv.N) != 0 { - return nil, errors.New("acme/autocert: private key does not match public key") - } - if !ck.isRSA && !ck.isToken { - return nil, errors.New("acme/autocert: key type does not match expected value") - } - case *ecdsa.PublicKey: - prv, ok := key.(*ecdsa.PrivateKey) - if !ok { - return nil, errors.New("acme/autocert: private key type does not match public key type") - } - if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 { - return nil, errors.New("acme/autocert: private key does not match public key") - } - if ck.isRSA && !ck.isToken { - return nil, errors.New("acme/autocert: key type does not match expected value") - } - default: - return nil, errors.New("acme/autocert: unknown public key algorithm") - } - return leaf, nil -} - -type lockedMathRand struct { - sync.Mutex - rnd *mathrand.Rand -} - -func (r *lockedMathRand) int63n(max int64) int64 { - r.Lock() - n := r.rnd.Int63n(max) - r.Unlock() - return n -} - -// For easier testing. -var ( - // Called when a state is removed. - testDidRemoveState = func(certKey) {} -) diff --git a/vendor/golang.org/x/crypto/acme/autocert/cache.go b/vendor/golang.org/x/crypto/acme/autocert/cache.go deleted file mode 100644 index aa9aa845c..000000000 --- a/vendor/golang.org/x/crypto/acme/autocert/cache.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
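validCert, removed above, accepts a cached chain only if the leaf is inside its validity window, matches the requested domain, and corresponds to the cached private key. A self-contained sketch of those three checks against a throwaway self-signed certificate (the domain name is a placeholder):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"log"
	"math/big"
	"time"
)

func main() {
	// Throwaway self-signed ECDSA certificate to run the checks against.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example.org"},
		DNSNames:     []string{"example.org"},
		NotBefore:    time.Now().Add(-time.Hour),
		NotAfter:     time.Now().Add(24 * time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	leaf, err := x509.ParseCertificate(der)
	if err != nil {
		log.Fatal(err)
	}

	// The same three checks validCert applies to a cached chain.
	now := time.Now()
	if now.Before(leaf.NotBefore) || now.After(leaf.NotAfter) {
		log.Fatal("certificate outside its validity window")
	}
	if err := leaf.VerifyHostname("example.org"); err != nil {
		log.Fatal(err)
	}
	pub, ok := leaf.PublicKey.(*ecdsa.PublicKey)
	if !ok || pub.X.Cmp(key.X) != 0 || pub.Y.Cmp(key.Y) != 0 {
		log.Fatal("private key does not match certificate public key")
	}
	fmt.Println("leaf is valid for example.org and matches the key")
}
```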
- -package autocert - -import ( - "context" - "errors" - "io/ioutil" - "os" - "path/filepath" -) - -// ErrCacheMiss is returned when a certificate is not found in cache. -var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss") - -// Cache is used by Manager to store and retrieve previously obtained certificates -// and other account data as opaque blobs. -// -// Cache implementations should not rely on the key naming pattern. Keys can -// include any printable ASCII characters, except the following: \/:*?"<>| -type Cache interface { - // Get returns a certificate data for the specified key. - // If there's no such key, Get returns ErrCacheMiss. - Get(ctx context.Context, key string) ([]byte, error) - - // Put stores the data in the cache under the specified key. - // Underlying implementations may use any data storage format, - // as long as the reverse operation, Get, results in the original data. - Put(ctx context.Context, key string, data []byte) error - - // Delete removes a certificate data from the cache under the specified key. - // If there's no such key in the cache, Delete returns nil. - Delete(ctx context.Context, key string) error -} - -// DirCache implements Cache using a directory on the local filesystem. -// If the directory does not exist, it will be created with 0700 permissions. -type DirCache string - -// Get reads a certificate data from the specified file name. -func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) { - name = filepath.Join(string(d), name) - var ( - data []byte - err error - done = make(chan struct{}) - ) - go func() { - data, err = ioutil.ReadFile(name) - close(done) - }() - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-done: - } - if os.IsNotExist(err) { - return nil, ErrCacheMiss - } - return data, err -} - -// Put writes the certificate data to the specified file name. -// The file will be created with 0600 permissions. -func (d DirCache) Put(ctx context.Context, name string, data []byte) error { - if err := os.MkdirAll(string(d), 0700); err != nil { - return err - } - - done := make(chan struct{}) - var err error - go func() { - defer close(done) - var tmp string - if tmp, err = d.writeTempFile(name, data); err != nil { - return - } - select { - case <-ctx.Done(): - // Don't overwrite the file if the context was canceled. - default: - newName := filepath.Join(string(d), name) - err = os.Rename(tmp, newName) - } - }() - select { - case <-ctx.Done(): - return ctx.Err() - case <-done: - } - return err -} - -// Delete removes the specified file name. -func (d DirCache) Delete(ctx context.Context, name string) error { - name = filepath.Join(string(d), name) - var ( - err error - done = make(chan struct{}) - ) - go func() { - err = os.Remove(name) - close(done) - }() - select { - case <-ctx.Done(): - return ctx.Err() - case <-done: - } - if err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// writeTempFile writes b to a temporary file, closes the file and returns its path. 
-func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) { - // TempFile uses 0600 permissions - f, err := ioutil.TempFile(string(d), prefix) - if err != nil { - return "", err - } - if _, err := f.Write(b); err != nil { - f.Close() - return "", err - } - return f.Name(), f.Close() -} diff --git a/vendor/golang.org/x/crypto/acme/autocert/listener.go b/vendor/golang.org/x/crypto/acme/autocert/listener.go deleted file mode 100644 index 1e069818a..000000000 --- a/vendor/golang.org/x/crypto/acme/autocert/listener.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package autocert - -import ( - "crypto/tls" - "log" - "net" - "os" - "path/filepath" - "runtime" - "time" -) - -// NewListener returns a net.Listener that listens on the standard TLS -// port (443) on all interfaces and returns *tls.Conn connections with -// LetsEncrypt certificates for the provided domain or domains. -// -// It enables one-line HTTPS servers: -// -// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler)) -// -// NewListener is a convenience function for a common configuration. -// More complex or custom configurations can use the autocert.Manager -// type instead. -// -// Use of this function implies acceptance of the LetsEncrypt Terms of -// Service. If domains is not empty, the provided domains are passed -// to HostWhitelist. If domains is empty, the listener will do -// LetsEncrypt challenges for any requested domain, which is not -// recommended. -// -// Certificates are cached in a "golang-autocert" directory under an -// operating system-specific cache or temp directory. This may not -// be suitable for servers spanning multiple machines. -// -// The returned listener uses a *tls.Config that enables HTTP/2, and -// should only be used with servers that support HTTP/2. -// -// The returned Listener also enables TCP keep-alives on the accepted -// connections. The returned *tls.Conn are returned before their TLS -// handshake has completed. -func NewListener(domains ...string) net.Listener { - m := &Manager{ - Prompt: AcceptTOS, - } - if len(domains) > 0 { - m.HostPolicy = HostWhitelist(domains...) - } - dir := cacheDir() - if err := os.MkdirAll(dir, 0700); err != nil { - log.Printf("warning: autocert.NewListener not using a cache: %v", err) - } else { - m.Cache = DirCache(dir) - } - return m.Listener() -} - -// Listener listens on the standard TLS port (443) on all interfaces -// and returns a net.Listener returning *tls.Conn connections. -// -// The returned listener uses a *tls.Config that enables HTTP/2, and -// should only be used with servers that support HTTP/2. -// -// The returned Listener also enables TCP keep-alives on the accepted -// connections. The returned *tls.Conn are returned before their TLS -// handshake has completed. -// -// Unlike NewListener, it is the caller's responsibility to initialize -// the Manager m's Prompt, Cache, HostPolicy, and other desired options. 
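The Cache interface and DirCache removed here are exercised through the exported Manager API, which is unchanged upstream. A minimal sketch of the usual wiring, with hostnames and the cache directory as placeholders:

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("example.org", "www.example.org"),
		Cache:      autocert.DirCache("/var/cache/autocert"), // any Cache implementation works here
	}

	// Port 80: serve http-01 challenges and redirect everything else to HTTPS.
	go func() {
		log.Fatal(http.ListenAndServe(":80", m.HTTPHandler(nil)))
	}()

	// Port 443: TLS config wired to the manager's GetCertificate.
	srv := &http.Server{
		Addr:      ":443",
		TLSConfig: m.TLSConfig(),
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello over ACME-managed TLS\n"))
		}),
	}
	log.Fatal(srv.ListenAndServeTLS("", ""))
}
```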
-func (m *Manager) Listener() net.Listener { - ln := &listener{ - m: m, - conf: m.TLSConfig(), - } - ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443") - return ln -} - -type listener struct { - m *Manager - conf *tls.Config - - tcpListener net.Listener - tcpListenErr error -} - -func (ln *listener) Accept() (net.Conn, error) { - if ln.tcpListenErr != nil { - return nil, ln.tcpListenErr - } - conn, err := ln.tcpListener.Accept() - if err != nil { - return nil, err - } - tcpConn := conn.(*net.TCPConn) - - // Because Listener is a convenience function, help out with - // this too. This is not possible for the caller to set once - // we return a *tcp.Conn wrapping an inaccessible net.Conn. - // If callers don't want this, they can do things the manual - // way and tweak as needed. But this is what net/http does - // itself, so copy that. If net/http changes, we can change - // here too. - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(3 * time.Minute) - - return tls.Server(tcpConn, ln.conf), nil -} - -func (ln *listener) Addr() net.Addr { - if ln.tcpListener != nil { - return ln.tcpListener.Addr() - } - // net.Listen failed. Return something non-nil in case callers - // call Addr before Accept: - return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443} -} - -func (ln *listener) Close() error { - if ln.tcpListenErr != nil { - return ln.tcpListenErr - } - return ln.tcpListener.Close() -} - -func homeDir() string { - if runtime.GOOS == "windows" { - return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - } - if h := os.Getenv("HOME"); h != "" { - return h - } - return "/" -} - -func cacheDir() string { - const base = "golang-autocert" - switch runtime.GOOS { - case "darwin": - return filepath.Join(homeDir(), "Library", "Caches", base) - case "windows": - for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} { - if v := os.Getenv(ev); v != "" { - return filepath.Join(v, base) - } - } - // Worst case: - return filepath.Join(homeDir(), base) - } - if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" { - return filepath.Join(xdg, base) - } - return filepath.Join(homeDir(), ".cache", base) -} diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go deleted file mode 100644 index 665f870dc..000000000 --- a/vendor/golang.org/x/crypto/acme/autocert/renewal.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package autocert - -import ( - "context" - "crypto" - "sync" - "time" -) - -// renewJitter is the maximum deviation from Manager.RenewBefore. -const renewJitter = time.Hour - -// domainRenewal tracks the state used by the periodic timers -// renewing a single domain's cert. -type domainRenewal struct { - m *Manager - ck certKey - key crypto.Signer - - timerMu sync.Mutex - timer *time.Timer -} - -// start starts a cert renewal timer at the time -// defined by the certificate expiration time exp. -// -// If the timer is already started, calling start is a noop. -func (dr *domainRenewal) start(exp time.Time) { - dr.timerMu.Lock() - defer dr.timerMu.Unlock() - if dr.timer != nil { - return - } - dr.timer = time.AfterFunc(dr.next(exp), dr.renew) -} - -// stop stops the cert renewal timer. -// If the timer is already stopped, calling stop is a noop. 
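The renewal machinery being deleted schedules a timer RenewBefore ahead of expiry, minus up to renewJitter (one hour) of randomness so many domains do not renew at the same instant. A compressed, stand-alone restatement of that scheduling arithmetic only; the removed code additionally rechecks the cache and re-requests the certificate when the timer fires:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// renewalDelay mirrors the arithmetic of domainRenewal.next: wake up
// renewBefore ahead of expiry, minus up to jitter of randomness,
// clamped at zero so an already-due certificate renews immediately.
func renewalDelay(expiry time.Time, renewBefore, jitter time.Duration) time.Duration {
	d := time.Until(expiry) - renewBefore
	d -= time.Duration(rand.Int63n(int64(jitter)))
	if d < 0 {
		return 0
	}
	return d
}

func main() {
	expiry := time.Now().Add(90 * 24 * time.Hour) // e.g. a fresh 90-day certificate
	fmt.Println(renewalDelay(expiry, 30*24*time.Hour, time.Hour))
}
```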
-func (dr *domainRenewal) stop() { - dr.timerMu.Lock() - defer dr.timerMu.Unlock() - if dr.timer == nil { - return - } - dr.timer.Stop() - dr.timer = nil -} - -// renew is called periodically by a timer. -// The first renew call is kicked off by dr.start. -func (dr *domainRenewal) renew() { - dr.timerMu.Lock() - defer dr.timerMu.Unlock() - if dr.timer == nil { - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() - // TODO: rotate dr.key at some point? - next, err := dr.do(ctx) - if err != nil { - next = renewJitter / 2 - next += time.Duration(pseudoRand.int63n(int64(next))) - } - dr.timer = time.AfterFunc(next, dr.renew) - testDidRenewLoop(next, err) -} - -// updateState locks and replaces the relevant Manager.state item with the given -// state. It additionally updates dr.key with the given state's key. -func (dr *domainRenewal) updateState(state *certState) { - dr.m.stateMu.Lock() - defer dr.m.stateMu.Unlock() - dr.key = state.key - dr.m.state[dr.ck] = state -} - -// do is similar to Manager.createCert but it doesn't lock a Manager.state item. -// Instead, it requests a new certificate independently and, upon success, -// replaces dr.m.state item with a new one and updates cache for the given domain. -// -// It may lock and update the Manager.state if the expiration date of the currently -// cached cert is far enough in the future. -// -// The returned value is a time interval after which the renewal should occur again. -func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { - // a race is likely unavoidable in a distributed environment - // but we try nonetheless - if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil { - next := dr.next(tlscert.Leaf.NotAfter) - if next > dr.m.renewBefore()+renewJitter { - signer, ok := tlscert.PrivateKey.(crypto.Signer) - if ok { - state := &certState{ - key: signer, - cert: tlscert.Certificate, - leaf: tlscert.Leaf, - } - dr.updateState(state) - return next, nil - } - } - } - - der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck) - if err != nil { - return 0, err - } - state := &certState{ - key: dr.key, - cert: der, - leaf: leaf, - } - tlscert, err := state.tlscert() - if err != nil { - return 0, err - } - if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil { - return 0, err - } - dr.updateState(state) - return dr.next(leaf.NotAfter), nil -} - -func (dr *domainRenewal) next(expiry time.Time) time.Duration { - d := expiry.Sub(dr.m.now()) - dr.m.renewBefore() - // add a bit of randomness to renew deadline - n := pseudoRand.int63n(int64(renewJitter)) - d -= time.Duration(n) - if d < 0 { - return 0 - } - return d -} - -var testDidRenewLoop = func(next time.Duration, err error) {} diff --git a/vendor/golang.org/x/crypto/acme/http.go b/vendor/golang.org/x/crypto/acme/http.go deleted file mode 100644 index a43ce6a5f..000000000 --- a/vendor/golang.org/x/crypto/acme/http.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package acme - -import ( - "bytes" - "context" - "crypto" - "crypto/rand" - "encoding/json" - "fmt" - "io/ioutil" - "math/big" - "net/http" - "strconv" - "strings" - "time" -) - -// retryTimer encapsulates common logic for retrying unsuccessful requests. -// It is not safe for concurrent use. -type retryTimer struct { - // backoffFn provides backoff delay sequence for retries. 
-	// See Client.RetryBackoff doc comment.
-	backoffFn func(n int, r *http.Request, res *http.Response) time.Duration
-	// n is the current retry attempt.
-	n int
-}
-
-func (t *retryTimer) inc() {
-	t.n++
-}
-
-// backoff pauses the current goroutine as described in Client.RetryBackoff.
-func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error {
-	d := t.backoffFn(t.n, r, res)
-	if d <= 0 {
-		return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n)
-	}
-	wakeup := time.NewTimer(d)
-	defer wakeup.Stop()
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	case <-wakeup.C:
-		return nil
-	}
-}
-
-func (c *Client) retryTimer() *retryTimer {
-	f := c.RetryBackoff
-	if f == nil {
-		f = defaultBackoff
-	}
-	return &retryTimer{backoffFn: f}
-}
-
-// defaultBackoff provides default Client.RetryBackoff implementation
-// using a truncated exponential backoff algorithm,
-// as described in Client.RetryBackoff.
-//
-// The n argument is always bounded between 1 and 30.
-// The returned value is always greater than 0.
-func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
-	const max = 10 * time.Second
-	var jitter time.Duration
-	if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
-		// Set the minimum to 1ms to avoid a case where
-		// an invalid Retry-After value is parsed into 0 below,
-		// resulting in the 0 returned value which would unintentionally
-		// stop the retries.
-		jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
-	}
-	if v, ok := res.Header["Retry-After"]; ok {
-		return retryAfter(v[0]) + jitter
-	}
-
-	if n < 1 {
-		n = 1
-	}
-	if n > 30 {
-		n = 30
-	}
-	d := time.Duration(1<<uint(n-1))*time.Second + jitter
-	if d > max {
-		return max
-	}
-	return d
-}
-
-// retryAfter parses a Retry-After HTTP header value,
-// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
-// It returns zero value if v cannot be parsed.
-func retryAfter(v string) time.Duration {
-	if i, err := strconv.Atoi(v); err == nil {
-		return time.Duration(i) * time.Second
-	}
-	t, err := http.ParseTime(v)
-	if err != nil {
-		return 0
-	}
-	return t.Sub(timeNow())
-}
-
-// resOkay is a function that reports whether the provided response is okay.
-// It is expected to keep the response body unread.
-type resOkay func(*http.Response) bool
-
-// wantStatus returns a function which reports whether the code
-// matches the status code of a response.
-func wantStatus(codes ...int) resOkay {
-	return func(res *http.Response) bool {
-		for _, code := range codes {
-			if code == res.StatusCode {
-				return true
-			}
-		}
-		return false
-	}
-}
-
-// get issues an unsigned GET request to the specified URL.
-// It returns a non-error value only when ok reports true.
-//
-// get retries unsuccessful attempts according to c.RetryBackoff
-// until the context is done or a non-retriable error is received.
-func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
-	retry := c.retryTimer()
-	for {
-		req, err := http.NewRequest("GET", url, nil)
-		if err != nil {
-			return nil, err
-		}
-		res, err := c.doNoRetry(ctx, req)
-		switch {
-		case err != nil:
-			return nil, err
-		case ok(res):
-			return res, nil
-		case isRetriable(res.StatusCode):
-			retry.inc()
-			resErr := responseError(res)
-			res.Body.Close()
-			// Ignore the error value from retry.backoff
-			// and return the one from last retry, as received from the CA.
- if retry.backoff(ctx, req, res) != nil { - return nil, resErr - } - default: - defer res.Body.Close() - return nil, responseError(res) - } - } -} - -// post issues a signed POST request in JWS format using the provided key -// to the specified URL. -// It returns a non-error value only when ok reports true. -// -// post retries unsuccessful attempts according to c.RetryBackoff -// until the context is done or a non-retriable error is received. -// It uses postNoRetry to make individual requests. -func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) { - retry := c.retryTimer() - for { - res, req, err := c.postNoRetry(ctx, key, url, body) - if err != nil { - return nil, err - } - if ok(res) { - return res, nil - } - resErr := responseError(res) - res.Body.Close() - switch { - // Check for bad nonce before isRetriable because it may have been returned - // with an unretriable response code such as 400 Bad Request. - case isBadNonce(resErr): - // Consider any previously stored nonce values to be invalid. - c.clearNonces() - case !isRetriable(res.StatusCode): - return nil, resErr - } - retry.inc() - // Ignore the error value from retry.backoff - // and return the one from last retry, as received from the CA. - if err := retry.backoff(ctx, req, res); err != nil { - return nil, resErr - } - } -} - -// postNoRetry signs the body with the given key and POSTs it to the provided url. -// The body argument must be JSON-serializable. -// It is used by c.post to retry unsuccessful attempts. -func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) { - nonce, err := c.popNonce(ctx, url) - if err != nil { - return nil, nil, err - } - b, err := jwsEncodeJSON(body, key, nonce) - if err != nil { - return nil, nil, err - } - req, err := http.NewRequest("POST", url, bytes.NewReader(b)) - if err != nil { - return nil, nil, err - } - req.Header.Set("Content-Type", "application/jose+json") - res, err := c.doNoRetry(ctx, req) - if err != nil { - return nil, nil, err - } - c.addNonce(res.Header) - return res, req, nil -} - -// doNoRetry issues a request req, replacing its context (if any) with ctx. -func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) { - res, err := c.httpClient().Do(req.WithContext(ctx)) - if err != nil { - select { - case <-ctx.Done(): - // Prefer the unadorned context error. - // (The acme package had tests assuming this, previously from ctxhttp's - // behavior, predating net/http supporting contexts natively) - // TODO(bradfitz): reconsider this in the future. But for now this - // requires no test updates. - return nil, ctx.Err() - default: - return nil, err - } - } - return res, nil -} - -func (c *Client) httpClient() *http.Client { - if c.HTTPClient != nil { - return c.HTTPClient - } - return http.DefaultClient -} - -// isBadNonce reports whether err is an ACME "badnonce" error. -func isBadNonce(err error) bool { - // According to the spec badNonce is urn:ietf:params:acme:error:badNonce. - // However, ACME servers in the wild return their versions of the error. - // See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4 - // and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66. 
- ae, ok := err.(*Error) - return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce") -} - -// isRetriable reports whether a request can be retried -// based on the response status code. -// -// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code. -// Callers should parse the response and check with isBadNonce. -func isRetriable(code int) bool { - return code <= 399 || code >= 500 || code == http.StatusTooManyRequests -} - -// responseError creates an error of Error type from resp. -func responseError(resp *http.Response) error { - // don't care if ReadAll returns an error: - // json.Unmarshal will fail in that case anyway - b, _ := ioutil.ReadAll(resp.Body) - e := &wireError{Status: resp.StatusCode} - if err := json.Unmarshal(b, e); err != nil { - // this is not a regular error response: - // populate detail with anything we received, - // e.Status will already contain HTTP response code value - e.Detail = string(b) - if e.Detail == "" { - e.Detail = resp.Status - } - } - return e.error(resp.Header) -} diff --git a/vendor/golang.org/x/crypto/acme/jws.go b/vendor/golang.org/x/crypto/acme/jws.go deleted file mode 100644 index 1093b5039..000000000 --- a/vendor/golang.org/x/crypto/acme/jws.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package acme - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - _ "crypto/sha512" // need for EC keys - "encoding/base64" - "encoding/json" - "fmt" - "math/big" -) - -// jwsEncodeJSON signs claimset using provided key and a nonce. -// The result is serialized in JSON format. -// See https://tools.ietf.org/html/rfc7515#section-7. -func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) { - jwk, err := jwkEncode(key.Public()) - if err != nil { - return nil, err - } - alg, sha := jwsHasher(key.Public()) - if alg == "" || !sha.Available() { - return nil, ErrUnsupportedKey - } - phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce) - phead = base64.RawURLEncoding.EncodeToString([]byte(phead)) - cs, err := json.Marshal(claimset) - if err != nil { - return nil, err - } - payload := base64.RawURLEncoding.EncodeToString(cs) - hash := sha.New() - hash.Write([]byte(phead + "." + payload)) - sig, err := jwsSign(key, sha, hash.Sum(nil)) - if err != nil { - return nil, err - } - - enc := struct { - Protected string `json:"protected"` - Payload string `json:"payload"` - Sig string `json:"signature"` - }{ - Protected: phead, - Payload: payload, - Sig: base64.RawURLEncoding.EncodeToString(sig), - } - return json.Marshal(&enc) -} - -// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. -// The result is also suitable for creating a JWK thumbprint. -// https://tools.ietf.org/html/rfc7517 -func jwkEncode(pub crypto.PublicKey) (string, error) { - switch pub := pub.(type) { - case *rsa.PublicKey: - // https://tools.ietf.org/html/rfc7518#section-6.3.1 - n := pub.N - e := big.NewInt(int64(pub.E)) - // Field order is important. - // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. 
- return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, - base64.RawURLEncoding.EncodeToString(e.Bytes()), - base64.RawURLEncoding.EncodeToString(n.Bytes()), - ), nil - case *ecdsa.PublicKey: - // https://tools.ietf.org/html/rfc7518#section-6.2.1 - p := pub.Curve.Params() - n := p.BitSize / 8 - if p.BitSize%8 != 0 { - n++ - } - x := pub.X.Bytes() - if n > len(x) { - x = append(make([]byte, n-len(x)), x...) - } - y := pub.Y.Bytes() - if n > len(y) { - y = append(make([]byte, n-len(y)), y...) - } - // Field order is important. - // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. - return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, - p.Name, - base64.RawURLEncoding.EncodeToString(x), - base64.RawURLEncoding.EncodeToString(y), - ), nil - } - return "", ErrUnsupportedKey -} - -// jwsSign signs the digest using the given key. -// The hash is unused for ECDSA keys. -// -// Note: non-stdlib crypto.Signer implementations are expected to return -// the signature in the format as specified in RFC7518. -// See https://tools.ietf.org/html/rfc7518 for more details. -func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { - if key, ok := key.(*ecdsa.PrivateKey); ok { - // The key.Sign method of ecdsa returns ASN1-encoded signature. - // So, we use the package Sign function instead - // to get R and S values directly and format the result accordingly. - r, s, err := ecdsa.Sign(rand.Reader, key, digest) - if err != nil { - return nil, err - } - rb, sb := r.Bytes(), s.Bytes() - size := key.Params().BitSize / 8 - if size%8 > 0 { - size++ - } - sig := make([]byte, size*2) - copy(sig[size-len(rb):], rb) - copy(sig[size*2-len(sb):], sb) - return sig, nil - } - return key.Sign(rand.Reader, digest, hash) -} - -// jwsHasher indicates suitable JWS algorithm name and a hash function -// to use for signing a digest with the provided key. -// It returns ("", 0) if the key is not supported. -func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) { - switch pub := pub.(type) { - case *rsa.PublicKey: - return "RS256", crypto.SHA256 - case *ecdsa.PublicKey: - switch pub.Params().Name { - case "P-256": - return "ES256", crypto.SHA256 - case "P-384": - return "ES384", crypto.SHA384 - case "P-521": - return "ES512", crypto.SHA512 - } - } - return "", 0 -} - -// JWKThumbprint creates a JWK thumbprint out of pub -// as specified in https://tools.ietf.org/html/rfc7638. -func JWKThumbprint(pub crypto.PublicKey) (string, error) { - jwk, err := jwkEncode(pub) - if err != nil { - return "", err - } - b := sha256.Sum256([]byte(jwk)) - return base64.RawURLEncoding.EncodeToString(b[:]), nil -} diff --git a/vendor/golang.org/x/crypto/acme/types.go b/vendor/golang.org/x/crypto/acme/types.go deleted file mode 100644 index 54792c065..000000000 --- a/vendor/golang.org/x/crypto/acme/types.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package acme - -import ( - "crypto" - "crypto/x509" - "errors" - "fmt" - "net/http" - "strings" - "time" -) - -// ACME server response statuses used to describe Authorization and Challenge states. -const ( - StatusUnknown = "unknown" - StatusPending = "pending" - StatusProcessing = "processing" - StatusValid = "valid" - StatusInvalid = "invalid" - StatusRevoked = "revoked" -) - -// CRLReasonCode identifies the reason for a certificate revocation. 
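jwkEncode and JWKThumbprint, removed above, implement RFC 7638 thumbprints; JWKThumbprint is exported by the acme package, so callers can compute the value that http-01/dns-01 key authorizations are derived from. A short usage sketch against the upstream module:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/acme"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// RFC 7638 thumbprint of the account public key; challenge key
	// authorizations are the challenge token joined to this value.
	thumb, err := acme.JWKThumbprint(key.Public())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(thumb)
}
```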
-type CRLReasonCode int - -// CRL reason codes as defined in RFC 5280. -const ( - CRLReasonUnspecified CRLReasonCode = 0 - CRLReasonKeyCompromise CRLReasonCode = 1 - CRLReasonCACompromise CRLReasonCode = 2 - CRLReasonAffiliationChanged CRLReasonCode = 3 - CRLReasonSuperseded CRLReasonCode = 4 - CRLReasonCessationOfOperation CRLReasonCode = 5 - CRLReasonCertificateHold CRLReasonCode = 6 - CRLReasonRemoveFromCRL CRLReasonCode = 8 - CRLReasonPrivilegeWithdrawn CRLReasonCode = 9 - CRLReasonAACompromise CRLReasonCode = 10 -) - -// ErrUnsupportedKey is returned when an unsupported key type is encountered. -var ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") - -// Error is an ACME error, defined in Problem Details for HTTP APIs doc -// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem. -type Error struct { - // StatusCode is The HTTP status code generated by the origin server. - StatusCode int - // ProblemType is a URI reference that identifies the problem type, - // typically in a "urn:acme:error:xxx" form. - ProblemType string - // Detail is a human-readable explanation specific to this occurrence of the problem. - Detail string - // Header is the original server error response headers. - // It may be nil. - Header http.Header -} - -func (e *Error) Error() string { - return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail) -} - -// AuthorizationError indicates that an authorization for an identifier -// did not succeed. -// It contains all errors from Challenge items of the failed Authorization. -type AuthorizationError struct { - // URI uniquely identifies the failed Authorization. - URI string - - // Identifier is an AuthzID.Value of the failed Authorization. - Identifier string - - // Errors is a collection of non-nil error values of Challenge items - // of the failed Authorization. - Errors []error -} - -func (a *AuthorizationError) Error() string { - e := make([]string, len(a.Errors)) - for i, err := range a.Errors { - e[i] = err.Error() - } - return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; ")) -} - -// RateLimit reports whether err represents a rate limit error and -// any Retry-After duration returned by the server. -// -// See the following for more details on rate limiting: -// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6 -func RateLimit(err error) (time.Duration, bool) { - e, ok := err.(*Error) - if !ok { - return 0, false - } - // Some CA implementations may return incorrect values. - // Use case-insensitive comparison. - if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") { - return 0, false - } - if e.Header == nil { - return 0, true - } - return retryAfter(e.Header.Get("Retry-After")), true -} - -// Account is a user account. It is associated with a private key. -type Account struct { - // URI is the account unique ID, which is also a URL used to retrieve - // account data from the CA. - URI string - - // Contact is a slice of contact info used during registration. - Contact []string - - // The terms user has agreed to. - // A value not matching CurrentTerms indicates that the user hasn't agreed - // to the actual Terms of Service of the CA. - AgreedTerms string - - // Actual terms of a CA. - CurrentTerms string - - // Authz is the authorization URL used to initiate a new authz flow. - Authz string - - // Authorizations is a URI from which a list of authorizations - // granted to this account can be fetched via a GET request. 
- Authorizations string - - // Certificates is a URI from which a list of certificates - // issued for this account can be fetched via a GET request. - Certificates string -} - -// Directory is ACME server discovery data. -type Directory struct { - // RegURL is an account endpoint URL, allowing for creating new - // and modifying existing accounts. - RegURL string - - // AuthzURL is used to initiate Identifier Authorization flow. - AuthzURL string - - // CertURL is a new certificate issuance endpoint URL. - CertURL string - - // RevokeURL is used to initiate a certificate revocation flow. - RevokeURL string - - // Term is a URI identifying the current terms of service. - Terms string - - // Website is an HTTP or HTTPS URL locating a website - // providing more information about the ACME server. - Website string - - // CAA consists of lowercase hostname elements, which the ACME server - // recognises as referring to itself for the purposes of CAA record validation - // as defined in RFC6844. - CAA []string -} - -// Challenge encodes a returned CA challenge. -// Its Error field may be non-nil if the challenge is part of an Authorization -// with StatusInvalid. -type Challenge struct { - // Type is the challenge type, e.g. "http-01", "tls-sni-02", "dns-01". - Type string - - // URI is where a challenge response can be posted to. - URI string - - // Token is a random value that uniquely identifies the challenge. - Token string - - // Status identifies the status of this challenge. - Status string - - // Error indicates the reason for an authorization failure - // when this challenge was used. - // The type of a non-nil value is *Error. - Error error -} - -// Authorization encodes an authorization response. -type Authorization struct { - // URI uniquely identifies a authorization. - URI string - - // Status identifies the status of an authorization. - Status string - - // Identifier is what the account is authorized to represent. - Identifier AuthzID - - // Challenges that the client needs to fulfill in order to prove possession - // of the identifier (for pending authorizations). - // For final authorizations, the challenges that were used. - Challenges []*Challenge - - // A collection of sets of challenges, each of which would be sufficient - // to prove possession of the identifier. - // Clients must complete a set of challenges that covers at least one set. - // Challenges are identified by their indices in the challenges array. - // If this field is empty, the client needs to complete all challenges. - Combinations [][]int -} - -// AuthzID is an identifier that an account is authorized to represent. -type AuthzID struct { - Type string // The type of identifier, e.g. "dns". - Value string // The identifier itself, e.g. "example.org". -} - -// wireAuthz is ACME JSON representation of Authorization objects. 
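The Error type and RateLimit helper deleted here are the exported way callers classify CA failures. A small usage sketch; the constructed error below is illustrative, not a real CA response:

```go
package main

import (
	"errors"
	"log"
	"time"

	"golang.org/x/crypto/acme"
)

// retryDelay distinguishes rate-limit responses from other ACME problem
// documents and suggests how long to wait before retrying.
func retryDelay(err error) time.Duration {
	// Rate limits carry an optional Retry-After hint.
	if delay, ok := acme.RateLimit(err); ok {
		return delay
	}
	// Other problem documents expose the HTTP status and problem type.
	var ae *acme.Error
	if errors.As(err, &ae) {
		log.Printf("ACME error %d (%s): %s", ae.StatusCode, ae.ProblemType, ae.Detail)
	}
	return 0
}

func main() {
	err := &acme.Error{StatusCode: 429, ProblemType: "urn:ietf:params:acme:error:rateLimited", Detail: "too many requests"}
	log.Println("wait for:", retryDelay(err))
}
```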
-type wireAuthz struct { - Status string - Challenges []wireChallenge - Combinations [][]int - Identifier struct { - Type string - Value string - } -} - -func (z *wireAuthz) authorization(uri string) *Authorization { - a := &Authorization{ - URI: uri, - Status: z.Status, - Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value}, - Combinations: z.Combinations, // shallow copy - Challenges: make([]*Challenge, len(z.Challenges)), - } - for i, v := range z.Challenges { - a.Challenges[i] = v.challenge() - } - return a -} - -func (z *wireAuthz) error(uri string) *AuthorizationError { - err := &AuthorizationError{ - URI: uri, - Identifier: z.Identifier.Value, - } - for _, raw := range z.Challenges { - if raw.Error != nil { - err.Errors = append(err.Errors, raw.Error.error(nil)) - } - } - return err -} - -// wireChallenge is ACME JSON challenge representation. -type wireChallenge struct { - URI string `json:"uri"` - Type string - Token string - Status string - Error *wireError -} - -func (c *wireChallenge) challenge() *Challenge { - v := &Challenge{ - URI: c.URI, - Type: c.Type, - Token: c.Token, - Status: c.Status, - } - if v.Status == "" { - v.Status = StatusPending - } - if c.Error != nil { - v.Error = c.Error.error(nil) - } - return v -} - -// wireError is a subset of fields of the Problem Details object -// as described in https://tools.ietf.org/html/rfc7807#section-3.1. -type wireError struct { - Status int - Type string - Detail string -} - -func (e *wireError) error(h http.Header) *Error { - return &Error{ - StatusCode: e.Status, - ProblemType: e.Type, - Detail: e.Detail, - Header: h, - } -} - -// CertOption is an optional argument type for the TLS ChallengeCert methods for -// customizing a temporary certificate for TLS-based challenges. -type CertOption interface { - privateCertOpt() -} - -// WithKey creates an option holding a private/public key pair. -// The private part signs a certificate, and the public part represents the signee. -func WithKey(key crypto.Signer) CertOption { - return &certOptKey{key} -} - -type certOptKey struct { - key crypto.Signer -} - -func (*certOptKey) privateCertOpt() {} - -// WithTemplate creates an option for specifying a certificate template. -// See x509.CreateCertificate for template usage details. -// -// In TLS ChallengeCert methods, the template is also used as parent, -// resulting in a self-signed certificate. -// The DNSNames field of t is always overwritten for tls-sni challenge certs. -func WithTemplate(t *x509.Certificate) CertOption { - return (*certOptTemplate)(t) -} - -type certOptTemplate x509.Certificate - -func (*certOptTemplate) privateCertOpt() {} diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h deleted file mode 100644 index b3f74162f..000000000 --- a/vendor/golang.org/x/crypto/curve25519/const_amd64.h +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s deleted file mode 100644 index ee7b4bd5f..000000000 --- a/vendor/golang.org/x/crypto/curve25519/const_amd64.s +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// These constants cannot be encoded in non-MOVQ immediates. -// We access them directly from memory instead. - -DATA ·_121666_213(SB)/8, $996687872 -GLOBL ·_121666_213(SB), 8, $8 - -DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA -GLOBL ·_2P0(SB), 8, $8 - -DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE -GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s deleted file mode 100644 index cd793a5b5..000000000 --- a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!gccgo,!appengine - -// func cswap(inout *[4][5]uint64, v uint64) -TEXT ·cswap(SB),7,$0 - MOVQ inout+0(FP),DI - MOVQ v+8(FP),SI - - SUBQ $1, SI - NOTQ SI - MOVQ SI, X15 - PSHUFD $0x44, X15, X15 - - MOVOU 0(DI), X0 - MOVOU 16(DI), X2 - MOVOU 32(DI), X4 - MOVOU 48(DI), X6 - MOVOU 64(DI), X8 - MOVOU 80(DI), X1 - MOVOU 96(DI), X3 - MOVOU 112(DI), X5 - MOVOU 128(DI), X7 - MOVOU 144(DI), X9 - - MOVO X1, X10 - MOVO X3, X11 - MOVO X5, X12 - MOVO X7, X13 - MOVO X9, X14 - - PXOR X0, X10 - PXOR X2, X11 - PXOR X4, X12 - PXOR X6, X13 - PXOR X8, X14 - PAND X15, X10 - PAND X15, X11 - PAND X15, X12 - PAND X15, X13 - PAND X15, X14 - PXOR X10, X0 - PXOR X10, X1 - PXOR X11, X2 - PXOR X11, X3 - PXOR X12, X4 - PXOR X12, X5 - PXOR X13, X6 - PXOR X13, X7 - PXOR X14, X8 - PXOR X14, X9 - - MOVOU X0, 0(DI) - MOVOU X2, 16(DI) - MOVOU X4, 32(DI) - MOVOU X6, 48(DI) - MOVOU X8, 64(DI) - MOVOU X1, 80(DI) - MOVOU X3, 96(DI) - MOVOU X5, 112(DI) - MOVOU X7, 128(DI) - MOVOU X9, 144(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index 75f24babb..000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,834 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// We have an implementation in amd64 assembly so this code is only run on -// non-amd64 platforms. The amd64 assembly does not support gccgo. -// +build !amd64 gccgo appengine - -package curve25519 - -import ( - "encoding/binary" -) - -// This code is a port of the public domain, "ref10" implementation of -// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. - -// fieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. 
Bounds on each t[i] vary depending on -// context. -type fieldElement [10]int32 - -func feZero(fe *fieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func feOne(fe *fieldElement) { - feZero(fe) - fe[0] = 1 -} - -func feAdd(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func feSub(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func feCopy(dst, src *fieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func feCSwap(f, g *fieldElement, b int32) { - b = -b - for i := range f { - t := b & (f[i] ^ g[i]) - f[i] ^= t - g[i] ^= t - } -} - -// load3 reads a 24-bit, little-endian value from in. -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -// load4 reads a 32-bit, little-endian value from in. -func load4(in []byte) int64 { - return int64(binary.LittleEndian.Uint32(in)) -} - -func feFromBytes(dst *fieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 0x7fffff) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// feToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -// feMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
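The carry chain described in the comment above is the same rounding step used throughout this file: add 2^25, shift right by 26, and subtract the carry back out, leaving a limb bounded by 2^25 while preserving the represented value. A toy, stand-alone check of that identity (not the removed code):

```go
package main

import (
	"fmt"
	"math/rand"
)

// carry26 performs the rounding carry used in the removed field arithmetic:
// it moves the part of h outside [-2^25, 2^25) into the next limb (scaled by
// 2^26) while preserving the value low + carry*2^26 == h.
func carry26(h int64) (low, carry int64) {
	carry = (h + 1<<25) >> 26 // arithmetic shift: floor((h + 2^25) / 2^26)
	low = h - carry<<26
	return low, carry
}

func main() {
	for i := 0; i < 5; i++ {
		h := rand.Int63n(1<<40) - 1<<39 // a limb value with |h| up to ~2^39
		low, carry := carry26(h)
		// The value is preserved and the remaining limb satisfies |low| <= 2^25.
		fmt.Printf("h=%d -> low=%d carry=%d ok=%v\n",
			h, low, carry, low+carry<<26 == h && low >= -(1<<25) && low < 1<<25)
	}
}
```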
-func feMul(h, f, g *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 // 1.4*2^29 - g2_19 := 19 * g2 // 1.4*2^30; still ok - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * int64(g4_19) - 
f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - // |h0| <= 2^25 - // |h4| <= 2^25 - // |h1| <= 1.51*2^58 - // |h5| <= 1.51*2^58 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - // |h1| <= 2^24; from now on fits into int32 - // |h5| <= 2^24; from now on fits into int32 - // |h2| <= 1.21*2^59 - // |h6| <= 1.21*2^59 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - // |h2| <= 2^25; from now on fits into int32 unchanged - // |h6| <= 2^25; from now on fits into int32 unchanged - // |h3| <= 1.51*2^58 - // |h7| <= 1.51*2^58 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - // |h3| <= 2^24; from now on fits into int32 unchanged - // |h7| <= 2^24; from now on fits into int32 unchanged - // |h4| <= 1.52*2^33 - // |h8| <= 1.52*2^33 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - // |h4| <= 2^25; from now on fits into int32 unchanged - // |h8| <= 2^25; from now on fits into int32 unchanged - // |h5| <= 1.01*2^24 - // |h9| <= 1.51*2^58 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - // |h9| <= 2^24; from now on fits into int32 unchanged - // |h0| <= 1.8*2^37 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - // |h0| <= 2^25; from now on fits into int32 unchanged - // |h1| <= 
1.01*2^24 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feSquare(h, f *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] 
= (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feMul121666 calculates h = f * 121666. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feMul121666(h, f *fieldElement) { - h0 := int64(f[0]) * 121666 - h1 := int64(f[1]) * 121666 - h2 := int64(f[2]) * 121666 - h3 := int64(f[3]) * 121666 - h4 := int64(f[4]) * 121666 - h5 := int64(f[5]) * 121666 - h6 := int64(f[6]) * 121666 - h7 := int64(f[7]) * 121666 - h8 := int64(f[8]) * 121666 - h9 := int64(f[9]) * 121666 - var carry [10]int64 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feInvert sets out = z^-1. 
-func feInvert(out, z *fieldElement) { - var t0, t1, t2, t3 fieldElement - var i int - - feSquare(&t0, z) - for i = 1; i < 1; i++ { - feSquare(&t0, &t0) - } - feSquare(&t1, &t0) - for i = 1; i < 2; i++ { - feSquare(&t1, &t1) - } - feMul(&t1, z, &t1) - feMul(&t0, &t0, &t1) - feSquare(&t2, &t0) - for i = 1; i < 1; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t1, &t2) - feSquare(&t2, &t1) - for i = 1; i < 5; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 20; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 100; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t1, &t1) - for i = 1; i < 5; i++ { - feSquare(&t1, &t1) - } - feMul(out, &t1, &t0) -} - -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - - copy(e[:], in[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement - feFromBytes(&x1, base) - feOne(&x2) - feCopy(&x3, &x1) - feOne(&z3) - - swap := int32(0) - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int32(b) - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - swap = int32(b) - - feSub(&tmp0, &x3, &z3) - feSub(&tmp1, &x2, &z2) - feAdd(&x2, &x2, &z2) - feAdd(&z2, &x3, &z3) - feMul(&z3, &tmp0, &x2) - feMul(&z2, &z2, &tmp1) - feSquare(&tmp0, &tmp1) - feSquare(&tmp1, &x2) - feAdd(&x3, &z3, &z2) - feSub(&z2, &z3, &z2) - feMul(&x2, &tmp1, &tmp0) - feSub(&tmp1, &tmp1, &tmp0) - feSquare(&z2, &z2) - feMul121666(&z3, &tmp1) - feSquare(&x3, &x3) - feAdd(&tmp0, &tmp0, &z3) - feMul(&z3, &x1, &z2) - feMul(&z2, &tmp1, &tmp0) - } - - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - - feInvert(&z2, &z2) - feMul(&x2, &x2, &z2) - feToBytes(out, &x2) -} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go deleted file mode 100644 index da9b10d9c..000000000 --- a/vendor/golang.org/x/crypto/curve25519/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of scalar multiplication on -// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html -package curve25519 // import "golang.org/x/crypto/curve25519" - -// basePoint is the x coordinate of the generator of the curve. -var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - -// ScalarMult sets dst to the product in*base where dst and base are the x -// coordinates of group points and all values are in little-endian form. -func ScalarMult(dst, in, base *[32]byte) { - scalarMult(dst, in, base) -} - -// ScalarBaseMult sets dst to the product in*base where dst and base are the x -// coordinates of group points, base is the standard generator and all values -// are in little-endian form. 
-func ScalarBaseMult(dst, in *[32]byte) { - ScalarMult(dst, in, &basePoint) -} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s deleted file mode 100644 index 390816106..000000000 --- a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func freeze(inout *[5]uint64) -TEXT ·freeze(SB),7,$0-8 - MOVQ inout+0(FP), DI - - MOVQ 0(DI),SI - MOVQ 8(DI),DX - MOVQ 16(DI),CX - MOVQ 24(DI),R8 - MOVQ 32(DI),R9 - MOVQ $REDMASK51,AX - MOVQ AX,R10 - SUBQ $18,R10 - MOVQ $3,R11 -REDUCELOOP: - MOVQ SI,R12 - SHRQ $51,R12 - ANDQ AX,SI - ADDQ R12,DX - MOVQ DX,R12 - SHRQ $51,R12 - ANDQ AX,DX - ADDQ R12,CX - MOVQ CX,R12 - SHRQ $51,R12 - ANDQ AX,CX - ADDQ R12,R8 - MOVQ R8,R12 - SHRQ $51,R12 - ANDQ AX,R8 - ADDQ R12,R9 - MOVQ R9,R12 - SHRQ $51,R12 - ANDQ AX,R9 - IMUL3Q $19,R12,R12 - ADDQ R12,SI - SUBQ $1,R11 - JA REDUCELOOP - MOVQ $1,R12 - CMPQ R10,SI - CMOVQLT R11,R12 - CMPQ AX,DX - CMOVQNE R11,R12 - CMPQ AX,CX - CMOVQNE R11,R12 - CMPQ AX,R8 - CMOVQNE R11,R12 - CMPQ AX,R9 - CMOVQNE R11,R12 - NEGQ R12 - ANDQ R12,AX - ANDQ R12,R10 - SUBQ R10,SI - SUBQ AX,DX - SUBQ AX,CX - SUBQ AX,R8 - SUBQ AX,R9 - MOVQ SI,0(DI) - MOVQ DX,8(DI) - MOVQ CX,16(DI) - MOVQ R8,24(DI) - MOVQ R9,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s deleted file mode 100644 index e0ac30c70..000000000 --- a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s +++ /dev/null @@ -1,1377 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func ladderstep(inout *[5][5]uint64) -TEXT ·ladderstep(SB),0,$296-8 - MOVQ inout+0(FP),DI - - MOVQ 40(DI),SI - MOVQ 48(DI),DX - MOVQ 56(DI),CX - MOVQ 64(DI),R8 - MOVQ 72(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 80(DI),SI - ADDQ 88(DI),DX - ADDQ 96(DI),CX - ADDQ 104(DI),R8 - ADDQ 112(DI),R9 - SUBQ 80(DI),AX - SUBQ 88(DI),R10 - SUBQ 96(DI),R11 - SUBQ 104(DI),R12 - SUBQ 112(DI),R13 - MOVQ SI,0(SP) - MOVQ DX,8(SP) - MOVQ CX,16(SP) - MOVQ R8,24(SP) - MOVQ R9,32(SP) - MOVQ AX,40(SP) - MOVQ R10,48(SP) - MOVQ R11,56(SP) - MOVQ R12,64(SP) - MOVQ R13,72(SP) - MOVQ 40(SP),AX - MULQ 40(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 48(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 48(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 72(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(SP) - MOVQ R8,88(SP) - MOVQ R9,96(SP) - MOVQ AX,104(SP) - MOVQ R10,112(SP) - MOVQ 0(SP),AX - MULQ 0(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 8(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 24(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 
24(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 32(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(SP) - MOVQ R8,128(SP) - MOVQ R9,136(SP) - MOVQ AX,144(SP) - MOVQ R10,152(SP) - MOVQ SI,SI - MOVQ R8,DX - MOVQ R9,CX - MOVQ AX,R8 - MOVQ R10,R9 - ADDQ ·_2P0(SB),SI - ADDQ ·_2P1234(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R8 - ADDQ ·_2P1234(SB),R9 - SUBQ 80(SP),SI - SUBQ 88(SP),DX - SUBQ 96(SP),CX - SUBQ 104(SP),R8 - SUBQ 112(SP),R9 - MOVQ SI,160(SP) - MOVQ DX,168(SP) - MOVQ CX,176(SP) - MOVQ R8,184(SP) - MOVQ R9,192(SP) - MOVQ 120(DI),SI - MOVQ 128(DI),DX - MOVQ 136(DI),CX - MOVQ 144(DI),R8 - MOVQ 152(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 160(DI),SI - ADDQ 168(DI),DX - ADDQ 176(DI),CX - ADDQ 184(DI),R8 - ADDQ 192(DI),R9 - SUBQ 160(DI),AX - SUBQ 168(DI),R10 - SUBQ 176(DI),R11 - SUBQ 184(DI),R12 - SUBQ 192(DI),R13 - MOVQ SI,200(SP) - MOVQ DX,208(SP) - MOVQ CX,216(SP) - MOVQ R8,224(SP) - MOVQ R9,232(SP) - MOVQ AX,240(SP) - MOVQ R10,248(SP) - MOVQ R11,256(SP) - MOVQ R12,264(SP) - MOVQ R13,272(SP) - MOVQ 224(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,280(SP) - MULQ 56(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 232(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,288(SP) - MULQ 48(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 40(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 200(SP),AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 200(SP),AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 200(SP),AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 208(SP),AX - MULQ 40(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 208(SP),AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),AX - MULQ 40(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 216(SP),AX - MULQ 48(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 216(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 224(SP),AX - MULQ 40(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 224(SP),AX - MULQ 48(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 280(SP),AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 280(SP),AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 232(SP),AX - MULQ 40(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 288(SP),AX - MULQ 56(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 288(SP),AX - MULQ 64(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 288(SP),AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - 
SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(SP) - MOVQ R8,48(SP) - MOVQ R9,56(SP) - MOVQ AX,64(SP) - MOVQ R10,72(SP) - MOVQ 264(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,200(SP) - MULQ 16(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 272(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,208(SP) - MULQ 8(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 0(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 240(SP),AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 240(SP),AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 240(SP),AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 248(SP),AX - MULQ 0(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 248(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 248(SP),AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 248(SP),AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 248(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),AX - MULQ 0(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 256(SP),AX - MULQ 8(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 256(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 264(SP),AX - MULQ 0(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 264(SP),AX - MULQ 8(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 200(SP),AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 200(SP),AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 272(SP),AX - MULQ 0(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),AX - MULQ 16(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 24(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,DX - MOVQ R8,CX - MOVQ R9,R11 - MOVQ AX,R12 - MOVQ R10,R13 - ADDQ ·_2P0(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 40(SP),SI - ADDQ 48(SP),R8 - ADDQ 56(SP),R9 - ADDQ 64(SP),AX - ADDQ 72(SP),R10 - SUBQ 40(SP),DX - SUBQ 48(SP),CX - SUBQ 56(SP),R11 - SUBQ 64(SP),R12 - SUBQ 72(SP),R13 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ DX,160(DI) - MOVQ CX,168(DI) - MOVQ R11,176(DI) - MOVQ R12,184(DI) - MOVQ R13,192(DI) - MOVQ 120(DI),AX - MULQ 120(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 128(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 136(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 144(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 152(DI) - MOVQ AX,R14 - MOVQ 
DX,R15 - MOVQ 128(DI),AX - MULQ 128(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 136(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 144(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),AX - MULQ 136(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 144(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $19,DX,AX - MULQ 144(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(DI),DX - IMUL3Q $19,DX,AX - MULQ 152(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ 160(DI),AX - MULQ 160(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 168(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 176(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 184(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 192(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 168(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 176(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 184(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 176(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 184(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 184(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 16(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 0(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 8(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - MULQ 16(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - MULQ 24(DI) - MOVQ AX,R12 - 
MOVQ DX,R13 - MOVQ 160(DI),AX - MULQ 32(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 0(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 168(DI),AX - MULQ 8(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - MULQ 16(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - MULQ 24(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 0(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 176(DI),AX - MULQ 8(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 176(DI),AX - MULQ 16(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 24(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),AX - MULQ 0(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 184(DI),AX - MULQ 8(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 24(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 32(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),AX - MULQ 0(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 16(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 24(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 32(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 144(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 96(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 152(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 88(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 80(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 88(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(SP),AX - MULQ 96(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(SP),AX - MULQ 104(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(SP),AX - MULQ 112(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 128(SP),AX - MULQ 80(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 128(SP),AX - MULQ 88(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(SP),AX - MULQ 96(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(SP),AX - MULQ 104(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(SP),AX - MULQ 80(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 136(SP),AX - MULQ 88(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 136(SP),AX - MULQ 96(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 104(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(SP),AX - MULQ 80(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 144(SP),AX - MULQ 88(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 104(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 112(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(SP),AX - MULQ 80(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 96(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 104(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 112(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - 
ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(DI) - MOVQ R8,48(DI) - MOVQ R9,56(DI) - MOVQ AX,64(DI) - MOVQ R10,72(DI) - MOVQ 160(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - MOVQ AX,SI - MOVQ DX,CX - MOVQ 168(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,CX - MOVQ DX,R8 - MOVQ 176(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R8 - MOVQ DX,R9 - MOVQ 184(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R9 - MOVQ DX,R10 - MOVQ 192(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R10 - IMUL3Q $19,DX,DX - ADDQ DX,SI - ADDQ 80(SP),SI - ADDQ 88(SP),CX - ADDQ 96(SP),R8 - ADDQ 104(SP),R9 - ADDQ 112(SP),R10 - MOVQ SI,80(DI) - MOVQ CX,88(DI) - MOVQ R8,96(DI) - MOVQ R9,104(DI) - MOVQ R10,112(DI) - MOVQ 104(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 176(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 112(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 168(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 160(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 168(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 80(DI),AX - MULQ 176(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 80(DI),AX - MULQ 184(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 80(DI),AX - MULQ 192(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 88(DI),AX - MULQ 160(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 88(DI),AX - MULQ 168(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 88(DI),AX - MULQ 176(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 88(DI),AX - MULQ 184(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 88(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),AX - MULQ 160(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 96(DI),AX - MULQ 168(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 96(DI),AX - MULQ 176(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 104(DI),AX - MULQ 160(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 104(DI),AX - MULQ 168(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 184(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 192(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 112(DI),AX - MULQ 160(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 176(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 184(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 192(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(DI) - MOVQ R8,88(DI) - MOVQ R9,96(DI) - MOVQ AX,104(DI) - MOVQ R10,112(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go deleted file mode 100644 
index 5822bd533..000000000 --- a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!gccgo,!appengine - -package curve25519 - -// These functions are implemented in the .s files. The names of the functions -// in the rest of the file are also taken from the SUPERCOP sources to help -// people following along. - -//go:noescape - -func cswap(inout *[5]uint64, v uint64) - -//go:noescape - -func ladderstep(inout *[5][5]uint64) - -//go:noescape - -func freeze(inout *[5]uint64) - -//go:noescape - -func mul(dest, a, b *[5]uint64) - -//go:noescape - -func square(out, in *[5]uint64) - -// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. -func mladder(xr, zr *[5]uint64, s *[32]byte) { - var work [5][5]uint64 - - work[0] = *xr - setint(&work[1], 1) - setint(&work[2], 0) - work[3] = *xr - setint(&work[4], 1) - - j := uint(6) - var prevbit byte - - for i := 31; i >= 0; i-- { - for j < 8 { - bit := ((*s)[i] >> j) & 1 - swap := bit ^ prevbit - prevbit = bit - cswap(&work[1], uint64(swap)) - ladderstep(&work) - j-- - } - j = 7 - } - - *xr = work[1] - *zr = work[2] -} - -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - copy(e[:], (*in)[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var t, z [5]uint64 - unpack(&t, base) - mladder(&t, &z, &e) - invert(&z, &z) - mul(&t, &t, &z) - pack(out, &t) -} - -func setint(r *[5]uint64, v uint64) { - r[0] = v - r[1] = 0 - r[2] = 0 - r[3] = 0 - r[4] = 0 -} - -// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian -// order. -func unpack(r *[5]uint64, x *[32]byte) { - r[0] = uint64(x[0]) | - uint64(x[1])<<8 | - uint64(x[2])<<16 | - uint64(x[3])<<24 | - uint64(x[4])<<32 | - uint64(x[5])<<40 | - uint64(x[6]&7)<<48 - - r[1] = uint64(x[6])>>3 | - uint64(x[7])<<5 | - uint64(x[8])<<13 | - uint64(x[9])<<21 | - uint64(x[10])<<29 | - uint64(x[11])<<37 | - uint64(x[12]&63)<<45 - - r[2] = uint64(x[12])>>6 | - uint64(x[13])<<2 | - uint64(x[14])<<10 | - uint64(x[15])<<18 | - uint64(x[16])<<26 | - uint64(x[17])<<34 | - uint64(x[18])<<42 | - uint64(x[19]&1)<<50 - - r[3] = uint64(x[19])>>1 | - uint64(x[20])<<7 | - uint64(x[21])<<15 | - uint64(x[22])<<23 | - uint64(x[23])<<31 | - uint64(x[24])<<39 | - uint64(x[25]&15)<<47 - - r[4] = uint64(x[25])>>4 | - uint64(x[26])<<4 | - uint64(x[27])<<12 | - uint64(x[28])<<20 | - uint64(x[29])<<28 | - uint64(x[30])<<36 | - uint64(x[31]&127)<<44 -} - -// pack sets out = x where out is the usual, little-endian form of the 5, -// 51-bit limbs in x. 
-func pack(out *[32]byte, x *[5]uint64) { - t := *x - freeze(&t) - - out[0] = byte(t[0]) - out[1] = byte(t[0] >> 8) - out[2] = byte(t[0] >> 16) - out[3] = byte(t[0] >> 24) - out[4] = byte(t[0] >> 32) - out[5] = byte(t[0] >> 40) - out[6] = byte(t[0] >> 48) - - out[6] ^= byte(t[1]<<3) & 0xf8 - out[7] = byte(t[1] >> 5) - out[8] = byte(t[1] >> 13) - out[9] = byte(t[1] >> 21) - out[10] = byte(t[1] >> 29) - out[11] = byte(t[1] >> 37) - out[12] = byte(t[1] >> 45) - - out[12] ^= byte(t[2]<<6) & 0xc0 - out[13] = byte(t[2] >> 2) - out[14] = byte(t[2] >> 10) - out[15] = byte(t[2] >> 18) - out[16] = byte(t[2] >> 26) - out[17] = byte(t[2] >> 34) - out[18] = byte(t[2] >> 42) - out[19] = byte(t[2] >> 50) - - out[19] ^= byte(t[3]<<1) & 0xfe - out[20] = byte(t[3] >> 7) - out[21] = byte(t[3] >> 15) - out[22] = byte(t[3] >> 23) - out[23] = byte(t[3] >> 31) - out[24] = byte(t[3] >> 39) - out[25] = byte(t[3] >> 47) - - out[25] ^= byte(t[4]<<4) & 0xf0 - out[26] = byte(t[4] >> 4) - out[27] = byte(t[4] >> 12) - out[28] = byte(t[4] >> 20) - out[29] = byte(t[4] >> 28) - out[30] = byte(t[4] >> 36) - out[31] = byte(t[4] >> 44) -} - -// invert calculates r = x^-1 mod p using Fermat's little theorem. -func invert(r *[5]uint64, x *[5]uint64) { - var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 - - square(&z2, x) /* 2 */ - square(&t, &z2) /* 4 */ - square(&t, &t) /* 8 */ - mul(&z9, &t, x) /* 9 */ - mul(&z11, &z9, &z2) /* 11 */ - square(&t, &z11) /* 22 */ - mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ - - square(&t, &z2_5_0) /* 2^6 - 2^1 */ - for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ - - square(&t, &z2_10_0) /* 2^11 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ - - square(&t, &z2_20_0) /* 2^21 - 2^1 */ - for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ - square(&t, &t) - } - mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ - - square(&t, &t) /* 2^41 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ - square(&t, &t) - } - mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ - - square(&t, &z2_50_0) /* 2^51 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ - square(&t, &t) - } - mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ - - square(&t, &z2_100_0) /* 2^101 - 2^1 */ - for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ - square(&t, &t) - } - mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ - - square(&t, &t) /* 2^201 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ - square(&t, &t) - } - mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ - - square(&t, &t) /* 2^251 - 2^1 */ - square(&t, &t) /* 2^252 - 2^2 */ - square(&t, &t) /* 2^253 - 2^3 */ - - square(&t, &t) /* 2^254 - 2^4 */ - - square(&t, &t) /* 2^255 - 2^5 */ - mul(r, &t, &z11) /* 2^255 - 21 */ -} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s deleted file mode 100644 index 1f76d1a3f..000000000 --- a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func mul(dest, a, b *[5]uint64) -TEXT ·mul(SB),0,$16-24 - MOVQ dest+0(FP), DI - MOVQ a+8(FP), SI - MOVQ b+16(FP), DX - - MOVQ DX,CX - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,0(SP) - MULQ 16(CX) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 0(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 8(CX) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SI),AX - MULQ 16(CX) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SI),AX - MULQ 24(CX) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 0(SI),AX - MULQ 32(CX) - MOVQ AX,BX - MOVQ DX,BP - MOVQ 8(SI),AX - MULQ 0(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SI),AX - MULQ 8(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SI),AX - MULQ 16(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SI),AX - MULQ 24(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),AX - MULQ 0(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 16(SI),AX - MULQ 8(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SI),AX - MULQ 16(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 24(SI),AX - MULQ 0(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 24(SI),AX - MULQ 8(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 0(SP),AX - MULQ 24(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 0(SP),AX - MULQ 32(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 32(SI),AX - MULQ 0(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SP),AX - MULQ 16(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 24(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - MULQ 32(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ $REDMASK51,SI - SHLQ $13,R8,R9 - ANDQ SI,R8 - SHLQ $13,R10,R11 - ANDQ SI,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ SI,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ SI,R14 - ADDQ R13,R14 - SHLQ $13,BX,BP - ANDQ SI,BX - ADDQ R15,BX - IMUL3Q $19,BP,DX - ADDQ DX,R8 - MOVQ R8,DX - SHRQ $51,DX - ADDQ R10,DX - MOVQ DX,CX - SHRQ $51,DX - ANDQ SI,R8 - ADDQ R12,DX - MOVQ DX,R9 - SHRQ $51,DX - ANDQ SI,CX - ADDQ R14,DX - MOVQ DX,AX - SHRQ $51,DX - ANDQ SI,R9 - ADDQ BX,DX - MOVQ DX,R10 - SHRQ $51,DX - ANDQ SI,AX - IMUL3Q $19,DX,DX - ADDQ DX,R8 - ANDQ SI,R10 - MOVQ R8,0(DI) - MOVQ CX,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s deleted file mode 100644 index 07511a45a..000000000 --- a/vendor/golang.org/x/crypto/curve25519/square_amd64.s +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func square(out, in *[5]uint64) -TEXT ·square(SB),7,$0-16 - MOVQ out+0(FP), DI - MOVQ in+8(FP), SI - - MOVQ 0(SI),AX - MULQ 0(SI) - MOVQ AX,CX - MOVQ DX,R8 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 8(SI) - MOVQ AX,R9 - MOVQ DX,R10 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 16(SI) - MOVQ AX,R11 - MOVQ DX,R12 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 24(SI) - MOVQ AX,R13 - MOVQ DX,R14 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 32(SI) - MOVQ AX,R15 - MOVQ DX,BX - MOVQ 8(SI),AX - MULQ 8(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 16(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 24(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 8(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),AX - MULQ 16(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 24(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ $REDMASK51,SI - SHLQ $13,CX,R8 - ANDQ SI,CX - SHLQ $13,R9,R10 - ANDQ SI,R9 - ADDQ R8,R9 - SHLQ $13,R11,R12 - ANDQ SI,R11 - ADDQ R10,R11 - SHLQ $13,R13,R14 - ANDQ SI,R13 - ADDQ R12,R13 - SHLQ $13,R15,BX - ANDQ SI,R15 - ADDQ R14,R15 - IMUL3Q $19,BX,DX - ADDQ DX,CX - MOVQ CX,DX - SHRQ $51,DX - ADDQ R9,DX - ANDQ SI,CX - MOVQ DX,R8 - SHRQ $51,DX - ADDQ R11,DX - ANDQ SI,R8 - MOVQ DX,R9 - SHRQ $51,DX - ADDQ R13,DX - ANDQ SI,R9 - MOVQ DX,AX - SHRQ $51,DX - ADDQ R15,DX - ANDQ SI,AX - MOVQ DX,R10 - SHRQ $51,DX - IMUL3Q $19,DX,DX - ADDQ DX,CX - ANDQ SI,R10 - MOVQ CX,0(DI) - MOVQ R8,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index d6f683ba3..000000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -package ed25519 - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -import ( - "bytes" - "crypto" - cryptorand "crypto/rand" - "crypto/sha512" - "errors" - "io" - "strconv" - - "golang.org/x/crypto/ed25519/internal/edwards25519" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. 
- SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -type PublicKey []byte - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -type PrivateKey []byte - -// Public returns the PublicKey corresponding to priv. -func (priv PrivateKey) Public() crypto.PublicKey { - publicKey := make([]byte, PublicKeySize) - copy(publicKey, priv[32:]) - return PublicKey(publicKey) -} - -// Seed returns the private key seed corresponding to priv. It is provided for -// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds -// in this package. -func (priv PrivateKey) Seed() []byte { - seed := make([]byte, SeedSize) - copy(seed, priv[:32]) - return seed -} - -// Sign signs the given message with priv. -// Ed25519 performs two passes over messages to be signed and therefore cannot -// handle pre-hashed messages. Thus opts.HashFunc() must return zero to -// indicate the message hasn't been hashed. This can be achieved by passing -// crypto.Hash(0) as the value for opts. -func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { - if opts.HashFunc() != crypto.Hash(0) { - return nil, errors.New("ed25519: cannot sign hashed message") - } - - return Sign(priv, message), nil -} - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - if rand == nil { - rand = cryptorand.Reader - } - - seed := make([]byte, SeedSize) - if _, err := io.ReadFull(rand, seed); err != nil { - return nil, nil, err - } - - privateKey := NewKeyFromSeed(seed) - publicKey := make([]byte, PublicKeySize) - copy(publicKey, privateKey[32:]) - - return publicKey, privateKey, nil -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - if l := len(seed); l != SeedSize { - panic("ed25519: bad seed length: " + strconv.Itoa(l)) - } - - digest := sha512.Sum512(seed) - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest[:]) - edwards25519.GeScalarMultBase(&A, &hBytes) - var publicKeyBytes [32]byte - A.ToBytes(&publicKeyBytes) - - privateKey := make([]byte, PrivateKeySize) - copy(privateKey, seed) - copy(privateKey[32:], publicKeyBytes[:]) - - return privateKey -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. 
-func Sign(privateKey PrivateKey, message []byte) []byte { - if l := len(privateKey); l != PrivateKeySize { - panic("ed25519: bad private key length: " + strconv.Itoa(l)) - } - - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := make([]byte, SignatureSize) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - - return signature -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - if l := len(publicKey); l != PublicKeySize { - panic("ed25519: bad public key length: " + strconv.Itoa(l)) - } - - if len(sig) != SignatureSize || sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - var publicKeyBytes [32]byte - copy(publicKeyBytes[:], publicKey) - if !A.FromBytes(&publicKeyBytes) { - return false - } - edwards25519.FeNeg(&A.X, &A.X) - edwards25519.FeNeg(&A.T, &A.T) - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var s [32]byte - copy(s[:], sig[32:]) - - // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in - // the range [0, order) in order to prevent signature malleability. - if !edwards25519.ScMinimal(&s) { - return false - } - - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) - - var checkR [32]byte - R.ToBytes(&checkR) - return bytes.Equal(sig[:32], checkR[:]) -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go deleted file mode 100644 index e39f086c1..000000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go +++ /dev/null @@ -1,1422 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -// These values are from the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// d is a constant in the Edwards curve equation. -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -// d2 is 2*d. -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -// SqrtM1 is the square-root of -1 in the field. 
-var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -// A is a constant in the Montgomery-form of curve25519. -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -// bi contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -// base contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. 
-var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, 
- { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go deleted file mode 100644 index fd03c252a..000000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go +++ /dev/null @@ -1,1793 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package edwards25519 - -import "encoding/binary" - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -var zero FieldElement - -func FeZero(fe *FieldElement) { - copy(fe[:], zero[:]) -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - dst[0] = a[0] + b[0] - dst[1] = a[1] + b[1] - dst[2] = a[2] + b[2] - dst[3] = a[3] + b[3] - dst[4] = a[4] + b[4] - dst[5] = a[5] + b[5] - dst[6] = a[6] + b[6] - dst[7] = a[7] + b[7] - dst[8] = a[8] + b[8] - dst[9] = a[9] + b[9] -} - -func FeSub(dst, a, b *FieldElement) { - dst[0] = a[0] - b[0] - dst[1] = a[1] - b[1] - dst[2] = a[2] - b[2] - dst[3] = a[3] - b[3] - dst[4] = a[4] - b[4] - dst[5] = a[5] - b[5] - dst[6] = a[6] - b[6] - dst[7] = a[7] - b[7] - dst[8] = a[8] - b[8] - dst[9] = a[9] - b[9] -} - -func FeCopy(dst, src *FieldElement) { - copy(dst[:], src[:]) -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - b = -b - f[0] ^= b & (f[0] ^ g[0]) - f[1] ^= b & (f[1] ^ g[1]) - f[2] ^= b & (f[2] ^ g[2]) - f[3] ^= b & (f[3] ^ g[3]) - f[4] ^= b & (f[4] ^ g[4]) - f[5] ^= b & (f[5] ^ g[5]) - f[6] ^= b & (f[6] ^ g[6]) - f[7] ^= b & (f[7] ^ g[7]) - f[8] ^= b & (f[8] ^ g[8]) - f[9] ^= b & (f[9] ^ g[9]) -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. -// -// Write x=r+19(2^-255)r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func FeToBytes(s *[32]byte, h *FieldElement) { - var carry [10]int32 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - h[0] = -f[0] - h[1] = -f[1] - h[2] = -f[2] - h[3] = -f[3] - h[4] = -f[4] - h[5] = -f[5] - h[6] = -f[6] - h[7] = -f[7] - h[8] = -f[8] - h[9] = -f[9] -} - -func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - c1 = (h1 + (1 << 24)) >> 25 - h2 += c1 - h1 -= c1 << 25 - c5 = (h5 + (1 << 24)) >> 25 - h6 += c5 - h5 -= c5 << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - c2 = (h2 + (1 << 25)) >> 26 - h3 += c2 - h2 -= c2 << 26 - c6 = (h6 + (1 << 25)) >> 26 - h7 += c6 - h6 -= c6 << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - c3 = (h3 + (1 << 24)) >> 25 - h4 += c3 - h3 -= c3 << 25 - c7 = (h7 + (1 << 24)) >> 25 - h8 += c7 - h7 -= c7 << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - c8 = (h8 + (1 << 25)) >> 26 - h9 += c8 - h8 -= c8 << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - c9 = (h9 + (1 << 24)) >> 25 - h0 += c9 * 19 - h9 -= c9 << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs, can squeeze carries into int32. 
-func FeMul(h, f, g *FieldElement) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - - f1_2 := int64(2 * f[1]) - f3_2 := int64(2 * f[3]) - f5_2 := int64(2 * f[5]) - f7_2 := int64(2 * f[7]) - f9_2 := int64(2 * f[9]) - - g0 := int64(g[0]) - g1 := int64(g[1]) - g2 := int64(g[2]) - g3 := int64(g[3]) - g4 := int64(g[4]) - g5 := int64(g[5]) - g6 := int64(g[6]) - g7 := int64(g[7]) - g8 := int64(g[8]) - g9 := int64(g[9]) - - g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ - g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ - g3_19 := int64(19 * g[3]) - g4_19 := int64(19 * g[4]) - g5_19 := int64(19 * g[5]) - g6_19 := int64(19 * g[6]) - g7_19 := int64(19 * g[7]) - g8_19 := int64(19 * g[8]) - g9_19 := int64(19 * g[9]) - - h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 - h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 - h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 - h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 - h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 - h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 - h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 - h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 - h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 - h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - f0_2 := int64(2 * f[0]) - f1_2 := int64(2 * f[1]) - f2_2 := int64(2 * f[2]) - f3_2 := int64(2 * f[3]) - f4_2 := int64(2 * f[4]) - f5_2 := int64(2 * f[5]) - f6_2 := int64(2 * f[6]) - f7_2 := int64(2 * f[7]) - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - - h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 - h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 - h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 - h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 - h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 - h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 - h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 - h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 - h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 - h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 - - return -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
-func FeSquare(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. 
-// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<<uint(b)) <= 15 { - r[i] += r[i+b] << uint(b) - r[i+b] = 0 - } else if r[i]-(r[i+b]<<uint(b)) >= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise, assuming that b and c are -// non-negative. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise.
-func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
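// Editor's note: an illustrative sketch, not part of the vendored diff; the
// names here (package sketch, reduceLE64) are the editor's own. The unrolled
// limb arithmetic in ScReduce above computes s mod l for the Ed25519 group
// order l = 2^252 + 27742317777372353535851937790883648493, reading s as a
// 64-byte little-endian integer and writing a 32-byte little-endian result;
// the hand-written carry chains keep it allocation-free and branch-free. A
// much slower math/big rendering of the same operation, for readability only:
package sketch

import "math/big"

// l is the order of the Ed25519 base-point subgroup, quoted from the
// ScReduce comment above (2^252 + 27742317777372353535851937790883648493).
var l, _ = new(big.Int).SetString(
	"7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)

// reduceLE64 interprets s as a little-endian 64-byte integer and returns
// s mod l as a little-endian 32-byte array.
func reduceLE64(s *[64]byte) [32]byte {
	be := make([]byte, 64) // math/big wants big-endian bytes, so reverse
	for i := range be {
		be[i] = s[63-i]
	}
	n := new(big.Int).SetBytes(be)
	n.Mod(n, l)

	var out [32]byte
	nb := n.Bytes() // big-endian, at most 32 bytes after the reduction
	for i, b := range nb {
		out[len(nb)-1-i] = b // reverse back to little-endian
	}
	return out
}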
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} - -// order is the order of Curve25519 in little-endian form. -var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} - -// ScMinimal returns true if the given scalar is less than the order of the -// curve. -func ScMinimal(scalar *[32]byte) bool { - for i := 3; ; i-- { - v := binary.LittleEndian.Uint64(scalar[i*8:]) - if v > order[i] { - return false - } else if v < order[i] { - break - } else if i == 0 { - return false - } - } - - return true -} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s b/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s deleted file mode 100644 index b3a16ef75..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11 -// +build !gccgo,!appengine - -#include "textflag.h" - -#define NUM_ROUNDS 10 - -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) -TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 - MOVD dst+0(FP), R1 - MOVD src+24(FP), R2 - MOVD src_len+32(FP), R3 - MOVD key+48(FP), R4 - MOVD nonce+56(FP), R6 - MOVD counter+64(FP), R7 - - MOVD $·constants(SB), R10 - MOVD $·incRotMatrix(SB), R11 - - MOVW (R7), R20 - - AND $~255, R3, R13 - ADD R2, R13, R12 // R12 for block end - AND $255, R3, R13 -loop: - MOVD $NUM_ROUNDS, R21 - VLD1 (R11), [V30.S4, V31.S4] - - // load contants - // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] - WORD $0x4D60E940 - - // load keys - // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] - WORD $0x4DFFE884 - // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] - WORD $0x4DFFE888 - SUB $32, R4 - - // load counter + nonce - // VLD1R (R7), [V12.S4] - WORD $0x4D40C8EC - - // VLD3R (R6), [V13.S4, V14.S4, V15.S4] - WORD $0x4D40E8CD - - // update counter - VADD V30.S4, V12.S4, V12.S4 - -chacha: - // V0..V3 += V4..V7 - // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) - VADD V0.S4, V4.S4, V0.S4 - VADD V1.S4, V5.S4, V1.S4 - VADD V2.S4, V6.S4, V2.S4 - VADD V3.S4, V7.S4, V3.S4 - VEOR V12.B16, V0.B16, V12.B16 - VEOR V13.B16, V1.B16, V13.B16 - VEOR V14.B16, V2.B16, V14.B16 - VEOR V15.B16, V3.B16, V15.B16 - VREV32 V12.H8, V12.H8 - VREV32 V13.H8, V13.H8 - VREV32 V14.H8, V14.H8 - VREV32 V15.H8, V15.H8 - // V8..V11 += V12..V15 - // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) - VADD V8.S4, V12.S4, V8.S4 - VADD V9.S4, V13.S4, V9.S4 - VADD V10.S4, V14.S4, V10.S4 - VADD V11.S4, V15.S4, V11.S4 - VEOR V8.B16, V4.B16, V16.B16 - VEOR V9.B16, V5.B16, V17.B16 - VEOR V10.B16, V6.B16, V18.B16 - VEOR V11.B16, V7.B16, V19.B16 - VSHL $12, V16.S4, V4.S4 - VSHL $12, V17.S4, V5.S4 - VSHL $12, V18.S4, V6.S4 - VSHL $12, V19.S4, V7.S4 - VSRI $20, V16.S4, V4.S4 - VSRI $20, V17.S4, V5.S4 - VSRI $20, V18.S4, V6.S4 - VSRI $20, V19.S4, V7.S4 - - // V0..V3 += V4..V7 - // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) - VADD V0.S4, V4.S4, V0.S4 - VADD V1.S4, V5.S4, V1.S4 - VADD V2.S4, V6.S4, V2.S4 - VADD V3.S4, V7.S4, V3.S4 - VEOR V12.B16, V0.B16, V12.B16 - VEOR V13.B16, V1.B16, V13.B16 - VEOR V14.B16, 
V2.B16, V14.B16 - VEOR V15.B16, V3.B16, V15.B16 - VTBL V31.B16, [V12.B16], V12.B16 - VTBL V31.B16, [V13.B16], V13.B16 - VTBL V31.B16, [V14.B16], V14.B16 - VTBL V31.B16, [V15.B16], V15.B16 - - // V8..V11 += V12..V15 - // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) - VADD V12.S4, V8.S4, V8.S4 - VADD V13.S4, V9.S4, V9.S4 - VADD V14.S4, V10.S4, V10.S4 - VADD V15.S4, V11.S4, V11.S4 - VEOR V8.B16, V4.B16, V16.B16 - VEOR V9.B16, V5.B16, V17.B16 - VEOR V10.B16, V6.B16, V18.B16 - VEOR V11.B16, V7.B16, V19.B16 - VSHL $7, V16.S4, V4.S4 - VSHL $7, V17.S4, V5.S4 - VSHL $7, V18.S4, V6.S4 - VSHL $7, V19.S4, V7.S4 - VSRI $25, V16.S4, V4.S4 - VSRI $25, V17.S4, V5.S4 - VSRI $25, V18.S4, V6.S4 - VSRI $25, V19.S4, V7.S4 - - // V0..V3 += V5..V7, V4 - // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) - VADD V0.S4, V5.S4, V0.S4 - VADD V1.S4, V6.S4, V1.S4 - VADD V2.S4, V7.S4, V2.S4 - VADD V3.S4, V4.S4, V3.S4 - VEOR V15.B16, V0.B16, V15.B16 - VEOR V12.B16, V1.B16, V12.B16 - VEOR V13.B16, V2.B16, V13.B16 - VEOR V14.B16, V3.B16, V14.B16 - VREV32 V12.H8, V12.H8 - VREV32 V13.H8, V13.H8 - VREV32 V14.H8, V14.H8 - VREV32 V15.H8, V15.H8 - - // V10 += V15; V5 <<<= ((V10 XOR V5), 12) - // ... - VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $12, V16.S4, V5.S4 - VSHL $12, V17.S4, V6.S4 - VSHL $12, V18.S4, V7.S4 - VSHL $12, V19.S4, V4.S4 - VSRI $20, V16.S4, V5.S4 - VSRI $20, V17.S4, V6.S4 - VSRI $20, V18.S4, V7.S4 - VSRI $20, V19.S4, V4.S4 - - // V0 += V5; V15 <<<= ((V0 XOR V15), 8) - // ... - VADD V5.S4, V0.S4, V0.S4 - VADD V6.S4, V1.S4, V1.S4 - VADD V7.S4, V2.S4, V2.S4 - VADD V4.S4, V3.S4, V3.S4 - VEOR V0.B16, V15.B16, V15.B16 - VEOR V1.B16, V12.B16, V12.B16 - VEOR V2.B16, V13.B16, V13.B16 - VEOR V3.B16, V14.B16, V14.B16 - VTBL V31.B16, [V12.B16], V12.B16 - VTBL V31.B16, [V13.B16], V13.B16 - VTBL V31.B16, [V14.B16], V14.B16 - VTBL V31.B16, [V15.B16], V15.B16 - - // V10 += V15; V5 <<<= ((V10 XOR V5), 7) - // ... 
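// Editor's note: an illustrative sketch, not part of the deleted assembly.
// The VSHL/VSRI pairs (rotates by 12 and 7) and the VREV32/VTBL shuffles
// (rotates by 16 and 8) above all perform 32-bit left-rotates on four state
// words at once; the generic chacha_generic.go removed later in this diff
// writes the same rotates as (x<<n)|(x>>(32-n)). The scalar quarter round
// below, using math/bits, is equivalent; the names are the editor's.
package sketch

import "math/bits"

// quarterRound is the RFC 7539 ChaCha quarter round. The rotation amounts
// 16, 12, 8 and 7 are the ones the vector code applies lane-wise.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}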
- VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $7, V16.S4, V5.S4 - VSHL $7, V17.S4, V6.S4 - VSHL $7, V18.S4, V7.S4 - VSHL $7, V19.S4, V4.S4 - VSRI $25, V16.S4, V5.S4 - VSRI $25, V17.S4, V6.S4 - VSRI $25, V18.S4, V7.S4 - VSRI $25, V19.S4, V4.S4 - - SUB $1, R21 - CBNZ R21, chacha - - // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] - WORD $0x4D60E950 - - // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] - WORD $0x4DFFE894 - VADD V30.S4, V12.S4, V12.S4 - VADD V16.S4, V0.S4, V0.S4 - VADD V17.S4, V1.S4, V1.S4 - VADD V18.S4, V2.S4, V2.S4 - VADD V19.S4, V3.S4, V3.S4 - // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] - WORD $0x4DFFE898 - // restore R4 - SUB $32, R4 - - // load counter + nonce - // VLD1R (R7), [V28.S4] - WORD $0x4D40C8FC - // VLD3R (R6), [V29.S4, V30.S4, V31.S4] - WORD $0x4D40E8DD - - VADD V20.S4, V4.S4, V4.S4 - VADD V21.S4, V5.S4, V5.S4 - VADD V22.S4, V6.S4, V6.S4 - VADD V23.S4, V7.S4, V7.S4 - VADD V24.S4, V8.S4, V8.S4 - VADD V25.S4, V9.S4, V9.S4 - VADD V26.S4, V10.S4, V10.S4 - VADD V27.S4, V11.S4, V11.S4 - VADD V28.S4, V12.S4, V12.S4 - VADD V29.S4, V13.S4, V13.S4 - VADD V30.S4, V14.S4, V14.S4 - VADD V31.S4, V15.S4, V15.S4 - - VZIP1 V1.S4, V0.S4, V16.S4 - VZIP2 V1.S4, V0.S4, V17.S4 - VZIP1 V3.S4, V2.S4, V18.S4 - VZIP2 V3.S4, V2.S4, V19.S4 - VZIP1 V5.S4, V4.S4, V20.S4 - VZIP2 V5.S4, V4.S4, V21.S4 - VZIP1 V7.S4, V6.S4, V22.S4 - VZIP2 V7.S4, V6.S4, V23.S4 - VZIP1 V9.S4, V8.S4, V24.S4 - VZIP2 V9.S4, V8.S4, V25.S4 - VZIP1 V11.S4, V10.S4, V26.S4 - VZIP2 V11.S4, V10.S4, V27.S4 - VZIP1 V13.S4, V12.S4, V28.S4 - VZIP2 V13.S4, V12.S4, V29.S4 - VZIP1 V15.S4, V14.S4, V30.S4 - VZIP2 V15.S4, V14.S4, V31.S4 - VZIP1 V18.D2, V16.D2, V0.D2 - VZIP2 V18.D2, V16.D2, V4.D2 - VZIP1 V19.D2, V17.D2, V8.D2 - VZIP2 V19.D2, V17.D2, V12.D2 - VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] - - VZIP1 V22.D2, V20.D2, V1.D2 - VZIP2 V22.D2, V20.D2, V5.D2 - VZIP1 V23.D2, V21.D2, V9.D2 - VZIP2 V23.D2, V21.D2, V13.D2 - VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] - VZIP1 V26.D2, V24.D2, V2.D2 - VZIP2 V26.D2, V24.D2, V6.D2 - VZIP1 V27.D2, V25.D2, V10.D2 - VZIP2 V27.D2, V25.D2, V14.D2 - VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] - VZIP1 V30.D2, V28.D2, V3.D2 - VZIP2 V30.D2, V28.D2, V7.D2 - VZIP1 V31.D2, V29.D2, V11.D2 - VZIP2 V31.D2, V29.D2, V15.D2 - VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] - VEOR V0.B16, V16.B16, V16.B16 - VEOR V1.B16, V17.B16, V17.B16 - VEOR V2.B16, V18.B16, V18.B16 - VEOR V3.B16, V19.B16, V19.B16 - VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) - VEOR V4.B16, V20.B16, V20.B16 - VEOR V5.B16, V21.B16, V21.B16 - VEOR V6.B16, V22.B16, V22.B16 - VEOR V7.B16, V23.B16, V23.B16 - VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) - VEOR V8.B16, V24.B16, V24.B16 - VEOR V9.B16, V25.B16, V25.B16 - VEOR V10.B16, V26.B16, V26.B16 - VEOR V11.B16, V27.B16, V27.B16 - VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) - VEOR V12.B16, V28.B16, V28.B16 - VEOR V13.B16, V29.B16, V29.B16 - VEOR V14.B16, V30.B16, V30.B16 - VEOR V15.B16, V31.B16, V31.B16 - VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) - - ADD $4, R20 - MOVW R20, (R7) // update counter - - CMP R2, R12 - BGT loop - - RET - - -DATA ·constants+0x00(SB)/4, $0x61707865 -DATA ·constants+0x04(SB)/4, $0x3320646e -DATA ·constants+0x08(SB)/4, $0x79622d32 -DATA ·constants+0x0c(SB)/4, $0x6b206574 -GLOBL ·constants(SB), 
NOPTR|RODATA, $32 - -DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 -DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 -DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 -DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 -DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 -DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 -DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B -DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F -GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go deleted file mode 100644 index ad74e23ae..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11 -// +build !gccgo - -package chacha20 - -const ( - haveAsm = true - bufSize = 256 -) - -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { - - if len(src) >= bufSize { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) - } - - if len(src)%bufSize != 0 { - i := len(src) - len(src)%bufSize - c.buf = [bufSize]byte{} - copy(c.buf[:], src[i:]) - xorKeyStreamVX(c.buf[:], c.buf[:], &c.key, &c.nonce, &c.counter) - c.len = bufSize - copy(dst[i:], c.buf[:len(src)%bufSize]) - } -} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go deleted file mode 100644 index 6570847f5..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ChaCha20 implements the core ChaCha20 function as specified -// in https://tools.ietf.org/html/rfc7539#section-2.3. -package chacha20 - -import ( - "crypto/cipher" - "encoding/binary" - - "golang.org/x/crypto/internal/subtle" -) - -// assert that *Cipher implements cipher.Stream -var _ cipher.Stream = (*Cipher)(nil) - -// Cipher is a stateful instance of ChaCha20 using a particular key -// and nonce. A *Cipher implements the cipher.Stream interface. -type Cipher struct { - key [8]uint32 - counter uint32 // incremented after each block - nonce [3]uint32 - buf [bufSize]byte // buffer for unused keystream bytes - len int // number of unused keystream bytes at end of buf -} - -// New creates a new ChaCha20 stream cipher with the given key and nonce. -// The initial counter value is set to 0. -func New(key [8]uint32, nonce [3]uint32) *Cipher { - return &Cipher{key: key, nonce: nonce} -} - -// ChaCha20 constants spelling "expand 32-byte k" -const ( - j0 uint32 = 0x61707865 - j1 uint32 = 0x3320646e - j2 uint32 = 0x79622d32 - j3 uint32 = 0x6b206574 -) - -func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { - a += b - d ^= a - d = (d << 16) | (d >> 16) - c += d - b ^= c - b = (b << 12) | (b >> 20) - a += b - d ^= a - d = (d << 8) | (d >> 24) - c += d - b ^= c - b = (b << 7) | (b >> 25) - return a, b, c, d -} - -// XORKeyStream XORs each byte in the given slice with a byte from the -// cipher's key stream. Dst and src must overlap entirely or not at all. -// -// If len(dst) < len(src), XORKeyStream will panic. 
It is acceptable -// to pass a dst bigger than src, and in that case, XORKeyStream will -// only update dst[:len(src)] and will not touch the rest of dst. -// -// Multiple calls to XORKeyStream behave as if the concatenation of -// the src buffers was passed in a single run. That is, Cipher -// maintains state and does not reset at each XORKeyStream call. -func (s *Cipher) XORKeyStream(dst, src []byte) { - if len(dst) < len(src) { - panic("chacha20: output smaller than input") - } - if subtle.InexactOverlap(dst[:len(src)], src) { - panic("chacha20: invalid buffer overlap") - } - - // xor src with buffered keystream first - if s.len != 0 { - buf := s.buf[len(s.buf)-s.len:] - if len(src) < len(buf) { - buf = buf[:len(src)] - } - td, ts := dst[:len(buf)], src[:len(buf)] // BCE hint - for i, b := range buf { - td[i] = ts[i] ^ b - } - s.len -= len(buf) - if s.len != 0 { - return - } - s.buf = [len(s.buf)]byte{} // zero the empty buffer - src = src[len(buf):] - dst = dst[len(buf):] - } - - if len(src) == 0 { - return - } - if haveAsm { - if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 { - panic("chacha20: counter overflow") - } - s.xorKeyStreamAsm(dst, src) - return - } - - // set up a 64-byte buffer to pad out the final block if needed - // (hoisted out of the main loop to avoid spills) - rem := len(src) % 64 // length of final block - fin := len(src) - rem // index of final block - if rem > 0 { - copy(s.buf[len(s.buf)-64:], src[fin:]) - } - - // pre-calculate most of the first round - s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0]) - s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1]) - s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2]) - - n := len(src) - src, dst = src[:n:n], dst[:n:n] // BCE hint - for i := 0; i < n; i += 64 { - // calculate the remainder of the first round - s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter) - - // execute the second round - x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15) - x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12) - x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13) - x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14) - - // execute the remaining 18 rounds - for i := 0; i < 9; i++ { - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - - x4 += s.key[0] - x5 += s.key[1] - x6 += s.key[2] - x7 += s.key[3] - x8 += s.key[4] - x9 += s.key[5] - x10 += s.key[6] - x11 += s.key[7] - - x12 += s.counter - x13 += s.nonce[0] - x14 += s.nonce[1] - x15 += s.nonce[2] - - // increment the counter - s.counter += 1 - if s.counter == 0 { - panic("chacha20: counter overflow") - } - - // pad to 64 bytes if needed - in, out := src[i:], dst[i:] - if i == fin { - // src[fin:] has already been copied into s.buf before - // the main loop - in, out = s.buf[len(s.buf)-64:], s.buf[len(s.buf)-64:] - } - in, out = in[:64], out[:64] // BCE hint - - // XOR the key stream with the source and write out the result - xor(out[0:], in[0:], x0) - xor(out[4:], in[4:], x1) - xor(out[8:], in[8:], x2) - xor(out[12:], in[12:], x3) - xor(out[16:], in[16:], x4) - xor(out[20:], in[20:], x5) - xor(out[24:], in[24:], x6) - 
xor(out[28:], in[28:], x7) - xor(out[32:], in[32:], x8) - xor(out[36:], in[36:], x9) - xor(out[40:], in[40:], x10) - xor(out[44:], in[44:], x11) - xor(out[48:], in[48:], x12) - xor(out[52:], in[52:], x13) - xor(out[56:], in[56:], x14) - xor(out[60:], in[60:], x15) - } - // copy any trailing bytes out of the buffer and into dst - if rem != 0 { - s.len = 64 - rem - copy(dst[fin:], s.buf[len(s.buf)-64:]) - } -} - -// Advance discards bytes in the key stream until the next 64 byte block -// boundary is reached and updates the counter accordingly. If the key -// stream is already at a block boundary no bytes will be discarded and -// the counter will be unchanged. -func (s *Cipher) Advance() { - s.len -= s.len % 64 - if s.len == 0 { - s.buf = [len(s.buf)]byte{} - } -} - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out must overlap entirely or not at all. Counter contains the raw -// ChaCha20 counter bytes (i.e. block counter followed by nonce). -func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - s := Cipher{ - key: [8]uint32{ - binary.LittleEndian.Uint32(key[0:4]), - binary.LittleEndian.Uint32(key[4:8]), - binary.LittleEndian.Uint32(key[8:12]), - binary.LittleEndian.Uint32(key[12:16]), - binary.LittleEndian.Uint32(key[16:20]), - binary.LittleEndian.Uint32(key[20:24]), - binary.LittleEndian.Uint32(key[24:28]), - binary.LittleEndian.Uint32(key[28:32]), - }, - nonce: [3]uint32{ - binary.LittleEndian.Uint32(counter[4:8]), - binary.LittleEndian.Uint32(counter[8:12]), - binary.LittleEndian.Uint32(counter[12:16]), - }, - counter: binary.LittleEndian.Uint32(counter[0:4]), - } - s.XORKeyStream(out, in) -} - -// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a -// nonce. It should only be used as part of the XChaCha20 construction. -func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 { - x0, x1, x2, x3 := j0, j1, j2, j3 - x4, x5, x6, x7 := key[0], key[1], key[2], key[3] - x8, x9, x10, x11 := key[4], key[5], key[6], key[7] - x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3] - - for i := 0; i < 10; i++ { - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - var out [8]uint32 - out[0], out[1], out[2], out[3] = x0, x1, x2, x3 - out[4], out[5], out[6], out[7] = x12, x13, x14, x15 - return out -} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go deleted file mode 100644 index 47eac0314..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
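// Editor's note: an illustrative usage sketch, not part of the diff. The
// package-level XORKeyStream deleted above takes its counter and nonce as a
// single 16-byte value: bytes 0-3 are the little-endian block counter and
// bytes 4-15 are the 96-bit nonce, exactly as the function decodes them with
// binary.LittleEndian. A caller would pack that value like this (makeCounter
// and the package name are the editor's, not part of the deleted package).
package sketch

import "encoding/binary"

// makeCounter packs a block counter and a 12-byte nonce into the 16-byte
// layout expected by the deleted chacha20.XORKeyStream helper.
func makeCounter(blockCounter uint32, nonce [12]byte) [16]byte {
	var c [16]byte
	binary.LittleEndian.PutUint32(c[0:4], blockCounter)
	copy(c[4:], nonce[:])
	return c
}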
- -// +build !arm64,!s390x arm64,!go1.11 gccgo appengine - -package chacha20 - -const ( - bufSize = 64 - haveAsm = false -) - -func (*Cipher) xorKeyStreamAsm(dst, src []byte) { - panic("not implemented") -} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go deleted file mode 100644 index aad645b44..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,!gccgo,!appengine - -package chacha20 - -import ( - "golang.org/x/sys/cpu" -) - -var haveAsm = cpu.S390X.HasVX - -const bufSize = 256 - -// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only -// be called when the vector facility is available. -// Implementation in asm_s390x.s. -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int) - -func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len) -} - -// EXRL targets, DO NOT CALL! -func mvcSrcToBuf() -func mvcBufToDst() diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s deleted file mode 100644 index 57df40446..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,!gccgo,!appengine - -#include "go_asm.h" -#include "textflag.h" - -// This is an implementation of the ChaCha20 encryption algorithm as -// specified in RFC 7539. It uses vector instructions to compute -// 4 keystream blocks in parallel (256 bytes) which are then XORed -// with the bytes in the input slice. 
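// Editor's note: an illustrative sketch, not part of the deleted s390x code.
// The vector implementations removed in this diff and the generic
// chacha_generic.go all operate on the same 16-word state (the s390x code
// below loads it into its J0/KEY0/KEY1/CTR/NONCE registers): four
// "expand 32-byte k" constants, eight key words, a 32-bit block counter and
// a 96-bit nonce, in the RFC 7539 layout. The helper below builds that
// state; the names are the editor's, not the package's.
package sketch

// chachaConstants spell "expand 32-byte k", matching j0..j3 in the deleted
// chacha_generic.go and the ·constants data in the assembly files.
var chachaConstants = [4]uint32{0x61707865, 0x3320646e, 0x79622d32, 0x6b206574}

// initialState lays out constants, key, counter and nonce as words 0-15.
func initialState(key [8]uint32, counter uint32, nonce [3]uint32) [16]uint32 {
	var s [16]uint32
	copy(s[0:4], chachaConstants[:])
	copy(s[4:12], key[:])
	s[12] = counter
	s[13], s[14], s[15] = nonce[0], nonce[1], nonce[2]
	return s
}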
- -GLOBL ·constants<>(SB), RODATA|NOPTR, $32 -// BSWAP: swap bytes in each 4-byte element -DATA ·constants<>+0x00(SB)/4, $0x03020100 -DATA ·constants<>+0x04(SB)/4, $0x07060504 -DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 -DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c -// J0: [j0, j1, j2, j3] -DATA ·constants<>+0x10(SB)/4, $0x61707865 -DATA ·constants<>+0x14(SB)/4, $0x3320646e -DATA ·constants<>+0x18(SB)/4, $0x79622d32 -DATA ·constants<>+0x1c(SB)/4, $0x6b206574 - -// EXRL targets: -TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0 - MVC $1, (R1), (R8) - RET - -TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0 - MVC $1, (R8), (R9) - RET - -#define BSWAP V5 -#define J0 V6 -#define KEY0 V7 -#define KEY1 V8 -#define NONCE V9 -#define CTR V10 -#define M0 V11 -#define M1 V12 -#define M2 V13 -#define M3 V14 -#define INC V15 -#define X0 V16 -#define X1 V17 -#define X2 V18 -#define X3 V19 -#define X4 V20 -#define X5 V21 -#define X6 V22 -#define X7 V23 -#define X8 V24 -#define X9 V25 -#define X10 V26 -#define X11 V27 -#define X12 V28 -#define X13 V29 -#define X14 V30 -#define X15 V31 - -#define NUM_ROUNDS 20 - -#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $16, a2, a2 \ - VERLLF $16, b2, b2 \ - VERLLF $16, c2, c2 \ - VERLLF $16, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $12, a1, a1 \ - VERLLF $12, b1, b1 \ - VERLLF $12, c1, c1 \ - VERLLF $12, d1, d1 \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $8, a2, a2 \ - VERLLF $8, b2, b2 \ - VERLLF $8, c2, c2 \ - VERLLF $8, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $7, a1, a1 \ - VERLLF $7, b1, b1 \ - VERLLF $7, c1, c1 \ - VERLLF $7, d1, d1 - -#define PERMUTE(mask, v0, v1, v2, v3) \ - VPERM v0, v0, mask, v0 \ - VPERM v1, v1, mask, v1 \ - VPERM v2, v2, mask, v2 \ - VPERM v3, v3, mask, v3 - -#define ADDV(x, v0, v1, v2, v3) \ - VAF x, v0, v0 \ - VAF x, v1, v1 \ - VAF x, v2, v2 \ - VAF x, v3, v3 - -#define XORV(off, dst, src, v0, v1, v2, v3) \ - VLM off(src), M0, M3 \ - PERMUTE(BSWAP, v0, v1, v2, v3) \ - VX v0, M0, M0 \ - VX v1, M1, M1 \ - VX v2, M2, M2 \ - VX v3, M3, M3 \ - VSTM M0, M3, off(dst) - -#define SHUFFLE(a, b, c, d, t, u, v, w) \ - VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} - VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} - VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} - VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} - VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} - VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} - VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} - VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} - -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int) -TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 - MOVD $·constants<>(SB), R1 - MOVD dst+0(FP), R2 // R2=&dst[0] - LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) - MOVD key+48(FP), R5 // R5=key - MOVD nonce+56(FP), R6 // R6=nonce - MOVD counter+64(FP), R7 // R7=counter - MOVD buf+72(FP), R8 // R8=buf - MOVD len+80(FP), R9 // R9=len - - // load BSWAP and J0 - VLM (R1), BSWAP, J0 - - // set up tail buffer - 
ADD $-1, R4, R12 - MOVBZ R12, R12 - CMPUBEQ R12, $255, aligned - MOVD R4, R1 - AND $~255, R1 - MOVD $(R3)(R1*1), R1 - EXRL $·mvcSrcToBuf(SB), R12 - MOVD $255, R0 - SUB R12, R0 - MOVD R0, (R9) // update len - -aligned: - // setup - MOVD $95, R0 - VLM (R5), KEY0, KEY1 - VLL R0, (R6), NONCE - VZERO M0 - VLEIB $7, $32, M0 - VSRLB M0, NONCE, NONCE - - // initialize counter values - VLREPF (R7), CTR - VZERO INC - VLEIF $1, $1, INC - VLEIF $2, $2, INC - VLEIF $3, $3, INC - VAF INC, CTR, CTR - VREPIF $4, INC - -chacha: - VREPF $0, J0, X0 - VREPF $1, J0, X1 - VREPF $2, J0, X2 - VREPF $3, J0, X3 - VREPF $0, KEY0, X4 - VREPF $1, KEY0, X5 - VREPF $2, KEY0, X6 - VREPF $3, KEY0, X7 - VREPF $0, KEY1, X8 - VREPF $1, KEY1, X9 - VREPF $2, KEY1, X10 - VREPF $3, KEY1, X11 - VLR CTR, X12 - VREPF $1, NONCE, X13 - VREPF $2, NONCE, X14 - VREPF $3, NONCE, X15 - - MOVD $(NUM_ROUNDS/2), R1 - -loop: - ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) - ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) - - ADD $-1, R1 - BNE loop - - // decrement length - ADD $-256, R4 - BLT tail - -continue: - // rearrange vectors - SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) - ADDV(J0, X0, X1, X2, X3) - SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) - ADDV(KEY0, X4, X5, X6, X7) - SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) - ADDV(KEY1, X8, X9, X10, X11) - VAF CTR, X12, X12 - SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) - ADDV(NONCE, X12, X13, X14, X15) - - // increment counters - VAF INC, CTR, CTR - - // xor keystream with plaintext - XORV(0*64, R2, R3, X0, X4, X8, X12) - XORV(1*64, R2, R3, X1, X5, X9, X13) - XORV(2*64, R2, R3, X2, X6, X10, X14) - XORV(3*64, R2, R3, X3, X7, X11, X15) - - // increment pointers - MOVD $256(R2), R2 - MOVD $256(R3), R3 - - CMPBNE R4, $0, chacha - CMPUBEQ R12, $255, return - EXRL $·mvcBufToDst(SB), R12 // len was updated during setup - -return: - VSTEF $0, CTR, (R7) - RET - -tail: - MOVD R2, R9 - MOVD R8, R2 - MOVD R8, R3 - MOVD $0, R4 - JMP continue diff --git a/vendor/golang.org/x/crypto/internal/chacha20/xor.go b/vendor/golang.org/x/crypto/internal/chacha20/xor.go deleted file mode 100644 index 9c5ba0b33..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/xor.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found src the LICENSE file. - -package chacha20 - -import ( - "runtime" -) - -// Platforms that have fast unaligned 32-bit little endian accesses. -const unaligned = runtime.GOARCH == "386" || - runtime.GOARCH == "amd64" || - runtime.GOARCH == "arm64" || - runtime.GOARCH == "ppc64le" || - runtime.GOARCH == "s390x" - -// xor reads a little endian uint32 from src, XORs it with u and -// places the result in little endian byte order in dst. -func xor(dst, src []byte, u uint32) { - _, _ = src[3], dst[3] // eliminate bounds checks - if unaligned { - // The compiler should optimize this code into - // 32-bit unaligned little endian loads and stores. - // TODO: delete once the compiler does a reliably - // good job with the generic code below. - // See issue #25111 for more details. 
- v := uint32(src[0]) - v |= uint32(src[1]) << 8 - v |= uint32(src[2]) << 16 - v |= uint32(src[3]) << 24 - v ^= u - dst[0] = byte(v) - dst[1] = byte(v >> 8) - dst[2] = byte(v >> 16) - dst[3] = byte(v >> 24) - } else { - dst[0] = src[0] ^ byte(u) - dst[1] = src[1] ^ byte(u>>8) - dst[2] = src[2] ^ byte(u>>16) - dst[3] = src[3] ^ byte(u>>24) - } -} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go deleted file mode 100644 index f38797bfa..000000000 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -import "unsafe" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && - uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go deleted file mode 100644 index 0cc4a8a64..000000000 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -// This is the Google App Engine standard variant based on reflect -// because the unsafe package and cgo are disallowed. - -import "reflect" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && - reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. 
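// Editor's note: an illustrative, self-contained demo, not part of the diff.
// The deleted internal/subtle helpers distinguish "same buffer, same offset"
// (in-place operation, allowed by the cipher.Stream contract) from partially
// overlapping buffers, which XORKeyStream above rejects with a panic. The
// functions below are inlined essentially as they appear in the deleted
// aliasing.go, with lowercased names to mark them as the editor's copy.
package sketch

import (
	"fmt"
	"unsafe"
)

// anyOverlap reports whether x and y share any memory, as in the deleted file.
func anyOverlap(x, y []byte) bool {
	return len(x) > 0 && len(y) > 0 &&
		uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
		uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}

// inexactOverlap reports overlap at non-corresponding indices only.
func inexactOverlap(x, y []byte) bool {
	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
		return false
	}
	return anyOverlap(x, y)
}

func overlapDemo() {
	buf := make([]byte, 32)
	fmt.Println(inexactOverlap(buf[:16], buf[:16]))  // false: in-place is fine
	fmt.Println(inexactOverlap(buf[:16], buf[8:24])) // true: partial overlap
}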
-func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go deleted file mode 100644 index 8387d2999..000000000 --- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 gccgo appengine - -package poly1305 - -type mac struct{ macGeneric } - -func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} } diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go deleted file mode 100644 index d076a5623..000000000 --- a/vendor/golang.org/x/crypto/poly1305/poly1305.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package poly1305 implements Poly1305 one-time message authentication code as -// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. -// -// Poly1305 is a fast, one-time authentication function. It is infeasible for an -// attacker to generate an authenticator for a message without the key. However, a -// key must only be used for a single message. Authenticating two different -// messages with the same key allows an attacker to forge authenticators for other -// messages with the same key. -// -// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was -// used with a fixed key in order to generate one-time keys from an nonce. -// However, in this package AES isn't used and the one-time key is specified -// directly. -package poly1305 // import "golang.org/x/crypto/poly1305" - -import "crypto/subtle" - -// TagSize is the size, in bytes, of a poly1305 authenticator. -const TagSize = 16 - -// Verify returns true if mac is a valid authenticator for m with the given -// key. -func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { - var tmp [16]byte - Sum(&tmp, m, key) - return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 -} - -// New returns a new MAC computing an authentication -// tag of all data written to it with the given key. -// This allows writing the message progressively instead -// of passing it as a single slice. Common users should use -// the Sum function instead. -// -// The key must be unique for each message, as authenticating -// two different messages with the same key allows an attacker -// to forge messages at will. -func New(key *[32]byte) *MAC { - return &MAC{ - mac: newMAC(key), - finalized: false, - } -} - -// MAC is an io.Writer computing an authentication tag -// of the data written to it. -// -// MAC cannot be used like common hash.Hash implementations, -// because using a poly1305 key twice breaks its security. -// Therefore writing data to a running MAC after calling -// Sum causes it to panic. -type MAC struct { - mac // platform-dependent implementation - - finalized bool -} - -// Size returns the number of bytes Sum will return. -func (h *MAC) Size() int { return TagSize } - -// Write adds more data to the running message authentication code. -// It never returns an error. -// -// It must not be called after the first call of Sum. 
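// Editor's note: an illustrative usage sketch, not part of the diff; the
// vendored copy of the package is removed here, but the upstream
// golang.org/x/crypto/poly1305 module exposed the same Sum/Verify/TagSize
// API at the time. As the package comment above stresses, each 32-byte key
// must authenticate one message only; the helper name is the editor's.
package sketch

import "golang.org/x/crypto/poly1305"

// tagAndVerify computes a one-time authenticator for msg under key and then
// checks it, mirroring what Verify itself does internally.
func tagAndVerify(msg []byte, key *[32]byte) bool {
	var tag [poly1305.TagSize]byte
	poly1305.Sum(&tag, msg, key)
	return poly1305.Verify(&tag, msg, key)
}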
-func (h *MAC) Write(p []byte) (n int, err error) { - if h.finalized { - panic("poly1305: write to MAC after Sum") - } - return h.mac.Write(p) -} - -// Sum computes the authenticator of all data written to the -// message authentication code. -func (h *MAC) Sum(b []byte) []byte { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return append(b, mac[:]...) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go deleted file mode 100644 index 2dbf42aa5..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!gccgo,!appengine - -package poly1305 - -//go:noescape -func initialize(state *[7]uint64, key *[32]byte) - -//go:noescape -func update(state *[7]uint64, msg []byte) - -//go:noescape -func finalize(tag *[TagSize]byte, state *[7]uint64) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - h := newMAC(key) - h.Write(m) - h.Sum(out) -} - -func newMAC(key *[32]byte) (h mac) { - initialize(&h.state, key) - return -} - -type mac struct { - state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 } - - buffer [TagSize]byte - offset int -} - -func (h *mac) Write(p []byte) (n int, err error) { - n = len(p) - if h.offset > 0 { - remaining := TagSize - h.offset - if n < remaining { - h.offset += copy(h.buffer[h.offset:], p) - return n, nil - } - copy(h.buffer[h.offset:], p[:remaining]) - p = p[remaining:] - h.offset = 0 - update(&h.state, h.buffer[:]) - } - if nn := len(p) - (len(p) % TagSize); nn > 0 { - update(&h.state, p[:nn]) - p = p[nn:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return n, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.state - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s deleted file mode 100644 index 7d600f13c..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build amd64,!gccgo,!appengine - -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVQ state+0(FP), DI - MOVQ msg_base+8(FP), SI - MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 - JB bytes_between_0_and_15 - -loop: - POLY1305_ADD(SI, R8, R9, R10) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop - -bytes_between_0_and_15: - TESTQ R15, R15 - JZ done - MOVQ $1, BX - XORQ CX, CX - XORQ R13, R13 - ADDQ R15, SI - -flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX - MOVB -1(SI), R13 - XORQ R13, BX - DECQ SI - DECQ R15 - JNZ flush_buffer - - ADDQ BX, R8 - ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 - JMP multiply - -done: - MOVQ R8, 0(DI) - MOVQ R9, 8(DI) - MOVQ R10, 16(DI) - RET - -// func initialize(state *[7]uint64, key *[32]byte) -TEXT ·initialize(SB), $0-16 - MOVQ state+0(FP), DI - MOVQ key+8(FP), SI - - // state[0...7] is initialized with zero - MOVOU 0(SI), X0 - MOVOU 16(SI), X1 - MOVOU ·poly1305Mask<>(SB), X2 - PAND X2, X0 - MOVOU X0, 24(DI) - MOVOU X1, 40(DI) - RET - -// func finalize(tag *[TagSize]byte, state *[7]uint64) -TEXT ·finalize(SB), $0-16 - MOVQ tag+0(FP), DI - MOVQ state+8(FP), SI - - MOVQ 0(SI), AX - MOVQ 8(SI), BX - MOVQ 16(SI), CX - MOVQ AX, R8 - MOVQ BX, R9 - SUBQ $0xFFFFFFFFFFFFFFFB, AX - SBBQ $0xFFFFFFFFFFFFFFFF, BX - SBBQ $3, CX - CMOVQCS R8, AX - CMOVQCS R9, BX - ADDQ 40(SI), AX - ADCQ 48(SI), BX - - MOVQ AX, 0(DI) - MOVQ BX, 8(DI) - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go deleted file mode 100644 index 5dc321c2f..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_arm.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build arm,!gccgo,!appengine,!nacl - -package poly1305 - -// This function is implemented in sum_arm.s -//go:noescape -func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. 
-func Sum(out *[16]byte, m []byte, key *[32]byte) { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] - } - poly1305_auth_armv6(out, mPtr, uint32(len(m)), key) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/vendor/golang.org/x/crypto/poly1305/sum_arm.s deleted file mode 100644 index f70b4ac48..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_arm.s +++ /dev/null @@ -1,427 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build arm,!gccgo,!appengine,!nacl - -#include "textflag.h" - -// This code was translated into a form compatible with 5a from the public -// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. - -DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff -DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 -DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff -DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff -DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff -GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20 - -// Warning: the linker may use R11 to synthesize certain instructions. Please -// take care and verify that no synthetic instructions use it. - -TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0 - // Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It - // might look like it's only 60 bytes of space but the final four bytes - // will be written by another function.) We need to skip over four - // bytes of stack because that's saving the value of 'g'. - ADD $4, R13, R8 - MOVM.IB [R4-R7], (R8) - MOVM.IA.W (R1), [R2-R5] - MOVW $·poly1305_init_constants_armv6<>(SB), R7 - MOVW R2, R8 - MOVW R2>>26, R9 - MOVW R3>>20, g - MOVW R4>>14, R11 - MOVW R5>>8, R12 - ORR R3<<6, R9, R9 - ORR R4<<12, g, g - ORR R5<<18, R11, R11 - MOVM.IA (R7), [R2-R6] - AND R8, R2, R2 - AND R9, R3, R3 - AND g, R4, R4 - AND R11, R5, R5 - AND R12, R6, R6 - MOVM.IA.W [R2-R6], (R0) - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - MOVM.IA.W [R2-R6], (R0) - MOVM.IA.W (R1), [R2-R5] - MOVM.IA [R2-R6], (R0) - ADD $20, R13, R0 - MOVM.DA (R0), [R4-R7] - RET - -#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \ - MOVBU (offset+0)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+0)(Rdst); \ - MOVBU (offset+1)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+1)(Rdst); \ - MOVBU (offset+2)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+2)(Rdst); \ - MOVBU (offset+3)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+3)(Rdst) - -TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0 - // Needs 24 bytes of stack for saved registers and then 88 bytes of - // scratch space after that. We assume that 24 bytes at (R13) have - // already been used: four bytes for the link register saved in the - // prelude of poly1305_auth_armv6, four bytes for saving the value of g - // in that function and 16 bytes of scratch space used around - // poly1305_finish_ext_armv6_skip1. 
- ADD $24, R13, R12 - MOVM.IB [R4-R8, R14], (R12) - MOVW R0, 88(R13) - MOVW R1, 92(R13) - MOVW R2, 96(R13) - MOVW R1, R14 - MOVW R2, R12 - MOVW 56(R0), R8 - WORD $0xe1180008 // TST R8, R8 not working see issue 5921 - EOR R6, R6, R6 - MOVW.EQ $(1<<24), R6 - MOVW R6, 84(R13) - ADD $116, R13, g - MOVM.IA (R0), [R0-R9] - MOVM.IA [R0-R4], (g) - CMP $16, R12 - BLO poly1305_blocks_armv6_done - -poly1305_blocks_armv6_mainloop: - WORD $0xe31e0003 // TST R14, #3 not working see issue 5921 - BEQ poly1305_blocks_armv6_mainloop_aligned - ADD $100, R13, g - MOVW_UNALIGNED(R14, g, R0, 0) - MOVW_UNALIGNED(R14, g, R0, 4) - MOVW_UNALIGNED(R14, g, R0, 8) - MOVW_UNALIGNED(R14, g, R0, 12) - MOVM.IA (g), [R0-R3] - ADD $16, R14 - B poly1305_blocks_armv6_mainloop_loaded - -poly1305_blocks_armv6_mainloop_aligned: - MOVM.IA.W (R14), [R0-R3] - -poly1305_blocks_armv6_mainloop_loaded: - MOVW R0>>26, g - MOVW R1>>20, R11 - MOVW R2>>14, R12 - MOVW R14, 92(R13) - MOVW R3>>8, R4 - ORR R1<<6, g, g - ORR R2<<12, R11, R11 - ORR R3<<18, R12, R12 - BIC $0xfc000000, R0, R0 - BIC $0xfc000000, g, g - MOVW 84(R13), R3 - BIC $0xfc000000, R11, R11 - BIC $0xfc000000, R12, R12 - ADD R0, R5, R5 - ADD g, R6, R6 - ORR R3, R4, R4 - ADD R11, R7, R7 - ADD $116, R13, R14 - ADD R12, R8, R8 - ADD R4, R9, R9 - MOVM.IA (R14), [R0-R4] - MULLU R4, R5, (R11, g) - MULLU R3, R5, (R14, R12) - MULALU R3, R6, (R11, g) - MULALU R2, R6, (R14, R12) - MULALU R2, R7, (R11, g) - MULALU R1, R7, (R14, R12) - ADD R4<<2, R4, R4 - ADD R3<<2, R3, R3 - MULALU R1, R8, (R11, g) - MULALU R0, R8, (R14, R12) - MULALU R0, R9, (R11, g) - MULALU R4, R9, (R14, R12) - MOVW g, 76(R13) - MOVW R11, 80(R13) - MOVW R12, 68(R13) - MOVW R14, 72(R13) - MULLU R2, R5, (R11, g) - MULLU R1, R5, (R14, R12) - MULALU R1, R6, (R11, g) - MULALU R0, R6, (R14, R12) - MULALU R0, R7, (R11, g) - MULALU R4, R7, (R14, R12) - ADD R2<<2, R2, R2 - ADD R1<<2, R1, R1 - MULALU R4, R8, (R11, g) - MULALU R3, R8, (R14, R12) - MULALU R3, R9, (R11, g) - MULALU R2, R9, (R14, R12) - MOVW g, 60(R13) - MOVW R11, 64(R13) - MOVW R12, 52(R13) - MOVW R14, 56(R13) - MULLU R0, R5, (R11, g) - MULALU R4, R6, (R11, g) - MULALU R3, R7, (R11, g) - MULALU R2, R8, (R11, g) - MULALU R1, R9, (R11, g) - ADD $52, R13, R0 - MOVM.IA (R0), [R0-R7] - MOVW g>>26, R12 - MOVW R4>>26, R14 - ORR R11<<6, R12, R12 - ORR R5<<6, R14, R14 - BIC $0xfc000000, g, g - BIC $0xfc000000, R4, R4 - ADD.S R12, R0, R0 - ADC $0, R1, R1 - ADD.S R14, R6, R6 - ADC $0, R7, R7 - MOVW R0>>26, R12 - MOVW R6>>26, R14 - ORR R1<<6, R12, R12 - ORR R7<<6, R14, R14 - BIC $0xfc000000, R0, R0 - BIC $0xfc000000, R6, R6 - ADD R14<<2, R14, R14 - ADD.S R12, R2, R2 - ADC $0, R3, R3 - ADD R14, g, g - MOVW R2>>26, R12 - MOVW g>>26, R14 - ORR R3<<6, R12, R12 - BIC $0xfc000000, g, R5 - BIC $0xfc000000, R2, R7 - ADD R12, R4, R4 - ADD R14, R0, R0 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R8 - ADD R12, R6, R9 - MOVW 96(R13), R12 - MOVW 92(R13), R14 - MOVW R0, R6 - CMP $32, R12 - SUB $16, R12, R12 - MOVW R12, 96(R13) - BHS poly1305_blocks_armv6_mainloop - -poly1305_blocks_armv6_done: - MOVW 88(R13), R12 - MOVW R5, 20(R12) - MOVW R6, 24(R12) - MOVW R7, 28(R12) - MOVW R8, 32(R12) - MOVW R9, 36(R12) - ADD $48, R13, R0 - MOVM.DA (R0), [R4-R8, R14] - RET - -#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \ - MOVBU.P 1(Rsrc), Rtmp; \ - MOVBU.P Rtmp, 1(Rdst); \ - MOVBU.P 1(Rsrc), Rtmp; \ - MOVBU.P Rtmp, 1(Rdst) - -#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \ - MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \ - MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) - -// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen 
uint32, key *[32]key) -TEXT ·poly1305_auth_armv6(SB), $196-16 - // The value 196, just above, is the sum of 64 (the size of the context - // structure) and 132 (the amount of stack needed). - // - // At this point, the stack pointer (R13) has been moved down. It - // points to the saved link register and there's 196 bytes of free - // space above it. - // - // The stack for this function looks like: - // - // +--------------------- - // | - // | 64 bytes of context structure - // | - // +--------------------- - // | - // | 112 bytes for poly1305_blocks_armv6 - // | - // +--------------------- - // | 16 bytes of final block, constructed at - // | poly1305_finish_ext_armv6_skip8 - // +--------------------- - // | four bytes of saved 'g' - // +--------------------- - // | lr, saved by prelude <- R13 points here - // +--------------------- - MOVW g, 4(R13) - - MOVW out+0(FP), R4 - MOVW m+4(FP), R5 - MOVW mlen+8(FP), R6 - MOVW key+12(FP), R7 - - ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112 - MOVW R7, R1 - - // poly1305_init_ext_armv6 will write to the stack from R13+4, but - // that's ok because none of the other values have been written yet. - BL poly1305_init_ext_armv6<>(SB) - BIC.S $15, R6, R2 - BEQ poly1305_auth_armv6_noblocks - ADD $136, R13, R0 - MOVW R5, R1 - ADD R2, R5, R5 - SUB R2, R6, R6 - BL poly1305_blocks_armv6<>(SB) - -poly1305_auth_armv6_noblocks: - ADD $136, R13, R0 - MOVW R5, R1 - MOVW R6, R2 - MOVW R4, R3 - - MOVW R0, R5 - MOVW R1, R6 - MOVW R2, R7 - MOVW R3, R8 - AND.S R2, R2, R2 - BEQ poly1305_finish_ext_armv6_noremaining - EOR R0, R0 - ADD $8, R13, R9 // 8 = offset to 16 byte scratch space - MOVW R0, (R9) - MOVW R0, 4(R9) - MOVW R0, 8(R9) - MOVW R0, 12(R9) - WORD $0xe3110003 // TST R1, #3 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_aligned - WORD $0xe3120008 // TST R2, #8 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip8 - MOVWP_UNALIGNED(R1, R9, g) - MOVWP_UNALIGNED(R1, R9, g) - -poly1305_finish_ext_armv6_skip8: - WORD $0xe3120004 // TST $4, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip4 - MOVWP_UNALIGNED(R1, R9, g) - -poly1305_finish_ext_armv6_skip4: - WORD $0xe3120002 // TST $2, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip2 - MOVHUP_UNALIGNED(R1, R9, g) - B poly1305_finish_ext_armv6_skip2 - -poly1305_finish_ext_armv6_aligned: - WORD $0xe3120008 // TST R2, #8 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip8_aligned - MOVM.IA.W (R1), [g-R11] - MOVM.IA.W [g-R11], (R9) - -poly1305_finish_ext_armv6_skip8_aligned: - WORD $0xe3120004 // TST $4, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip4_aligned - MOVW.P 4(R1), g - MOVW.P g, 4(R9) - -poly1305_finish_ext_armv6_skip4_aligned: - WORD $0xe3120002 // TST $2, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip2 - MOVHU.P 2(R1), g - MOVH.P g, 2(R9) - -poly1305_finish_ext_armv6_skip2: - WORD $0xe3120001 // TST $1, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip1 - MOVBU.P 1(R1), g - MOVBU.P g, 1(R9) - -poly1305_finish_ext_armv6_skip1: - MOVW $1, R11 - MOVBU R11, 0(R9) - MOVW R11, 56(R5) - MOVW R5, R0 - ADD $8, R13, R1 - MOVW $16, R2 - BL poly1305_blocks_armv6<>(SB) - -poly1305_finish_ext_armv6_noremaining: - MOVW 20(R5), R0 - MOVW 24(R5), R1 - MOVW 28(R5), R2 - MOVW 32(R5), R3 - MOVW 36(R5), R4 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R4 - ADD R12<<2, R12, R12 - ADD R12, R0, R0 - MOVW R0>>26, R12 - BIC $0xfc000000, R0, R0 - ADD R12, R1, R1 - MOVW R1>>26, R12 - BIC $0xfc000000, R1, R1 - ADD 
R12, R2, R2 - MOVW R2>>26, R12 - BIC $0xfc000000, R2, R2 - ADD R12, R3, R3 - MOVW R3>>26, R12 - BIC $0xfc000000, R3, R3 - ADD R12, R4, R4 - ADD $5, R0, R6 - MOVW R6>>26, R12 - BIC $0xfc000000, R6, R6 - ADD R12, R1, R7 - MOVW R7>>26, R12 - BIC $0xfc000000, R7, R7 - ADD R12, R2, g - MOVW g>>26, R12 - BIC $0xfc000000, g, g - ADD R12, R3, R11 - MOVW $-(1<<26), R12 - ADD R11>>26, R12, R12 - BIC $0xfc000000, R11, R11 - ADD R12, R4, R9 - MOVW R9>>31, R12 - SUB $1, R12 - AND R12, R6, R6 - AND R12, R7, R7 - AND R12, g, g - AND R12, R11, R11 - AND R12, R9, R9 - MVN R12, R12 - AND R12, R0, R0 - AND R12, R1, R1 - AND R12, R2, R2 - AND R12, R3, R3 - AND R12, R4, R4 - ORR R6, R0, R0 - ORR R7, R1, R1 - ORR g, R2, R2 - ORR R11, R3, R3 - ORR R9, R4, R4 - ORR R1<<26, R0, R0 - MOVW R1>>6, R1 - ORR R2<<20, R1, R1 - MOVW R2>>12, R2 - ORR R3<<14, R2, R2 - MOVW R3>>18, R3 - ORR R4<<8, R3, R3 - MOVW 40(R5), R6 - MOVW 44(R5), R7 - MOVW 48(R5), g - MOVW 52(R5), R11 - ADD.S R6, R0, R0 - ADC.S R7, R1, R1 - ADC.S g, R2, R2 - ADC.S R11, R3, R3 - MOVM.IA [R0-R3], (R8) - MOVW R5, R12 - EOR R0, R0, R0 - EOR R1, R1, R1 - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - EOR R7, R7, R7 - MOVM.IA.W [R0-R7], (R12) - MOVM.IA [R0-R7], (R12) - MOVW 4(R13), g - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go deleted file mode 100644 index bab76ef0d..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_generic.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package poly1305 - -import "encoding/binary" - -const ( - msgBlock = uint32(1 << 24) - finalBlock = uint32(0) -) - -// sumGeneric generates an authenticator for msg using a one-time key and -// puts the 16-byte result into out. This is the generic implementation of -// Sum and should be called if no assembly implementation is available. 
-func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { - h := newMACGeneric(key) - h.Write(msg) - h.Sum(out) -} - -func newMACGeneric(key *[32]byte) (h macGeneric) { - h.r[0] = binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff - h.r[1] = (binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03 - h.r[2] = (binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff - h.r[3] = (binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff - h.r[4] = (binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff - - h.s[0] = binary.LittleEndian.Uint32(key[16:]) - h.s[1] = binary.LittleEndian.Uint32(key[20:]) - h.s[2] = binary.LittleEndian.Uint32(key[24:]) - h.s[3] = binary.LittleEndian.Uint32(key[28:]) - return -} - -type macGeneric struct { - h, r [5]uint32 - s [4]uint32 - - buffer [TagSize]byte - offset int -} - -func (h *macGeneric) Write(p []byte) (n int, err error) { - n = len(p) - if h.offset > 0 { - remaining := TagSize - h.offset - if n < remaining { - h.offset += copy(h.buffer[h.offset:], p) - return n, nil - } - copy(h.buffer[h.offset:], p[:remaining]) - p = p[remaining:] - h.offset = 0 - updateGeneric(h.buffer[:], msgBlock, &(h.h), &(h.r)) - } - if nn := len(p) - (len(p) % TagSize); nn > 0 { - updateGeneric(p, msgBlock, &(h.h), &(h.r)) - p = p[nn:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return n, nil -} - -func (h *macGeneric) Sum(out *[16]byte) { - H, R := h.h, h.r - if h.offset > 0 { - var buffer [TagSize]byte - copy(buffer[:], h.buffer[:h.offset]) - buffer[h.offset] = 1 // invariant: h.offset < TagSize - updateGeneric(buffer[:], finalBlock, &H, &R) - } - finalizeGeneric(out, &H, &(h.s)) -} - -func updateGeneric(msg []byte, flag uint32, h, r *[5]uint32) { - h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] - r0, r1, r2, r3, r4 := uint64(r[0]), uint64(r[1]), uint64(r[2]), uint64(r[3]), uint64(r[4]) - R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5 - - for len(msg) >= TagSize { - // h += msg - h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff - h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff - h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff - h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff - h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | flag - - // h *= r - d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) - d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) - d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) - d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) - d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) - - // h %= p - h0 = uint32(d0) & 0x3ffffff - h1 = uint32(d1) & 0x3ffffff - h2 = uint32(d2) & 0x3ffffff - h3 = uint32(d3) & 0x3ffffff - h4 = uint32(d4) & 0x3ffffff - - h0 += uint32(d4>>26) * 5 - h1 += h0 >> 26 - h0 = h0 & 0x3ffffff - - msg = msg[TagSize:] - } - - h[0], h[1], h[2], h[3], h[4] = h0, h1, h2, h3, h4 -} - -func finalizeGeneric(out *[TagSize]byte, h *[5]uint32, s *[4]uint32) { - h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] - - // h %= p reduction - h2 += h1 >> 26 - h1 &= 0x3ffffff - h3 += h2 >> 26 - h2 &= 0x3ffffff - h4 += h3 >> 26 - h3 &= 0x3ffffff - h0 += 5 * (h4 >> 26) - h4 &= 0x3ffffff - h1 += h0 >> 26 - h0 &= 0x3ffffff - - // h - p - t0 := h0 + 5 - t1 := h1 + (t0 >> 26) - t2 := 
h2 + (t1 >> 26) - t3 := h3 + (t2 >> 26) - t4 := h4 + (t3 >> 26) - (1 << 26) - t0 &= 0x3ffffff - t1 &= 0x3ffffff - t2 &= 0x3ffffff - t3 &= 0x3ffffff - - // select h if h < p else h - p - t_mask := (t4 >> 31) - 1 - h_mask := ^t_mask - h0 = (h0 & h_mask) | (t0 & t_mask) - h1 = (h1 & h_mask) | (t1 & t_mask) - h2 = (h2 & h_mask) | (t2 & t_mask) - h3 = (h3 & h_mask) | (t3 & t_mask) - h4 = (h4 & h_mask) | (t4 & t_mask) - - // h %= 2^128 - h0 |= h1 << 26 - h1 = ((h1 >> 6) | (h2 << 20)) - h2 = ((h2 >> 12) | (h3 << 14)) - h3 = ((h3 >> 18) | (h4 << 8)) - - // s: the s part of the key - // tag = (h + s) % (2^128) - t := uint64(h0) + uint64(s[0]) - h0 = uint32(t) - t = uint64(h1) + uint64(s[1]) + (t >> 32) - h1 = uint32(t) - t = uint64(h2) + uint64(s[2]) + (t >> 32) - h2 = uint32(t) - t = uint64(h3) + uint64(s[3]) + (t >> 32) - h3 = uint32(t) - - binary.LittleEndian.PutUint32(out[0:], h0) - binary.LittleEndian.PutUint32(out[4:], h1) - binary.LittleEndian.PutUint32(out[8:], h2) - binary.LittleEndian.PutUint32(out[12:], h3) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go deleted file mode 100644 index fcdef46ab..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,!go1.11 !arm,!amd64,!s390x gccgo appengine nacl - -package poly1305 - -// Sum generates an authenticator for msg using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) { - h := newMAC(key) - h.Write(msg) - h.Sum(out) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go deleted file mode 100644 index ec99e07e9..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,go1.11,!gccgo,!appengine - -package poly1305 - -import ( - "golang.org/x/sys/cpu" -) - -// poly1305vx is an assembly implementation of Poly1305 that uses vector -// instructions. It must only be called if the vector facility (vx) is -// available. -//go:noescape -func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte) - -// poly1305vmsl is an assembly implementation of Poly1305 that uses vector -// instructions, including VMSL. It must only be called if the vector facility (vx) is -// available and if VMSL is supported. -//go:noescape -func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. 
-func Sum(out *[16]byte, m []byte, key *[32]byte) { - if cpu.S390X.HasVX { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] - } - if cpu.S390X.HasVXE && len(m) > 256 { - poly1305vmsl(out, mPtr, uint64(len(m)), key) - } else { - poly1305vx(out, mPtr, uint64(len(m)), key) - } - } else { - sumGeneric(out, m, key) - } -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s deleted file mode 100644 index ca5a309d8..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,go1.11,!gccgo,!appengine - -#include "textflag.h" - -// Implementation of Poly1305 using the vector facility (vx). - -// constants -#define MOD26 V0 -#define EX0 V1 -#define EX1 V2 -#define EX2 V3 - -// temporaries -#define T_0 V4 -#define T_1 V5 -#define T_2 V6 -#define T_3 V7 -#define T_4 V8 - -// key (r) -#define R_0 V9 -#define R_1 V10 -#define R_2 V11 -#define R_3 V12 -#define R_4 V13 -#define R5_1 V14 -#define R5_2 V15 -#define R5_3 V16 -#define R5_4 V17 -#define RSAVE_0 R5 -#define RSAVE_1 R6 -#define RSAVE_2 R7 -#define RSAVE_3 R8 -#define RSAVE_4 R9 -#define R5SAVE_1 V28 -#define R5SAVE_2 V29 -#define R5SAVE_3 V30 -#define R5SAVE_4 V31 - -// message block -#define F_0 V18 -#define F_1 V19 -#define F_2 V20 -#define F_3 V21 -#define F_4 V22 - -// accumulator -#define H_0 V23 -#define H_1 V24 -#define H_2 V25 -#define H_3 V26 -#define H_4 V27 - -GLOBL ·keyMask<>(SB), RODATA, $16 -DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f -DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f - -GLOBL ·bswapMask<>(SB), RODATA, $16 -DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 -DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 - -GLOBL ·constants<>(SB), RODATA, $64 -// MOD26 -DATA ·constants<>+0(SB)/8, $0x3ffffff -DATA ·constants<>+8(SB)/8, $0x3ffffff -// EX0 -DATA ·constants<>+16(SB)/8, $0x0006050403020100 -DATA ·constants<>+24(SB)/8, $0x1016151413121110 -// EX1 -DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706 -DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716 -// EX2 -DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d -DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d - -// h = (f*g) % (2**130-5) [partial reduction] -#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ - VMLOF f0, g0, h0 \ - VMLOF f0, g1, h1 \ - VMLOF f0, g2, h2 \ - VMLOF f0, g3, h3 \ - VMLOF f0, g4, h4 \ - VMLOF f1, g54, T_0 \ - VMLOF f1, g0, T_1 \ - VMLOF f1, g1, T_2 \ - VMLOF f1, g2, T_3 \ - VMLOF f1, g3, T_4 \ - VMALOF f2, g53, h0, h0 \ - VMALOF f2, g54, h1, h1 \ - VMALOF f2, g0, h2, h2 \ - VMALOF f2, g1, h3, h3 \ - VMALOF f2, g2, h4, h4 \ - VMALOF f3, g52, T_0, T_0 \ - VMALOF f3, g53, T_1, T_1 \ - VMALOF f3, g54, T_2, T_2 \ - VMALOF f3, g0, T_3, T_3 \ - VMALOF f3, g1, T_4, T_4 \ - VMALOF f4, g51, h0, h0 \ - VMALOF f4, g52, h1, h1 \ - VMALOF f4, g53, h2, h2 \ - VMALOF f4, g54, h3, h3 \ - VMALOF f4, g0, h4, h4 \ - VAG T_0, h0, h0 \ - VAG T_1, h1, h1 \ - VAG T_2, h2, h2 \ - VAG T_3, h3, h3 \ - VAG T_4, h4, h4 - -// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4 -#define REDUCE(h0, h1, h2, h3, h4) \ - VESRLG $26, h0, T_0 \ - VESRLG $26, h3, T_1 \ - VN MOD26, h0, h0 \ - VN MOD26, h3, h3 \ - VAG T_0, h1, h1 \ - VAG T_1, h4, h4 \ - VESRLG $26, h1, T_2 \ - VESRLG $26, h4, T_3 \ - VN MOD26, h1, h1 \ - VN MOD26, h4, h4 \ - VESLG $2, T_3, T_4 \ - VAG T_3, T_4, 
T_4 \ - VAG T_2, h2, h2 \ - VAG T_4, h0, h0 \ - VESRLG $26, h2, T_0 \ - VESRLG $26, h0, T_1 \ - VN MOD26, h2, h2 \ - VN MOD26, h0, h0 \ - VAG T_0, h3, h3 \ - VAG T_1, h1, h1 \ - VESRLG $26, h3, T_2 \ - VN MOD26, h3, h3 \ - VAG T_2, h4, h4 - -// expand in0 into d[0] and in1 into d[1] -#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ - VGBM $0x0707, d1 \ // d1=tmp - VPERM in0, in1, EX2, d4 \ - VPERM in0, in1, EX0, d0 \ - VPERM in0, in1, EX1, d2 \ - VN d1, d4, d4 \ - VESRLG $26, d0, d1 \ - VESRLG $30, d2, d3 \ - VESRLG $4, d2, d2 \ - VN MOD26, d0, d0 \ - VN MOD26, d1, d1 \ - VN MOD26, d2, d2 \ - VN MOD26, d3, d3 - -// pack h4:h0 into h1:h0 (no carry) -#define PACK(h0, h1, h2, h3, h4) \ - VESLG $26, h1, h1 \ - VESLG $26, h3, h3 \ - VO h0, h1, h0 \ - VO h2, h3, h2 \ - VESLG $4, h2, h2 \ - VLEIB $7, $48, h1 \ - VSLB h1, h2, h2 \ - VO h0, h2, h0 \ - VLEIB $7, $104, h1 \ - VSLB h1, h4, h3 \ - VO h3, h0, h0 \ - VLEIB $7, $24, h1 \ - VSRLB h1, h4, h1 - -// if h > 2**130-5 then h -= 2**130-5 -#define MOD(h0, h1, t0, t1, t2) \ - VZERO t0 \ - VLEIG $1, $5, t0 \ - VACCQ h0, t0, t1 \ - VAQ h0, t0, t0 \ - VONE t2 \ - VLEIG $1, $-4, t2 \ - VAQ t2, t1, t1 \ - VACCQ h1, t1, t1 \ - VONE t2 \ - VAQ t2, t1, t1 \ - VN h0, t1, t2 \ - VNC t0, t1, t1 \ - VO t1, t2, h0 - -// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305vx(SB), $0-32 - // This code processes up to 2 blocks (32 bytes) per iteration - // using the algorithm described in: - // NEON crypto, Daniel J. Bernstein & Peter Schwabe - // https://cryptojedi.org/papers/neoncrypto-20120320.pdf - LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key - - // load MOD26, EX0, EX1 and EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), MOD26, EX2 - - // setup r - VL (R4), T_0 - MOVD $·keyMask<>(SB), R6 - VL (R6), T_1 - VN T_0, T_1, T_0 - EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4) - - // setup r*5 - VLEIG $0, $5, T_0 - VLEIG $1, $5, T_0 - - // store r (for final block) - VMLOF T_0, R_1, R5SAVE_1 - VMLOF T_0, R_2, R5SAVE_2 - VMLOF T_0, R_3, R5SAVE_3 - VMLOF T_0, R_4, R5SAVE_4 - VLGVG $0, R_0, RSAVE_0 - VLGVG $0, R_1, RSAVE_1 - VLGVG $0, R_2, RSAVE_2 - VLGVG $0, R_3, RSAVE_3 - VLGVG $0, R_4, RSAVE_4 - - // skip r**2 calculation - CMPBLE R3, $16, skip - - // calculate r**2 - MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4) - REDUCE(H_0, H_1, H_2, H_3, H_4) - VLEIG $0, $5, T_0 - VLEIG $1, $5, T_0 - VMLOF T_0, H_1, R5_1 - VMLOF T_0, H_2, R5_2 - VMLOF T_0, H_3, R5_3 - VMLOF T_0, H_4, R5_4 - VLR H_0, R_0 - VLR H_1, R_1 - VLR H_2, R_2 - VLR H_3, R_3 - VLR H_4, R_4 - - // initialize h - VZERO H_0 - VZERO H_1 - VZERO H_2 - VZERO H_3 - VZERO H_4 - -loop: - CMPBLE R3, $32, b2 - VLM (R2), T_0, T_1 - SUB $32, R3 - MOVD $32(R2), R2 - EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) - VLEIB $4, $1, F_4 - VLEIB $12, $1, F_4 - -multiply: - VAG H_0, F_0, F_0 - VAG H_1, F_1, F_1 - VAG H_2, F_2, F_2 - VAG H_3, F_3, F_3 - VAG H_4, F_4, F_4 - MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) - REDUCE(H_0, H_1, H_2, H_3, H_4) - CMPBNE R3, $0, loop - -finish: - // sum vectors - VZERO T_0 - VSUMQG H_0, T_0, H_0 - VSUMQG H_1, T_0, H_1 - VSUMQG H_2, T_0, H_2 - VSUMQG H_3, T_0, H_3 - VSUMQG H_4, T_0, H_4 - - // h may be >= 2*(2**130-5) so we need to reduce it again - REDUCE(H_0, H_1, H_2, H_3, H_4) - - // carry h1->h4 - VESRLG $26, H_1, T_1 - VN MOD26, H_1, H_1 - VAQ T_1, H_2, H_2 - VESRLG $26, H_2, T_2 - VN MOD26, H_2, H_2 - VAQ T_2, H_3, H_3 - 
VESRLG $26, H_3, T_3 - VN MOD26, H_3, H_3 - VAQ T_3, H_4, H_4 - - // h is now < 2*(2**130-5) - // pack h into h1 (hi) and h0 (lo) - PACK(H_0, H_1, H_2, H_3, H_4) - - // if h > 2**130-5 then h -= 2**130-5 - MOD(H_0, H_1, T_0, T_1, T_2) - - // h += s - MOVD $·bswapMask<>(SB), R5 - VL (R5), T_1 - VL 16(R4), T_0 - VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) - VAQ T_0, H_0, H_0 - VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little) - VST H_0, (R1) - - RET - -b2: - CMPBLE R3, $16, b1 - - // 2 blocks remaining - SUB $17, R3 - VL (R2), T_0 - VLL R3, 16(R2), T_1 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, T_1 - EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) - CMPBNE R3, $16, 2(PC) - VLEIB $12, $1, F_4 - VLEIB $4, $1, F_4 - - // setup [r²,r] - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, RSAVE_3, R_3 - VLVGG $1, RSAVE_4, R_4 - VPDI $0, R5_1, R5SAVE_1, R5_1 - VPDI $0, R5_2, R5SAVE_2, R5_2 - VPDI $0, R5_3, R5SAVE_3, R5_3 - VPDI $0, R5_4, R5SAVE_4, R5_4 - - MOVD $0, R3 - BR multiply - -skip: - VZERO H_0 - VZERO H_1 - VZERO H_2 - VZERO H_3 - VZERO H_4 - - CMPBEQ R3, $0, finish - -b1: - // 1 block remaining - SUB $1, R3 - VLL R3, (R2), T_0 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, T_0 - VZERO T_1 - EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) - CMPBNE R3, $16, 2(PC) - VLEIB $4, $1, F_4 - VLEIG $1, $1, R_0 - VZERO R_1 - VZERO R_2 - VZERO R_3 - VZERO R_4 - VZERO R5_1 - VZERO R5_2 - VZERO R5_3 - VZERO R5_4 - - // setup [r, 1] - VLVGG $0, RSAVE_0, R_0 - VLVGG $0, RSAVE_1, R_1 - VLVGG $0, RSAVE_2, R_2 - VLVGG $0, RSAVE_3, R_3 - VLVGG $0, RSAVE_4, R_4 - VPDI $0, R5SAVE_1, R5_1, R5_1 - VPDI $0, R5SAVE_2, R5_2, R5_2 - VPDI $0, R5SAVE_3, R5_3, R5_3 - VPDI $0, R5SAVE_4, R5_4, R5_4 - - MOVD $0, R3 - BR multiply diff --git a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s deleted file mode 100644 index e60bbc1d7..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s +++ /dev/null @@ -1,909 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,go1.11,!gccgo,!appengine - -#include "textflag.h" - -// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction. 
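For orientation: every Poly1305 backend removed in this change (the generic Go code, the ARM assembly, and the two s390x variants) computes the same thing — the message is evaluated as a polynomial in the clamped key half r modulo 2**130-5, and the other key half s is added modulo 2**128. Below is a rough math/big sketch of that arithmetic; the names are illustrative, and it mirrors none of the removed code's limb layout.

package main

import (
	"fmt"
	"math/big"
)

// poly1305Ref computes a Poly1305 tag the slow, obvious way with math/big.
// It only illustrates the arithmetic; the deleted files compute the same
// result with 26-bit (generic/ARM) or 44-bit (s390x VMSL) limbs.
func poly1305Ref(msg []byte, key [32]byte) [16]byte {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2**130 - 5

	// Clamp r as the spec requires, then read r and s as little-endian numbers.
	var rb [16]byte
	copy(rb[:], key[:16])
	rb[3] &= 15
	rb[7] &= 15
	rb[11] &= 15
	rb[15] &= 15
	rb[4] &= 252
	rb[8] &= 252
	rb[12] &= 252
	r := leToInt(rb[:])
	s := leToInt(key[16:])

	h := new(big.Int)
	for len(msg) > 0 {
		n := 16
		if len(msg) < n {
			n = len(msg)
		}
		block := append(append([]byte{}, msg[:n]...), 1) // append the high "1" byte
		h.Add(h, leToInt(block))
		h.Mul(h, r)
		h.Mod(h, p)
		msg = msg[n:]
	}
	h.Add(h, s)

	// tag = (h + s) mod 2**128, serialized little-endian.
	var tag [16]byte
	be := h.Bytes()
	for i := 0; i < 16 && i < len(be); i++ {
		tag[i] = be[len(be)-1-i]
	}
	return tag
}

// leToInt interprets b as a little-endian unsigned integer.
func leToInt(b []byte) *big.Int {
	rev := make([]byte, len(b))
	for i := range b {
		rev[len(b)-1-i] = b[i]
	}
	return new(big.Int).SetBytes(rev)
}

func main() {
	var key [32]byte
	copy(key[:], "this is 32-byte key for Poly1305")
	fmt.Printf("%x\n", poly1305Ref([]byte("Hello world!"), key))
}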
- -// constants -#define EX0 V1 -#define EX1 V2 -#define EX2 V3 - -// temporaries -#define T_0 V4 -#define T_1 V5 -#define T_2 V6 -#define T_3 V7 -#define T_4 V8 -#define T_5 V9 -#define T_6 V10 -#define T_7 V11 -#define T_8 V12 -#define T_9 V13 -#define T_10 V14 - -// r**2 & r**4 -#define R_0 V15 -#define R_1 V16 -#define R_2 V17 -#define R5_1 V18 -#define R5_2 V19 -// key (r) -#define RSAVE_0 R7 -#define RSAVE_1 R8 -#define RSAVE_2 R9 -#define R5SAVE_1 R10 -#define R5SAVE_2 R11 - -// message block -#define M0 V20 -#define M1 V21 -#define M2 V22 -#define M3 V23 -#define M4 V24 -#define M5 V25 - -// accumulator -#define H0_0 V26 -#define H1_0 V27 -#define H2_0 V28 -#define H0_1 V29 -#define H1_1 V30 -#define H2_1 V31 - -GLOBL ·keyMask<>(SB), RODATA, $16 -DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f -DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f - -GLOBL ·bswapMask<>(SB), RODATA, $16 -DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 -DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 - -GLOBL ·constants<>(SB), RODATA, $48 -// EX0 -DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f -DATA ·constants<>+8(SB)/8, $0x0000050403020100 -// EX1 -DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f -DATA ·constants<>+24(SB)/8, $0x00000a0908070605 -// EX2 -DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f -DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b - -GLOBL ·c<>(SB), RODATA, $48 -// EX0 -DATA ·c<>+0(SB)/8, $0x0000050403020100 -DATA ·c<>+8(SB)/8, $0x0000151413121110 -// EX1 -DATA ·c<>+16(SB)/8, $0x00000a0908070605 -DATA ·c<>+24(SB)/8, $0x00001a1918171615 -// EX2 -DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b -DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b - -GLOBL ·reduce<>(SB), RODATA, $32 -// 44 bit -DATA ·reduce<>+0(SB)/8, $0x0 -DATA ·reduce<>+8(SB)/8, $0xfffffffffff -// 42 bit -DATA ·reduce<>+16(SB)/8, $0x0 -DATA ·reduce<>+24(SB)/8, $0x3ffffffffff - -// h = (f*g) % (2**130-5) [partial reduction] -// uses T_0...T_9 temporary registers -// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2 -// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 -// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2 -#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \ - \ // Eliminate the dependency for the last 2 VMSLs - VMSLG m02_0, r_2, m4_2, m4_2 \ - VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined - VMSLG m02_0, r_0, m4_0, m4_0 \ - VMSLG m02_1, r5_2, V0, T_0 \ - VMSLG m02_0, r_1, m4_1, m4_1 \ - VMSLG m02_1, r_0, V0, T_1 \ - VMSLG m02_1, r_1, V0, T_2 \ - VMSLG m02_2, r5_1, V0, T_3 \ - VMSLG m02_2, r5_2, V0, T_4 \ - VMSLG m13_0, r_0, m5_0, m5_0 \ - VMSLG m13_1, r5_2, V0, T_5 \ - VMSLG m13_0, r_1, m5_1, m5_1 \ - VMSLG m13_1, r_0, V0, T_6 \ - VMSLG m13_1, r_1, V0, T_7 \ - VMSLG m13_2, r5_1, V0, T_8 \ - VMSLG m13_2, r5_2, V0, T_9 \ - VMSLG m02_2, r_0, m4_2, m4_2 \ - VMSLG m13_2, r_0, m5_2, m5_2 \ - VAQ m4_0, T_0, m02_0 \ - VAQ m4_1, T_1, m02_1 \ - VAQ m5_0, T_5, m13_0 \ - VAQ m5_1, T_6, m13_1 \ - VAQ m02_0, T_3, m02_0 \ - VAQ m02_1, T_4, m02_1 \ - VAQ m13_0, T_8, m13_0 \ - VAQ m13_1, T_9, m13_1 \ - VAQ m4_2, T_2, m02_2 \ - VAQ m5_2, T_7, m13_2 \ - -// SQUARE uses three limbs of r and r_2*5 to output square of r -// uses T_1, T_5 and T_7 temporary registers -// input: r_0, r_1, r_2, r5_2 -// temp: TEMP0, TEMP1, TEMP2 -// output: p0, p1, p2 -#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \ - VMSLG r_0, r_0, p0, p0 \ - VMSLG r_1, r5_2, V0, TEMP0 \ - VMSLG r_2, 
r5_2, p1, p1 \ - VMSLG r_0, r_1, V0, TEMP1 \ - VMSLG r_1, r_1, p2, p2 \ - VMSLG r_0, r_2, V0, TEMP2 \ - VAQ TEMP0, p0, p0 \ - VAQ TEMP1, p1, p1 \ - VAQ TEMP2, p2, p2 \ - VAQ TEMP0, p0, p0 \ - VAQ TEMP1, p1, p1 \ - VAQ TEMP2, p2, p2 \ - -// carry h0->h1->h2->h0 || h3->h4->h5->h3 -// uses T_2, T_4, T_5, T_7, T_8, T_9 -// t6, t7, t8, t9, t10, t11 -// input: h0, h1, h2, h3, h4, h5 -// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11 -// output: h0, h1, h2, h3, h4, h5 -#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \ - VLM (R12), t6, t7 \ // 44 and 42 bit clear mask - VLEIB $7, $0x28, t10 \ // 5 byte shift mask - VREPIB $4, t8 \ // 4 bit shift mask - VREPIB $2, t11 \ // 2 bit shift mask - VSRLB t10, h0, t0 \ // h0 byte shift - VSRLB t10, h1, t1 \ // h1 byte shift - VSRLB t10, h2, t2 \ // h2 byte shift - VSRLB t10, h3, t3 \ // h3 byte shift - VSRLB t10, h4, t4 \ // h4 byte shift - VSRLB t10, h5, t5 \ // h5 byte shift - VSRL t8, t0, t0 \ // h0 bit shift - VSRL t8, t1, t1 \ // h2 bit shift - VSRL t11, t2, t2 \ // h2 bit shift - VSRL t8, t3, t3 \ // h3 bit shift - VSRL t8, t4, t4 \ // h4 bit shift - VESLG $2, t2, t9 \ // h2 carry x5 - VSRL t11, t5, t5 \ // h5 bit shift - VN t6, h0, h0 \ // h0 clear carry - VAQ t2, t9, t2 \ // h2 carry x5 - VESLG $2, t5, t9 \ // h5 carry x5 - VN t6, h1, h1 \ // h1 clear carry - VN t7, h2, h2 \ // h2 clear carry - VAQ t5, t9, t5 \ // h5 carry x5 - VN t6, h3, h3 \ // h3 clear carry - VN t6, h4, h4 \ // h4 clear carry - VN t7, h5, h5 \ // h5 clear carry - VAQ t0, h1, h1 \ // h0->h1 - VAQ t3, h4, h4 \ // h3->h4 - VAQ t1, h2, h2 \ // h1->h2 - VAQ t4, h5, h5 \ // h4->h5 - VAQ t2, h0, h0 \ // h2->h0 - VAQ t5, h3, h3 \ // h5->h3 - VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves - VREPG $1, t7, t7 \ - VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5] - VSLDB $8, h1, h1, h1 \ - VSLDB $8, h2, h2, h2 \ - VO h0, h3, h3 \ - VO h1, h4, h4 \ - VO h2, h5, h5 \ - VESRLG $44, h3, t0 \ // 44 bit shift right - VESRLG $44, h4, t1 \ - VESRLG $42, h5, t2 \ - VN t6, h3, h3 \ // clear carry bits - VN t6, h4, h4 \ - VN t7, h5, h5 \ - VESLG $2, t2, t9 \ // multiply carry by 5 - VAQ t9, t2, t2 \ - VAQ t0, h4, h4 \ - VAQ t1, h5, h5 \ - VAQ t2, h3, h3 \ - -// carry h0->h1->h2->h0 -// input: h0, h1, h2 -// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8 -// output: h0, h1, h2 -#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \ - VLEIB $7, $0x28, t3 \ // 5 byte shift mask - VREPIB $4, t4 \ // 4 bit shift mask - VREPIB $2, t7 \ // 2 bit shift mask - VGBM $0x003F, t5 \ // mask to clear carry bits - VSRLB t3, h0, t0 \ - VSRLB t3, h1, t1 \ - VSRLB t3, h2, t2 \ - VESRLG $4, t5, t5 \ // 44 bit clear mask - VSRL t4, t0, t0 \ - VSRL t4, t1, t1 \ - VSRL t7, t2, t2 \ - VESRLG $2, t5, t6 \ // 42 bit clear mask - VESLG $2, t2, t8 \ - VAQ t8, t2, t2 \ - VN t5, h0, h0 \ - VN t5, h1, h1 \ - VN t6, h2, h2 \ - VAQ t0, h1, h1 \ - VAQ t1, h2, h2 \ - VAQ t2, h0, h0 \ - VSRLB t3, h0, t0 \ - VSRLB t3, h1, t1 \ - VSRLB t3, h2, t2 \ - VSRL t4, t0, t0 \ - VSRL t4, t1, t1 \ - VSRL t7, t2, t2 \ - VN t5, h0, h0 \ - VN t5, h1, h1 \ - VESLG $2, t2, t8 \ - VN t6, h2, h2 \ - VAQ t0, h1, h1 \ - VAQ t8, t2, t2 \ - VAQ t1, h2, h2 \ - VAQ t2, h0, h0 \ - -// expands two message blocks into the lower halfs of the d registers -// moves the contents of the d registers into upper halfs -// input: in1, in2, d0, d1, d2, d3, d4, d5 -// temp: TEMP0, TEMP1, TEMP2, TEMP3 -// output: d0, d1, d2, d3, d4, d5 -#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, 
TEMP3) \ - VGBM $0xff3f, TEMP0 \ - VGBM $0xff1f, TEMP1 \ - VESLG $4, d1, TEMP2 \ - VESLG $4, d4, TEMP3 \ - VESRLG $4, TEMP0, TEMP0 \ - VPERM in1, d0, EX0, d0 \ - VPERM in2, d3, EX0, d3 \ - VPERM in1, d2, EX2, d2 \ - VPERM in2, d5, EX2, d5 \ - VPERM in1, TEMP2, EX1, d1 \ - VPERM in2, TEMP3, EX1, d4 \ - VN TEMP0, d0, d0 \ - VN TEMP0, d3, d3 \ - VESRLG $4, d1, d1 \ - VESRLG $4, d4, d4 \ - VN TEMP1, d2, d2 \ - VN TEMP1, d5, d5 \ - VN TEMP0, d1, d1 \ - VN TEMP0, d4, d4 \ - -// expands one message block into the lower halfs of the d registers -// moves the contents of the d registers into upper halfs -// input: in, d0, d1, d2 -// temp: TEMP0, TEMP1, TEMP2 -// output: d0, d1, d2 -#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \ - VGBM $0xff3f, TEMP0 \ - VESLG $4, d1, TEMP2 \ - VGBM $0xff1f, TEMP1 \ - VPERM in, d0, EX0, d0 \ - VESRLG $4, TEMP0, TEMP0 \ - VPERM in, d2, EX2, d2 \ - VPERM in, TEMP2, EX1, d1 \ - VN TEMP0, d0, d0 \ - VN TEMP1, d2, d2 \ - VESRLG $4, d1, d1 \ - VN TEMP0, d1, d1 \ - -// pack h2:h0 into h1:h0 (no carry) -// input: h0, h1, h2 -// output: h0, h1, h2 -#define PACK(h0, h1, h2) \ - VMRLG h1, h2, h2 \ // copy h1 to upper half h2 - VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20 - VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1 - VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1 - VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1 - VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1) - VLEIG $0, $0, h2 \ // clear upper half of h2 - VESRLG $40, h2, h1 \ // h1 now has upper two bits of result - VLEIB $7, $88, h1 \ // for byte shift (11 bytes) - VSLB h1, h2, h2 \ // shift h2 11 bytes to the left - VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1 - VLEIG $0, $0, h1 \ // clear upper half of h1 - -// if h > 2**130-5 then h -= 2**130-5 -// input: h0, h1 -// temp: t0, t1, t2 -// output: h0 -#define MOD(h0, h1, t0, t1, t2) \ - VZERO t0 \ - VLEIG $1, $5, t0 \ - VACCQ h0, t0, t1 \ - VAQ h0, t0, t0 \ - VONE t2 \ - VLEIG $1, $-4, t2 \ - VAQ t2, t1, t1 \ - VACCQ h1, t1, t1 \ - VONE t2 \ - VAQ t2, t1, t1 \ - VN h0, t1, t2 \ - VNC t0, t1, t1 \ - VO t1, t2, h0 \ - -// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305vmsl(SB), $0-32 - // This code processes 6 + up to 4 blocks (32 bytes) per iteration - // using the algorithm described in: - // NEON crypto, Daniel J. 
Bernstein & Peter Schwabe - // https://cryptojedi.org/papers/neoncrypto-20120320.pdf - // And as moddified for VMSL as described in - // Accelerating Poly1305 Cryptographic Message Authentication on the z14 - // O'Farrell et al, CASCON 2017, p48-55 - // https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht - - LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key - VZERO V0 // c - - // load EX0, EX1 and EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 // c - - // setup r - VL (R4), T_0 - MOVD $·keyMask<>(SB), R6 - VL (R6), T_1 - VN T_0, T_1, T_0 - VZERO T_2 // limbs for r - VZERO T_3 - VZERO T_4 - EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7) - - // T_2, T_3, T_4: [0, r] - - // setup r*20 - VLEIG $0, $0, T_0 - VLEIG $1, $20, T_0 // T_0: [0, 20] - VZERO T_5 - VZERO T_6 - VMSLG T_0, T_3, T_5, T_5 - VMSLG T_0, T_4, T_6, T_6 - - // store r for final block in GR - VLGVG $1, T_2, RSAVE_0 // c - VLGVG $1, T_3, RSAVE_1 // c - VLGVG $1, T_4, RSAVE_2 // c - VLGVG $1, T_5, R5SAVE_1 // c - VLGVG $1, T_6, R5SAVE_2 // c - - // initialize h - VZERO H0_0 - VZERO H1_0 - VZERO H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - // initialize pointer for reduce constants - MOVD $·reduce<>(SB), R12 - - // calculate r**2 and 20*(r**2) - VZERO R_0 - VZERO R_1 - VZERO R_2 - SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7) - REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1) - VZERO R5_1 - VZERO R5_2 - VMSLG T_0, R_1, R5_1, R5_1 - VMSLG T_0, R_2, R5_2, R5_2 - - // skip r**4 calculation if 3 blocks or less - CMPBLE R3, $48, b4 - - // calculate r**4 and 20*(r**4) - VZERO T_8 - VZERO T_9 - VZERO T_10 - SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7) - REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1) - VZERO T_2 - VZERO T_3 - VMSLG T_0, T_9, T_2, T_2 - VMSLG T_0, T_10, T_3, T_3 - - // put r**2 to the right and r**4 to the left of R_0, R_1, R_2 - VSLDB $8, T_8, T_8, T_8 - VSLDB $8, T_9, T_9, T_9 - VSLDB $8, T_10, T_10, T_10 - VSLDB $8, T_2, T_2, T_2 - VSLDB $8, T_3, T_3, T_3 - - VO T_8, R_0, R_0 - VO T_9, R_1, R_1 - VO T_10, R_2, R_2 - VO T_2, R5_1, R5_1 - VO T_3, R5_2, R5_2 - - CMPBLE R3, $80, load // less than or equal to 5 blocks in message - - // 6(or 5+1) blocks - SUB $81, R3 - VLM (R2), M0, M4 - VLL R3, 80(R2), M5 - ADD $1, R3 - MOVBZ $1, R0 - CMPBGE R3, $16, 2(PC) - VLVGB R3, R0, M5 - MOVD $96(R2), R2 - EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) - EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) - VLEIB $2, $1, H2_0 - VLEIB $2, $1, H2_1 - VLEIB $10, $1, H2_0 - VLEIB $10, $1, H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO T_4 - VZERO T_10 - EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3) - VLR T_4, M4 - VLEIB $10, $1, M2 - CMPBLT R3, $16, 2(PC) - VLEIB $10, $1, T_10 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - - SUB $16, R3 - CMPBLE R3, $0, square - -load: - // load EX0, EX1 and EX2 - MOVD $·c<>(SB), R5 - VLM (R5), EX0, EX2 - -loop: - CMPBLE R3, $64, add // b4 // last 4 or less blocks left - - // next 4 full blocks - VLM (R2), M2, M5 - SUB $64, R3 - MOVD $64(R2), R2 - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, 
T_2, T_7, T_8, T_9) - - // expacc in-lined to create [m2, m3] limbs - VGBM $0x3f3f, T_0 // 44 bit clear mask - VGBM $0x1f1f, T_1 // 40 bit clear mask - VPERM M2, M3, EX0, T_3 - VESRLG $4, T_0, T_0 // 44 bit clear mask ready - VPERM M2, M3, EX1, T_4 - VPERM M2, M3, EX2, T_5 - VN T_0, T_3, T_3 - VESRLG $4, T_4, T_4 - VN T_1, T_5, T_5 - VN T_0, T_4, T_4 - VMRHG H0_1, T_3, H0_0 - VMRHG H1_1, T_4, H1_0 - VMRHG H2_1, T_5, H2_0 - VMRLG H0_1, T_3, H0_1 - VMRLG H1_1, T_4, H1_1 - VMRLG H2_1, T_5, H2_1 - VLEIB $10, $1, H2_0 - VLEIB $10, $1, H2_1 - VPERM M4, M5, EX0, T_3 - VPERM M4, M5, EX1, T_4 - VPERM M4, M5, EX2, T_5 - VN T_0, T_3, T_3 - VESRLG $4, T_4, T_4 - VN T_1, T_5, T_5 - VN T_0, T_4, T_4 - VMRHG V0, T_3, M0 - VMRHG V0, T_4, M1 - VMRHG V0, T_5, M2 - VMRLG V0, T_3, M3 - VMRLG V0, T_4, M4 - VMRLG V0, T_5, M5 - VLEIB $10, $1, M2 - VLEIB $10, $1, M5 - - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - CMPBNE R3, $0, loop - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - - // load EX0, EX1, EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - // sum vectors - VAQ H0_0, H0_1, H0_0 - VAQ H1_0, H1_1, H1_0 - VAQ H2_0, H2_1, H2_0 - - // h may be >= 2*(2**130-5) so we need to reduce it again - // M0...M4 are used as temps here - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - -next: // carry h1->h2 - VLEIB $7, $0x28, T_1 - VREPIB $4, T_2 - VGBM $0x003F, T_3 - VESRLG $4, T_3 - - // byte shift - VSRLB T_1, H1_0, T_4 - - // bit shift - VSRL T_2, T_4, T_4 - - // clear h1 carry bits - VN T_3, H1_0, H1_0 - - // add carry - VAQ T_4, H2_0, H2_0 - - // h is now < 2*(2**130-5) - // pack h into h1 (hi) and h0 (lo) - PACK(H0_0, H1_0, H2_0) - - // if h > 2**130-5 then h -= 2**130-5 - MOD(H0_0, H1_0, T_0, T_1, T_2) - - // h += s - MOVD $·bswapMask<>(SB), R5 - VL (R5), T_1 - VL 16(R4), T_0 - VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) - VAQ T_0, H0_0, H0_0 - VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little) - VST H0_0, (R1) - RET - -add: - // load EX0, EX1, EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - CMPBLE R3, $64, b4 - -b4: - CMPBLE R3, $48, b3 // 3 blocks or less - - // 4(3+1) blocks remaining - SUB $49, R3 - VLM (R2), M0, M2 - VLL R3, 48(R2), M3 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M3 - MOVD $64(R2), R2 - EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) - VLEIB $10, $1, H2_0 - VLEIB $10, $1, H2_1 - VZERO M0 - VZERO M1 - VZERO M4 - VZERO M5 - VZERO T_4 - VZERO T_10 - EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3) - VLR T_4, M2 - VLEIB $10, $1, M4 - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, T_10 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - 
SUB $16, R3 - CMPBLE R3, $0, square // this condition must always hold true! - -b3: - CMPBLE R3, $32, b2 - - // 3 blocks remaining - - // setup [r²,r] - VSLDB $8, R_0, R_0, R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // H*[r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5) - - SUB $33, R3 - VLM (R2), M0, M1 - VLL R3, 32(R2), M2 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M2 - - // H += m0 - VZERO T_1 - VZERO T_2 - VZERO T_3 - EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6) - VLEIB $10, $1, T_3 - VAG H0_0, T_1, H0_0 - VAG H1_0, T_2, H1_0 - VAG H2_0, T_3, H2_0 - - VZERO M0 - VZERO M3 - VZERO M4 - VZERO M5 - VZERO T_10 - - // (H+m0)*r - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9) - - // H += m1 - VZERO V0 - VZERO T_1 - VZERO T_2 - VZERO T_3 - EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6) - VLEIB $10, $1, T_3 - VAQ H0_0, T_1, H0_0 - VAQ H1_0, T_2, H1_0 - VAQ H2_0, T_3, H2_0 - REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) - - // [H, m2] * [r**2, r] - EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3) - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, H2_0 - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10) - SUB $16, R3 - CMPBLE R3, $0, next // this condition must always hold true! 
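For orientation, the b4/b3/b2/b1/square blocks in this file all lean on the interleaved-lane evaluation from the NEON-crypto paper cited above: m1·r**4 + m2·r**3 + m3·r**2 + m4·r = (m1·r**2 + m3)·r**2 + (m2·r**2 + m4)·r, so each accumulator lane only ever multiplies by a fixed power of r, and the lanes are merged at the end with a single multiply by [r**2, r] — which is what the repeated "setup [r²,r]" sequences prepare.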
- -b2: - CMPBLE R3, $16, b1 - - // 2 blocks remaining - - // setup [r²,r] - VSLDB $8, R_0, R_0, R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // H*[r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - - // move h to the left and 0s at the right - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - - // get message blocks and append 1 to start - SUB $17, R3 - VL (R2), M0 - VLL R3, 16(R2), M1 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M1 - VZERO T_6 - VZERO T_7 - VZERO T_8 - EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3) - EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3) - VLEIB $2, $1, T_8 - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, T_8 - - // add [m0, m1] to h - VAG H0_0, T_6, H0_0 - VAG H1_0, T_7, H1_0 - VAG H2_0, T_8, H2_0 - - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - VZERO T_10 - VZERO M0 - - // at this point R_0 .. R5_2 look like [r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) - SUB $16, R3, R3 - CMPBLE R3, $0, next - -b1: - CMPBLE R3, $0, next - - // 1 block remaining - - // setup [r²,r] - VSLDB $8, R_0, R_0, R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // H*[r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - - // set up [0, m0] limbs - SUB $1, R3 - VLL R3, (R2), M0 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M0 - VZERO T_1 - VZERO T_2 - VZERO T_3 - EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m] - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, T_3 - - // h+m0 - VAQ H0_0, T_1, H0_0 - VAQ H1_0, T_2, H1_0 - VAQ H2_0, T_3, H2_0 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - - BR next - -square: - // setup [r²,r] - VSLDB $8, R_0, R_0, 
R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // (h0*r**2) + (h1*r) - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - BR next diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go deleted file mode 100644 index 51f740500..000000000 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ /dev/null @@ -1,789 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package agent implements the ssh-agent protocol, and provides both -// a client and a server. The client can talk to a standard ssh-agent -// that uses UNIX sockets, and one could implement an alternative -// ssh-agent process using the sample server. -// -// References: -// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 -package agent // import "golang.org/x/crypto/ssh/agent" - -import ( - "bytes" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/base64" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "sync" - - "crypto" - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh" -) - -// SignatureFlags represent additional flags that can be passed to the signature -// requests an defined in [PROTOCOL.agent] section 4.5.1. -type SignatureFlags uint32 - -// SignatureFlag values as defined in [PROTOCOL.agent] section 5.3. -const ( - SignatureFlagReserved SignatureFlags = 1 << iota - SignatureFlagRsaSha256 - SignatureFlagRsaSha512 -) - -// Agent represents the capabilities of an ssh-agent. -type Agent interface { - // List returns the identities known to the agent. - List() ([]*Key, error) - - // Sign has the agent sign the data using a protocol 2 key as defined - // in [PROTOCOL.agent] section 2.6.2. - Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) - - // Add adds a private key to the agent. - Add(key AddedKey) error - - // Remove removes all identities with the given public key. - Remove(key ssh.PublicKey) error - - // RemoveAll removes all identities. - RemoveAll() error - - // Lock locks the agent. Sign and Remove will fail, and List will empty an empty list. - Lock(passphrase []byte) error - - // Unlock undoes the effect of Lock - Unlock(passphrase []byte) error - - // Signers returns signers for all the known keys. - Signers() ([]ssh.Signer, error) -} - -type ExtendedAgent interface { - Agent - - // SignWithFlags signs like Sign, but allows for additional flags to be sent/received - SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) - - // Extension processes a custom extension request. Standard-compliant agents are not - // required to support any extensions, but this method allows agents to implement - // vendor-specific methods or add experimental features. See [PROTOCOL.agent] section 4.7. 
- // If agent extensions are unsupported entirely this method MUST return an - // ErrExtensionUnsupported error. Similarly, if just the specific extensionType in - // the request is unsupported by the agent then ErrExtensionUnsupported MUST be - // returned. - // - // In the case of success, since [PROTOCOL.agent] section 4.7 specifies that the contents - // of the response are unspecified (including the type of the message), the complete - // response will be returned as a []byte slice, including the "type" byte of the message. - Extension(extensionType string, contents []byte) ([]byte, error) -} - -// ConstraintExtension describes an optional constraint defined by users. -type ConstraintExtension struct { - // ExtensionName consist of a UTF-8 string suffixed by the - // implementation domain following the naming scheme defined - // in Section 4.2 of [RFC4251], e.g. "foo@example.com". - ExtensionName string - // ExtensionDetails contains the actual content of the extended - // constraint. - ExtensionDetails []byte -} - -// AddedKey describes an SSH key to be added to an Agent. -type AddedKey struct { - // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or - // *ecdsa.PrivateKey, which will be inserted into the agent. - PrivateKey interface{} - // Certificate, if not nil, is communicated to the agent and will be - // stored with the key. - Certificate *ssh.Certificate - // Comment is an optional, free-form string. - Comment string - // LifetimeSecs, if not zero, is the number of seconds that the - // agent will store the key for. - LifetimeSecs uint32 - // ConfirmBeforeUse, if true, requests that the agent confirm with the - // user before each use of this key. - ConfirmBeforeUse bool - // ConstraintExtensions are the experimental or private-use constraints - // defined by users. - ConstraintExtensions []ConstraintExtension -} - -// See [PROTOCOL.agent], section 3. -const ( - agentRequestV1Identities = 1 - agentRemoveAllV1Identities = 9 - - // 3.2 Requests from client to agent for protocol 2 key operations - agentAddIdentity = 17 - agentRemoveIdentity = 18 - agentRemoveAllIdentities = 19 - agentAddIDConstrained = 25 - - // 3.3 Key-type independent requests from client to agent - agentAddSmartcardKey = 20 - agentRemoveSmartcardKey = 21 - agentLock = 22 - agentUnlock = 23 - agentAddSmartcardKeyConstrained = 26 - - // 3.7 Key constraint identifiers - agentConstrainLifetime = 1 - agentConstrainConfirm = 2 - agentConstrainExtension = 3 -) - -// maxAgentResponseBytes is the maximum agent reply size that is accepted. This -// is a sanity check, not a limit in the spec. -const maxAgentResponseBytes = 16 << 20 - -// Agent messages: -// These structures mirror the wire format of the corresponding ssh agent -// messages found in [PROTOCOL.agent]. - -// 3.4 Generic replies from agent to client -const agentFailure = 5 - -type failureAgentMsg struct{} - -const agentSuccess = 6 - -type successAgentMsg struct{} - -// See [PROTOCOL.agent], section 2.5.2. -const agentRequestIdentities = 11 - -type requestIdentitiesAgentMsg struct{} - -// See [PROTOCOL.agent], section 2.5.2. -const agentIdentitiesAnswer = 12 - -type identitiesAnswerAgentMsg struct { - NumKeys uint32 `sshtype:"12"` - Keys []byte `ssh:"rest"` -} - -// See [PROTOCOL.agent], section 2.6.2. -const agentSignRequest = 13 - -type signRequestAgentMsg struct { - KeyBlob []byte `sshtype:"13"` - Data []byte - Flags uint32 -} - -// See [PROTOCOL.agent], section 2.6.2. 
- -// 3.6 Replies from agent to client for protocol 2 key operations -const agentSignResponse = 14 - -type signResponseAgentMsg struct { - SigBlob []byte `sshtype:"14"` -} - -type publicKey struct { - Format string - Rest []byte `ssh:"rest"` -} - -// 3.7 Key constraint identifiers -type constrainLifetimeAgentMsg struct { - LifetimeSecs uint32 `sshtype:"1"` -} - -type constrainExtensionAgentMsg struct { - ExtensionName string `sshtype:"3"` - ExtensionDetails []byte - - // Rest is a field used for parsing, not part of message - Rest []byte `ssh:"rest"` -} - -// See [PROTOCOL.agent], section 4.7 -const agentExtension = 27 -const agentExtensionFailure = 28 - -// ErrExtensionUnsupported indicates that an extension defined in -// [PROTOCOL.agent] section 4.7 is unsupported by the agent. Specifically this -// error indicates that the agent returned a standard SSH_AGENT_FAILURE message -// as the result of a SSH_AGENTC_EXTENSION request. Note that the protocol -// specification (and therefore this error) does not distinguish between a -// specific extension being unsupported and extensions being unsupported entirely. -var ErrExtensionUnsupported = errors.New("agent: extension unsupported") - -type extensionAgentMsg struct { - ExtensionType string `sshtype:"27"` - Contents []byte -} - -// Key represents a protocol 2 public key as defined in -// [PROTOCOL.agent], section 2.5.2. -type Key struct { - Format string - Blob []byte - Comment string -} - -func clientErr(err error) error { - return fmt.Errorf("agent: client error: %v", err) -} - -// String returns the storage form of an agent key with the format, base64 -// encoded serialized key, and the comment if it is not empty. -func (k *Key) String() string { - s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob) - - if k.Comment != "" { - s += " " + k.Comment - } - - return s -} - -// Type returns the public key type. -func (k *Key) Type() string { - return k.Format -} - -// Marshal returns key blob to satisfy the ssh.PublicKey interface. -func (k *Key) Marshal() []byte { - return k.Blob -} - -// Verify satisfies the ssh.PublicKey interface. -func (k *Key) Verify(data []byte, sig *ssh.Signature) error { - pubKey, err := ssh.ParsePublicKey(k.Blob) - if err != nil { - return fmt.Errorf("agent: bad public key: %v", err) - } - return pubKey.Verify(data, sig) -} - -type wireKey struct { - Format string - Rest []byte `ssh:"rest"` -} - -func parseKey(in []byte) (out *Key, rest []byte, err error) { - var record struct { - Blob []byte - Comment string - Rest []byte `ssh:"rest"` - } - - if err := ssh.Unmarshal(in, &record); err != nil { - return nil, nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(record.Blob, &wk); err != nil { - return nil, nil, err - } - - return &Key{ - Format: wk.Format, - Blob: record.Blob, - Comment: record.Comment, - }, record.Rest, nil -} - -// client is a client for an ssh-agent process. -type client struct { - // conn is typically a *net.UnixConn - conn io.ReadWriter - // mu is used to prevent concurrent access to the agent - mu sync.Mutex -} - -// NewClient returns an Agent that talks to an ssh-agent process over -// the given connection. -func NewClient(rw io.ReadWriter) ExtendedAgent { - return &client{conn: rw} -} - -// call sends an RPC to the agent. On success, the reply is -// unmarshaled into reply and replyType is set to the first byte of -// the reply, which contains the type of the message. 
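The call/callRaw pair documented just above is where the whole client bottoms out: every request is written as a 4-byte big-endian length followed by the body, whose first byte is the message type, and replies come back framed the same way. A minimal standalone sketch of that framing follows; the function names are illustrative and not part of the removed file.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// frameAgentRequest prepends the 4-byte big-endian length used on the
// ssh-agent wire. The request body's first byte is the message type.
func frameAgentRequest(req []byte) []byte {
	msg := make([]byte, 4+len(req))
	binary.BigEndian.PutUint32(msg, uint32(len(req)))
	copy(msg[4:], req)
	return msg
}

// readAgentReply reads one length-prefixed reply and returns its body.
func readAgentReply(r io.Reader) ([]byte, error) {
	var sizeBuf [4]byte
	if _, err := io.ReadFull(r, sizeBuf[:]); err != nil {
		return nil, err
	}
	size := binary.BigEndian.Uint32(sizeBuf[:])
	buf := make([]byte, size)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	// Frame an SSH_AGENTC_REQUEST_IDENTITIES (type 11) request.
	framed := frameAgentRequest([]byte{11})
	fmt.Printf("framed request: % x\n", framed) // 00 00 00 01 0b

	// Pretend the agent answered with an empty identities answer (type 12,
	// NumKeys = 0); an in-memory buffer stands in for the agent socket.
	reply := frameAgentRequest([]byte{12, 0, 0, 0, 0})
	body, _ := readAgentReply(bytes.NewReader(reply))
	fmt.Printf("reply type: %d, payload: % x\n", body[0], body[1:])
}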
-func (c *client) call(req []byte) (reply interface{}, err error) { - buf, err := c.callRaw(req) - if err != nil { - return nil, err - } - reply, err = unmarshal(buf) - if err != nil { - return nil, clientErr(err) - } - return reply, nil -} - -// callRaw sends an RPC to the agent. On success, the raw -// bytes of the response are returned; no unmarshalling is -// performed on the response. -func (c *client) callRaw(req []byte) (reply []byte, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - msg := make([]byte, 4+len(req)) - binary.BigEndian.PutUint32(msg, uint32(len(req))) - copy(msg[4:], req) - if _, err = c.conn.Write(msg); err != nil { - return nil, clientErr(err) - } - - var respSizeBuf [4]byte - if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil { - return nil, clientErr(err) - } - respSize := binary.BigEndian.Uint32(respSizeBuf[:]) - if respSize > maxAgentResponseBytes { - return nil, clientErr(errors.New("response too large")) - } - - buf := make([]byte, respSize) - if _, err = io.ReadFull(c.conn, buf); err != nil { - return nil, clientErr(err) - } - return buf, nil -} - -func (c *client) simpleCall(req []byte) error { - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -func (c *client) RemoveAll() error { - return c.simpleCall([]byte{agentRemoveAllIdentities}) -} - -func (c *client) Remove(key ssh.PublicKey) error { - req := ssh.Marshal(&agentRemoveIdentityMsg{ - KeyBlob: key.Marshal(), - }) - return c.simpleCall(req) -} - -func (c *client) Lock(passphrase []byte) error { - req := ssh.Marshal(&agentLockMsg{ - Passphrase: passphrase, - }) - return c.simpleCall(req) -} - -func (c *client) Unlock(passphrase []byte) error { - req := ssh.Marshal(&agentUnlockMsg{ - Passphrase: passphrase, - }) - return c.simpleCall(req) -} - -// List returns the identities known to the agent. -func (c *client) List() ([]*Key, error) { - // see [PROTOCOL.agent] section 2.5.2. - req := []byte{agentRequestIdentities} - - msg, err := c.call(req) - if err != nil { - return nil, err - } - - switch msg := msg.(type) { - case *identitiesAnswerAgentMsg: - if msg.NumKeys > maxAgentResponseBytes/8 { - return nil, errors.New("agent: too many keys in agent reply") - } - keys := make([]*Key, msg.NumKeys) - data := msg.Keys - for i := uint32(0); i < msg.NumKeys; i++ { - var key *Key - var err error - if key, data, err = parseKey(data); err != nil { - return nil, err - } - keys[i] = key - } - return keys, nil - case *failureAgentMsg: - return nil, errors.New("agent: failed to list keys") - } - panic("unreachable") -} - -// Sign has the agent sign the data using a protocol 2 key as defined -// in [PROTOCOL.agent] section 2.6.2. 
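For context, a typical round trip through this client looks like the sketch below: connect to the agent named by SSH_AUTH_SOCK, list its keys, have it sign a challenge with the first one, and verify the signature with the key itself. This is a usage sketch only — it assumes a running agent and is not part of the removed file.

package main

import (
	"fmt"
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatalf("dial agent: %v", err)
	}
	defer conn.Close()

	ag := agent.NewClient(conn)
	keys, err := ag.List()
	if err != nil || len(keys) == 0 {
		log.Fatalf("no agent keys available: %v", err)
	}

	data := []byte("challenge to be signed")
	sig, err := ag.Sign(keys[0], data)
	if err != nil {
		log.Fatalf("sign: %v", err)
	}
	// *agent.Key implements ssh.PublicKey, so it can verify its own signature.
	if err := keys[0].Verify(data, sig); err != nil {
		log.Fatalf("verify: %v", err)
	}
	fmt.Printf("signature of type %s verified\n", sig.Format)
}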
-func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { - return c.SignWithFlags(key, data, 0) -} - -func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) { - req := ssh.Marshal(signRequestAgentMsg{ - KeyBlob: key.Marshal(), - Data: data, - Flags: uint32(flags), - }) - - msg, err := c.call(req) - if err != nil { - return nil, err - } - - switch msg := msg.(type) { - case *signResponseAgentMsg: - var sig ssh.Signature - if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil { - return nil, err - } - - return &sig, nil - case *failureAgentMsg: - return nil, errors.New("agent: failed to sign challenge") - } - panic("unreachable") -} - -// unmarshal parses an agent message in packet, returning the parsed -// form and the message type of packet. -func unmarshal(packet []byte) (interface{}, error) { - if len(packet) < 1 { - return nil, errors.New("agent: empty packet") - } - var msg interface{} - switch packet[0] { - case agentFailure: - return new(failureAgentMsg), nil - case agentSuccess: - return new(successAgentMsg), nil - case agentIdentitiesAnswer: - msg = new(identitiesAnswerAgentMsg) - case agentSignResponse: - msg = new(signResponseAgentMsg) - case agentV1IdentitiesAnswer: - msg = new(agentV1IdentityMsg) - default: - return nil, fmt.Errorf("agent: unknown type tag %d", packet[0]) - } - if err := ssh.Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -type rsaKeyMsg struct { - Type string `sshtype:"17|25"` - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type dsaKeyMsg struct { - Type string `sshtype:"17|25"` - P *big.Int - Q *big.Int - G *big.Int - Y *big.Int - X *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ecdsaKeyMsg struct { - Type string `sshtype:"17|25"` - Curve string - KeyBytes []byte - D *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ed25519KeyMsg struct { - Type string `sshtype:"17|25"` - Pub []byte - Priv []byte - Comments string - Constraints []byte `ssh:"rest"` -} - -// Insert adds a private key to the agent. -func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { - var req []byte - switch k := s.(type) { - case *rsa.PrivateKey: - if len(k.Primes) != 2 { - return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) - } - k.Precompute() - req = ssh.Marshal(rsaKeyMsg{ - Type: ssh.KeyAlgoRSA, - N: k.N, - E: big.NewInt(int64(k.E)), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, - Constraints: constraints, - }) - case *dsa.PrivateKey: - req = ssh.Marshal(dsaKeyMsg{ - Type: ssh.KeyAlgoDSA, - P: k.P, - Q: k.Q, - G: k.G, - Y: k.Y, - X: k.X, - Comments: comment, - Constraints: constraints, - }) - case *ecdsa.PrivateKey: - nistID := fmt.Sprintf("nistp%d", k.Params().BitSize) - req = ssh.Marshal(ecdsaKeyMsg{ - Type: "ecdsa-sha2-" + nistID, - Curve: nistID, - KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), - D: k.D, - Comments: comment, - Constraints: constraints, - }) - case *ed25519.PrivateKey: - req = ssh.Marshal(ed25519KeyMsg{ - Type: ssh.KeyAlgoED25519, - Pub: []byte(*k)[32:], - Priv: []byte(*k), - Comments: comment, - Constraints: constraints, - }) - default: - return fmt.Errorf("agent: unsupported key type %T", s) - } - - // if constraints are present then the message type needs to be changed. 
- if len(constraints) != 0 { - req[0] = agentAddIDConstrained - } - - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -type rsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type dsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - X *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ecdsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - D *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ed25519CertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - Pub []byte - Priv []byte - Comments string - Constraints []byte `ssh:"rest"` -} - -// Add adds a private key to the agent. If a certificate is given, -// that certificate is added instead as public key. -func (c *client) Add(key AddedKey) error { - var constraints []byte - - if secs := key.LifetimeSecs; secs != 0 { - constraints = append(constraints, ssh.Marshal(constrainLifetimeAgentMsg{secs})...) - } - - if key.ConfirmBeforeUse { - constraints = append(constraints, agentConstrainConfirm) - } - - cert := key.Certificate - if cert == nil { - return c.insertKey(key.PrivateKey, key.Comment, constraints) - } - return c.insertCert(key.PrivateKey, cert, key.Comment, constraints) -} - -func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error { - var req []byte - switch k := s.(type) { - case *rsa.PrivateKey: - if len(k.Primes) != 2 { - return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) - } - k.Precompute() - req = ssh.Marshal(rsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, - Constraints: constraints, - }) - case *dsa.PrivateKey: - req = ssh.Marshal(dsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - X: k.X, - Comments: comment, - Constraints: constraints, - }) - case *ecdsa.PrivateKey: - req = ssh.Marshal(ecdsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Comments: comment, - Constraints: constraints, - }) - case *ed25519.PrivateKey: - req = ssh.Marshal(ed25519CertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - Pub: []byte(*k)[32:], - Priv: []byte(*k), - Comments: comment, - Constraints: constraints, - }) - default: - return fmt.Errorf("agent: unsupported key type %T", s) - } - - // if constraints are present then the message type needs to be changed. - if len(constraints) != 0 { - req[0] = agentAddIDConstrained - } - - signer, err := ssh.NewSignerFromKey(s) - if err != nil { - return err - } - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return errors.New("agent: signer and cert have different public key") - } - - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -// Signers provides a callback for client authentication. 
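// Illustrative sketch, not part of the vendored file or this diff: the Signers
// method below is what backs agent-based client authentication. The user name,
// address and the insecure host key callback are placeholders only.
package main

import (
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	sock, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatalf("dial agent: %v", err)
	}
	defer sock.Close()
	ag := agent.NewClient(sock)

	cfg := &ssh.ClientConfig{
		User: "example-user",
		Auth: []ssh.AuthMethod{
			// Each signer delegates Sign back to the agent, so private
			// keys never leave the agent process.
			ssh.PublicKeysCallback(ag.Signers),
		},
		// Placeholder only: verify host keys properly in real code.
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", "example.com:22", cfg)
	if err != nil {
		log.Fatalf("ssh dial: %v", err)
	}
	defer client.Close()
}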
-func (c *client) Signers() ([]ssh.Signer, error) { - keys, err := c.List() - if err != nil { - return nil, err - } - - var result []ssh.Signer - for _, k := range keys { - result = append(result, &agentKeyringSigner{c, k}) - } - return result, nil -} - -type agentKeyringSigner struct { - agent *client - pub ssh.PublicKey -} - -func (s *agentKeyringSigner) PublicKey() ssh.PublicKey { - return s.pub -} - -func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) { - // The agent has its own entropy source, so the rand argument is ignored. - return s.agent.Sign(s.pub, data) -} - -func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) { - var flags SignatureFlags - if opts != nil { - switch opts.HashFunc() { - case crypto.SHA256: - flags = SignatureFlagRsaSha256 - case crypto.SHA512: - flags = SignatureFlagRsaSha512 - } - } - return s.agent.SignWithFlags(s.pub, data, flags) -} - -// Calls an extension method. It is up to the agent implementation as to whether or not -// any particular extension is supported and may always return an error. Because the -// type of the response is up to the implementation, this returns the bytes of the -// response and does not attempt any type of unmarshalling. -func (c *client) Extension(extensionType string, contents []byte) ([]byte, error) { - req := ssh.Marshal(extensionAgentMsg{ - ExtensionType: extensionType, - Contents: contents, - }) - buf, err := c.callRaw(req) - if err != nil { - return nil, err - } - if len(buf) == 0 { - return nil, errors.New("agent: failure; empty response") - } - // [PROTOCOL.agent] section 4.7 indicates that an SSH_AGENT_FAILURE message - // represents an agent that does not support the extension - if buf[0] == agentFailure { - return nil, ErrExtensionUnsupported - } - if buf[0] == agentExtensionFailure { - return nil, errors.New("agent: generic extension failure") - } - - return buf, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go deleted file mode 100644 index fd24ba900..000000000 --- a/vendor/golang.org/x/crypto/ssh/agent/forward.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "errors" - "io" - "net" - "sync" - - "golang.org/x/crypto/ssh" -) - -// RequestAgentForwarding sets up agent forwarding for the session. -// ForwardToAgent or ForwardToRemote should be called to route -// the authentication requests. -func RequestAgentForwarding(session *ssh.Session) error { - ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil) - if err != nil { - return err - } - if !ok { - return errors.New("forwarding request denied") - } - return nil -} - -// ForwardToAgent routes authentication requests to the given keyring. 
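// Illustrative sketch, not part of the vendored file or this diff: wiring
// RequestAgentForwarding and ForwardToAgent together. Obtaining the connected
// *ssh.Client is out of scope here, so main only checks for it.
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// enableAgentForwarding forwards auth-agent channels opened by the server back
// to the given keyring for the lifetime of the connection.
func enableAgentForwarding(client *ssh.Client, keyring agent.Agent) (*ssh.Session, error) {
	// Register the handler for auth-agent@openssh.com channels first.
	if err := agent.ForwardToAgent(client, keyring); err != nil {
		return nil, err
	}
	session, err := client.NewSession()
	if err != nil {
		return nil, err
	}
	// Then ask the server to open such channels for this session.
	if err := agent.RequestAgentForwarding(session); err != nil {
		session.Close()
		return nil, err
	}
	return session, nil
}

func main() {
	var client *ssh.Client // placeholder: connect one before using this sketch
	if client == nil {
		log.Fatal("connect an *ssh.Client before calling enableAgentForwarding")
	}
	if _, err := enableAgentForwarding(client, agent.NewKeyring()); err != nil {
		log.Fatal(err)
	}
}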
-func ForwardToAgent(client *ssh.Client, keyring Agent) error { - channels := client.HandleChannelOpen(channelType) - if channels == nil { - return errors.New("agent: already have handler for " + channelType) - } - - go func() { - for ch := range channels { - channel, reqs, err := ch.Accept() - if err != nil { - continue - } - go ssh.DiscardRequests(reqs) - go func() { - ServeAgent(keyring, channel) - channel.Close() - }() - } - }() - return nil -} - -const channelType = "auth-agent@openssh.com" - -// ForwardToRemote routes authentication requests to the ssh-agent -// process serving on the given unix socket. -func ForwardToRemote(client *ssh.Client, addr string) error { - channels := client.HandleChannelOpen(channelType) - if channels == nil { - return errors.New("agent: already have handler for " + channelType) - } - conn, err := net.Dial("unix", addr) - if err != nil { - return err - } - conn.Close() - - go func() { - for ch := range channels { - channel, reqs, err := ch.Accept() - if err != nil { - continue - } - go ssh.DiscardRequests(reqs) - go forwardUnixSocket(channel, addr) - } - }() - return nil -} - -func forwardUnixSocket(channel ssh.Channel, addr string) { - conn, err := net.Dial("unix", addr) - if err != nil { - return - } - - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(conn, channel) - conn.(*net.UnixConn).CloseWrite() - wg.Done() - }() - go func() { - io.Copy(channel, conn) - channel.CloseWrite() - wg.Done() - }() - - wg.Wait() - conn.Close() - channel.Close() -} diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go deleted file mode 100644 index c9d979430..000000000 --- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "bytes" - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "sync" - "time" - - "golang.org/x/crypto/ssh" -) - -type privKey struct { - signer ssh.Signer - comment string - expire *time.Time -} - -type keyring struct { - mu sync.Mutex - keys []privKey - - locked bool - passphrase []byte -} - -var errLocked = errors.New("agent: locked") - -// NewKeyring returns an Agent that holds keys in memory. It is safe -// for concurrent use by multiple goroutines. -func NewKeyring() Agent { - return &keyring{} -} - -// RemoveAll removes all identities. -func (r *keyring) RemoveAll() error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - r.keys = nil - return nil -} - -// removeLocked does the actual key removal. The caller must already be holding the -// keyring mutex. -func (r *keyring) removeLocked(want []byte) error { - found := false - for i := 0; i < len(r.keys); { - if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) { - found = true - r.keys[i] = r.keys[len(r.keys)-1] - r.keys = r.keys[:len(r.keys)-1] - continue - } else { - i++ - } - } - - if !found { - return errors.New("agent: key not found") - } - return nil -} - -// Remove removes all identities with the given public key. -func (r *keyring) Remove(key ssh.PublicKey) error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - return r.removeLocked(key.Marshal()) -} - -// Lock locks the agent. Sign and Remove will fail, and List will return an empty list. 
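// Illustrative sketch, not part of the vendored file or this diff: the
// lock/unlock semantics of the in-memory keyring. While locked, List reports
// no keys and Unlock only succeeds with the original passphrase.
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	keyring := agent.NewKeyring()

	if err := keyring.Lock([]byte("secret")); err != nil {
		log.Fatal(err)
	}
	keys, _ := keyring.List()
	fmt.Printf("while locked: %d key(s) visible\n", len(keys)) // prints 0

	if err := keyring.Unlock([]byte("wrong")); err != nil {
		fmt.Println("unlock with wrong passphrase:", err)
	}
	if err := keyring.Unlock([]byte("secret")); err != nil {
		log.Fatal(err)
	}
	fmt.Println("unlocked")
}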
-func (r *keyring) Lock(passphrase []byte) error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - r.locked = true - r.passphrase = passphrase - return nil -} - -// Unlock undoes the effect of Lock -func (r *keyring) Unlock(passphrase []byte) error { - r.mu.Lock() - defer r.mu.Unlock() - if !r.locked { - return errors.New("agent: not locked") - } - if 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) { - return fmt.Errorf("agent: incorrect passphrase") - } - - r.locked = false - r.passphrase = nil - return nil -} - -// expireKeysLocked removes expired keys from the keyring. If a key was added -// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have -// ellapsed, it is removed. The caller *must* be holding the keyring mutex. -func (r *keyring) expireKeysLocked() { - for _, k := range r.keys { - if k.expire != nil && time.Now().After(*k.expire) { - r.removeLocked(k.signer.PublicKey().Marshal()) - } - } -} - -// List returns the identities known to the agent. -func (r *keyring) List() ([]*Key, error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - // section 2.7: locked agents return empty. - return nil, nil - } - - r.expireKeysLocked() - var ids []*Key - for _, k := range r.keys { - pub := k.signer.PublicKey() - ids = append(ids, &Key{ - Format: pub.Type(), - Blob: pub.Marshal(), - Comment: k.comment}) - } - return ids, nil -} - -// Insert adds a private key to the keyring. If a certificate -// is given, that certificate is added as public key. Note that -// any constraints given are ignored. -func (r *keyring) Add(key AddedKey) error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - signer, err := ssh.NewSignerFromKey(key.PrivateKey) - - if err != nil { - return err - } - - if cert := key.Certificate; cert != nil { - signer, err = ssh.NewCertSigner(cert, signer) - if err != nil { - return err - } - } - - p := privKey{ - signer: signer, - comment: key.Comment, - } - - if key.LifetimeSecs > 0 { - t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second) - p.expire = &t - } - - r.keys = append(r.keys, p) - - return nil -} - -// Sign returns a signature for the data. -func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { - return r.SignWithFlags(key, data, 0) -} - -func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return nil, errLocked - } - - r.expireKeysLocked() - wanted := key.Marshal() - for _, k := range r.keys { - if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) { - if flags == 0 { - return k.signer.Sign(rand.Reader, data) - } else { - if algorithmSigner, ok := k.signer.(ssh.AlgorithmSigner); !ok { - return nil, fmt.Errorf("agent: signature does not support non-default signature algorithm: %T", k.signer) - } else { - var algorithm string - switch flags { - case SignatureFlagRsaSha256: - algorithm = ssh.SigAlgoRSASHA2256 - case SignatureFlagRsaSha512: - algorithm = ssh.SigAlgoRSASHA2512 - default: - return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags) - } - return algorithmSigner.SignWithAlgorithm(rand.Reader, data, algorithm) - } - } - } - } - return nil, errors.New("not found") -} - -// Signers returns signers for all the known keys. 
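// Illustrative sketch, not part of the vendored file or this diff: the Add and
// Sign paths of the keyring used directly, without an external ssh-agent. The
// lifetime value and the payload being signed are arbitrary examples.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	keyring := agent.NewKeyring()

	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	if err := keyring.Add(agent.AddedKey{
		PrivateKey:   priv,
		Comment:      "in-memory example",
		LifetimeSecs: 3600, // expired keys are dropped by expireKeysLocked
	}); err != nil {
		log.Fatal(err)
	}

	pub, err := ssh.NewPublicKey(&priv.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	sig, err := keyring.Sign(pub, []byte("payload to sign"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signature format: %s\n", sig.Format)
}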
-func (r *keyring) Signers() ([]ssh.Signer, error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return nil, errLocked - } - - r.expireKeysLocked() - s := make([]ssh.Signer, 0, len(r.keys)) - for _, k := range r.keys { - s = append(s, k.signer) - } - return s, nil -} - -// The keyring does not support any extensions -func (r *keyring) Extension(extensionType string, contents []byte) ([]byte, error) { - return nil, ErrExtensionUnsupported -} diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go deleted file mode 100644 index 6e7a1e02f..000000000 --- a/vendor/golang.org/x/crypto/ssh/agent/server.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "math/big" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh" -) - -// Server wraps an Agent and uses it to implement the agent side of -// the SSH-agent, wire protocol. -type server struct { - agent Agent -} - -func (s *server) processRequestBytes(reqData []byte) []byte { - rep, err := s.processRequest(reqData) - if err != nil { - if err != errLocked { - // TODO(hanwen): provide better logging interface? - log.Printf("agent %d: %v", reqData[0], err) - } - return []byte{agentFailure} - } - - if err == nil && rep == nil { - return []byte{agentSuccess} - } - - return ssh.Marshal(rep) -} - -func marshalKey(k *Key) []byte { - var record struct { - Blob []byte - Comment string - } - record.Blob = k.Marshal() - record.Comment = k.Comment - - return ssh.Marshal(&record) -} - -// See [PROTOCOL.agent], section 2.5.1. 
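// Illustrative sketch, not part of the vendored file or this diff: the server
// side exposed on a unix socket via ServeAgent, so that command-line tools
// could talk to an in-process keyring. The socket path is an assumption.
package main

import (
	"io"
	"log"
	"net"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	keyring := agent.NewKeyring()

	l, err := net.Listen("unix", "/tmp/example-agent.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Printf("accept: %v", err)
			continue
		}
		go func(c net.Conn) {
			defer c.Close()
			// ServeAgent reads length-prefixed requests and dispatches
			// them to the keyring until the connection ends.
			if err := agent.ServeAgent(keyring, c); err != nil && err != io.EOF {
				log.Printf("serve: %v", err)
			}
		}(conn)
	}
}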
-const agentV1IdentitiesAnswer = 2 - -type agentV1IdentityMsg struct { - Numkeys uint32 `sshtype:"2"` -} - -type agentRemoveIdentityMsg struct { - KeyBlob []byte `sshtype:"18"` -} - -type agentLockMsg struct { - Passphrase []byte `sshtype:"22"` -} - -type agentUnlockMsg struct { - Passphrase []byte `sshtype:"23"` -} - -func (s *server) processRequest(data []byte) (interface{}, error) { - switch data[0] { - case agentRequestV1Identities: - return &agentV1IdentityMsg{0}, nil - - case agentRemoveAllV1Identities: - return nil, nil - - case agentRemoveIdentity: - var req agentRemoveIdentityMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { - return nil, err - } - - return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob}) - - case agentRemoveAllIdentities: - return nil, s.agent.RemoveAll() - - case agentLock: - var req agentLockMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - return nil, s.agent.Lock(req.Passphrase) - - case agentUnlock: - var req agentUnlockMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - return nil, s.agent.Unlock(req.Passphrase) - - case agentSignRequest: - var req signRequestAgentMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { - return nil, err - } - - k := &Key{ - Format: wk.Format, - Blob: req.KeyBlob, - } - - var sig *ssh.Signature - var err error - if extendedAgent, ok := s.agent.(ExtendedAgent); ok { - sig, err = extendedAgent.SignWithFlags(k, req.Data, SignatureFlags(req.Flags)) - } else { - sig, err = s.agent.Sign(k, req.Data) - } - - if err != nil { - return nil, err - } - return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil - - case agentRequestIdentities: - keys, err := s.agent.List() - if err != nil { - return nil, err - } - - rep := identitiesAnswerAgentMsg{ - NumKeys: uint32(len(keys)), - } - for _, k := range keys { - rep.Keys = append(rep.Keys, marshalKey(k)...) - } - return rep, nil - - case agentAddIDConstrained, agentAddIdentity: - return nil, s.insertIdentity(data) - - case agentExtension: - // Return a stub object where the whole contents of the response gets marshaled. - var responseStub struct { - Rest []byte `ssh:"rest"` - } - - if extendedAgent, ok := s.agent.(ExtendedAgent); !ok { - // If this agent doesn't implement extensions, [PROTOCOL.agent] section 4.7 - // requires that we return a standard SSH_AGENT_FAILURE message. - responseStub.Rest = []byte{agentFailure} - } else { - var req extensionAgentMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - res, err := extendedAgent.Extension(req.ExtensionType, req.Contents) - if err != nil { - // If agent extensions are unsupported, return a standard SSH_AGENT_FAILURE - // message as required by [PROTOCOL.agent] section 4.7. - if err == ErrExtensionUnsupported { - responseStub.Rest = []byte{agentFailure} - } else { - // As the result of any other error processing an extension request, - // [PROTOCOL.agent] section 4.7 requires that we return a - // SSH_AGENT_EXTENSION_FAILURE code. 
- responseStub.Rest = []byte{agentExtensionFailure} - } - } else { - if len(res) == 0 { - return nil, nil - } - responseStub.Rest = res - } - } - - return responseStub, nil - } - - return nil, fmt.Errorf("unknown opcode %d", data[0]) -} - -func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse bool, extensions []ConstraintExtension, err error) { - for len(constraints) != 0 { - switch constraints[0] { - case agentConstrainLifetime: - lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5]) - constraints = constraints[5:] - case agentConstrainConfirm: - confirmBeforeUse = true - constraints = constraints[1:] - case agentConstrainExtension: - var msg constrainExtensionAgentMsg - if err = ssh.Unmarshal(constraints, &msg); err != nil { - return 0, false, nil, err - } - extensions = append(extensions, ConstraintExtension{ - ExtensionName: msg.ExtensionName, - ExtensionDetails: msg.ExtensionDetails, - }) - constraints = msg.Rest - default: - return 0, false, nil, fmt.Errorf("unknown constraint type: %d", constraints[0]) - } - } - return -} - -func setConstraints(key *AddedKey, constraintBytes []byte) error { - lifetimeSecs, confirmBeforeUse, constraintExtensions, err := parseConstraints(constraintBytes) - if err != nil { - return err - } - - key.LifetimeSecs = lifetimeSecs - key.ConfirmBeforeUse = confirmBeforeUse - key.ConstraintExtensions = constraintExtensions - return nil -} - -func parseRSAKey(req []byte) (*AddedKey, error) { - var k rsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - if k.E.BitLen() > 30 { - return nil, errors.New("agent: RSA public exponent too large") - } - priv := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - E: int(k.E.Int64()), - N: k.N, - }, - D: k.D, - Primes: []*big.Int{k.P, k.Q}, - } - priv.Precompute() - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseEd25519Key(req []byte) (*AddedKey, error) { - var k ed25519KeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - priv := ed25519.PrivateKey(k.Priv) - - addedKey := &AddedKey{PrivateKey: &priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseDSAKey(req []byte) (*AddedKey, error) { - var k dsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Y, - }, - X: k.X, - } - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) { - priv = &ecdsa.PrivateKey{ - D: privScalar, - } - - switch curveName { - case "nistp256": - priv.Curve = elliptic.P256() - case "nistp384": - priv.Curve = elliptic.P384() - case "nistp521": - priv.Curve = elliptic.P521() - default: - return nil, fmt.Errorf("agent: unknown curve %q", curveName) - } - - priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes) - if priv.X == nil || priv.Y == nil { - return nil, errors.New("agent: point not on curve") - } - - return priv, nil -} - -func parseEd25519Cert(req []byte) (*AddedKey, error) { - var k ed25519CertMsg - if err := ssh.Unmarshal(req, &k); err != 
nil { - return nil, err - } - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - priv := ed25519.PrivateKey(k.Priv) - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad ED25519 certificate") - } - - addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseECDSAKey(req []byte) (*AddedKey, error) { - var k ecdsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D) - if err != nil { - return nil, err - } - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseRSACert(req []byte) (*AddedKey, error) { - var k rsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad RSA certificate") - } - - // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go - var rsaPub struct { - Name string - E *big.Int - N *big.Int - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil { - return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) - } - - if rsaPub.E.BitLen() > 30 { - return nil, errors.New("agent: RSA public exponent too large") - } - - priv := rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - E: int(rsaPub.E.Int64()), - N: rsaPub.N, - }, - D: k.D, - Primes: []*big.Int{k.Q, k.P}, - } - priv.Precompute() - - addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseDSACert(req []byte) (*AddedKey, error) { - var k dsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad DSA certificate") - } - - // A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go - var w struct { - Name string - P, Q, G, Y *big.Int - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil { - return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) - } - - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - }, - Y: w.Y, - }, - X: k.X, - } - - addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseECDSACert(req []byte) (*AddedKey, error) { - var k ecdsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad ECDSA certificate") - } - - // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go - var ecdsaPub struct { - Name string - ID string - Key []byte - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil { - return nil, 
err - } - - priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D) - if err != nil { - return nil, err - } - - addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func (s *server) insertIdentity(req []byte) error { - var record struct { - Type string `sshtype:"17|25"` - Rest []byte `ssh:"rest"` - } - - if err := ssh.Unmarshal(req, &record); err != nil { - return err - } - - var addedKey *AddedKey - var err error - - switch record.Type { - case ssh.KeyAlgoRSA: - addedKey, err = parseRSAKey(req) - case ssh.KeyAlgoDSA: - addedKey, err = parseDSAKey(req) - case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: - addedKey, err = parseECDSAKey(req) - case ssh.KeyAlgoED25519: - addedKey, err = parseEd25519Key(req) - case ssh.CertAlgoRSAv01: - addedKey, err = parseRSACert(req) - case ssh.CertAlgoDSAv01: - addedKey, err = parseDSACert(req) - case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01: - addedKey, err = parseECDSACert(req) - case ssh.CertAlgoED25519v01: - addedKey, err = parseEd25519Cert(req) - default: - return fmt.Errorf("agent: not implemented: %q", record.Type) - } - - if err != nil { - return err - } - return s.agent.Add(*addedKey) -} - -// ServeAgent serves the agent protocol on the given connection. It -// returns when an I/O error occurs. -func ServeAgent(agent Agent, c io.ReadWriter) error { - s := &server{agent} - - var length [4]byte - for { - if _, err := io.ReadFull(c, length[:]); err != nil { - return err - } - l := binary.BigEndian.Uint32(length[:]) - if l == 0 { - return fmt.Errorf("agent: request size is 0") - } - if l > maxAgentResponseBytes { - // We also cap requests. - return fmt.Errorf("agent: request too large: %d", l) - } - - req := make([]byte, l) - if _, err := io.ReadFull(c, req); err != nil { - return err - } - - repData := s.processRequestBytes(req) - if len(repData) > maxAgentResponseBytes { - return fmt.Errorf("agent: reply too large: %d bytes", len(repData)) - } - - binary.BigEndian.PutUint32(length[:], uint32(len(repData))) - if _, err := c.Write(length[:]); err != nil { - return err - } - if _, err := c.Write(repData); err != nil { - return err - } - } -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 1ab07d078..000000000 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. 
-// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive io.EOF. -func (b *buffer) eof() { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. -func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index 00ed9923e..000000000 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. -const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. -type Signature struct { - Format string - Blob []byte -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the -// PublicKey interface, so it can be unmarshaled using -// ParsePublicKey. -type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. 
-type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) - } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. - if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - 
signer Signer -} - -type algorithmOpenSSHCertSigner struct { - *openSSHCertSigner - algorithmSigner AlgorithmSigner -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return nil, errors.New("ssh: signer and cert have different public key") - } - - if algorithmSigner, ok := signer.(AlgorithmSigner); ok { - return &algorithmOpenSSHCertSigner{ - &openSSHCertSigner{cert, signer}, algorithmSigner}, nil - } else { - return &openSSHCertSigner{cert, signer}, nil - } -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. -type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. 
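// Illustrative sketch, not part of the vendored file or this diff: CertChecker
// plugged in as a host key callback. The CA key literal in main is a
// placeholder and will fail to parse until replaced with a real entry.
package main

import (
	"bytes"
	"errors"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

// hostCertCallback accepts host certificates signed by the given CA and
// rejects plain host keys.
func hostCertCallback(caAuthorizedKey []byte) (ssh.HostKeyCallback, error) {
	caKey, _, _, _, err := ssh.ParseAuthorizedKey(caAuthorizedKey)
	if err != nil {
		return nil, err
	}
	checker := &ssh.CertChecker{
		IsHostAuthority: func(auth ssh.PublicKey, address string) bool {
			return bytes.Equal(auth.Marshal(), caKey.Marshal())
		},
		HostKeyFallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
			return errors.New("host did not present a certificate")
		},
	}
	return checker.CheckHostKey, nil
}

func main() {
	cb, err := hostCertCallback([]byte("ssh-ed25519 AAAA... ca@example"))
	if err != nil {
		log.Fatalf("parse CA key: %v", err)
	}
	_ = cb // use as ssh.ClientConfig.HostKeyCallback
}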
-func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. -func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) - } - - for opt := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert sets c.SignatureKey to the authority's public key and stores a -// Signature, by authority, in the certificate. 
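// Illustrative sketch, not part of the vendored file or this diff: building a
// user certificate and signing it with SignCert. Both key pairs are generated
// on the fly; the key ID, principal and validity window are example values.
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"
	"time"

	"golang.org/x/crypto/ssh"
)

func main() {
	_, caPriv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	caSigner, err := ssh.NewSignerFromKey(caPriv)
	if err != nil {
		log.Fatal(err)
	}

	userPub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	sshUserPub, err := ssh.NewPublicKey(userPub)
	if err != nil {
		log.Fatal(err)
	}

	cert := &ssh.Certificate{
		Key:             sshUserPub,
		Serial:          1,
		CertType:        ssh.UserCert,
		KeyId:           "example-user",
		ValidPrincipals: []string{"alice"},
		ValidAfter:      uint64(time.Now().Unix()),
		ValidBefore:     uint64(time.Now().Add(time.Hour).Unix()),
	}
	if err := cert.SignCert(rand.Reader, caSigner); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cert.Type()) // ssh-ed25519-cert-v01@openssh.com
}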
-func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoED25519: CertAlgoED25519v01, -} - -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo - } - } - panic("unknown cert algorithm") -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. -func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the key name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] - if !ok { - panic("unknown cert key type " + c.Key.Type()) - } - return algo -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. -func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index c0834c00d..000000000 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. - ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. - ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. 
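// Illustrative sketch, not part of the vendored file or this diff: the
// Accept/Reject contract of NewChannel from the server side. The channel
// stream would come from ssh.NewServerConn; main is only a placeholder.
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func handleChannels(chans <-chan ssh.NewChannel) {
	for newCh := range chans {
		if newCh.ChannelType() != "session" {
			// Every channel must be either accepted or rejected.
			newCh.Reject(ssh.UnknownChannelType, "only session channels are supported")
			continue
		}
		ch, requests, err := newCh.Accept()
		if err != nil {
			log.Printf("accept: %v", err)
			continue
		}
		// The request channel must be serviced or the Channel will hang.
		go func(in <-chan *ssh.Request) {
			for req := range in {
				req.Reply(req.Type == "shell", nil)
			}
		}(requests)
		ch.Write([]byte("hello\n"))
		ch.Close()
	}
}

func main() {
	log.Println("wire handleChannels to the stream returned by ssh.NewServerConn")
}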
-func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). - maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). - decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (ch *channel) writePacket(packet []byte) error { - ch.writeMu.Lock() - if ch.sentClose { - ch.writeMu.Unlock() - return io.EOF - } - ch.sentClose = (packet[0] == msgChannelClose) - err := ch.mux.conn.writePacket(packet) - ch.writeMu.Unlock() - return err -} - -func (ch *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], ch.remoteId) - return ch.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. These streams are -// used, for example, for stderr. 
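// Illustrative sketch, not part of the vendored file or this diff: extended
// stream 1 (stderr) as it surfaces through a session. The command and the
// already-connected client are assumptions; main is only a placeholder.
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func runCommand(client *ssh.Client, cmd string) error {
	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()

	var stdout, stderr bytes.Buffer
	session.Stdout = &stdout // ordinary channel data (extended code 0)
	session.Stderr = &stderr // extended data packets with code 1

	if err := session.Run(cmd); err != nil {
		return fmt.Errorf("%s failed: %v; stderr: %s", cmd, err, stderr.String())
	}
	fmt.Print(stdout.String())
	return nil
}

func main() {
	var client *ssh.Client // placeholder: connect one before using runCommand
	if client == nil {
		log.Fatal("connect an *ssh.Client before calling runCommand")
	}
	if err := runCommand(client, "uname -a"); err != nil {
		log.Fatal(err)
	}
}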
-func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if ch.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - ch.writeMu.Lock() - packet := ch.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. - ch.writeMu.Unlock() - - for len(data) > 0 { - space := min(ch.maxRemotePayload, len(data)) - if space, err = ch.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], ch.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = ch.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - ch.writeMu.Lock() - ch.packetPool[extendedCode] = packet - ch.writeMu.Unlock() - - return n, err -} - -func (ch *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > ch.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? - return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - ch.windowMu.Lock() - if ch.myWindow < length { - ch.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - ch.myWindow -= length - ch.windowMu.Unlock() - - if extended == 1 { - ch.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - ch.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. 
- if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. - c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. -func (ch *channel) responseMessageReceived() error { - if ch.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if ch.decided { - return errors.New("ssh: duplicate response received for channel") - } - ch.decided = true - return nil -} - -func (ch *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return ch.handleData(packet) - case msgChannelClose: - ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) - ch.mux.chanList.remove(ch.localId) - ch.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. - ch.extPending.eof() - ch.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - ch.mux.chanList.remove(msg.PeersID) - ch.msg <- msg - case *channelOpenConfirmMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - ch.remoteId = msg.MyID - ch.maxRemotePayload = msg.MaxPacketSize - ch.remoteWin.add(msg.MyWindow) - ch.msg <- msg - case *windowAdjustMsg: - if !ch.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: ch, - } - - ch.incomingRequests <- &req - default: - ch.msg <- msg - } - return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (ch *channel) Accept() (Channel, <-chan *Request, error) { - if ch.decided { - return nil, nil, errDecidedAlready - } - ch.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersID: ch.remoteId, - MyID: 
ch.localId, - MyWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - } - ch.decided = true - if err := ch.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersID: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch *channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersID: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersID: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. -func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersID: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. -func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersID: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersID: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index a65a923be..000000000 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,770 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - "math/bits" - - "golang.org/x/crypto/internal/chacha20" - "golang.org/x/crypto/poly1305" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. 
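// RFC 4253 section 6 requires the padded packet length to be a multiple of
// max(8, cipher block size); 16 is the AES block size, which also covers the
// 8-byte minimum used by the non-AES ciphers configured below.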
- - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. It is used -// by the transport before the first key-exchange. -type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type cipherMode struct { - keySize int - ivSize int - create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) -} - -func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - stream, err := createFunc(key, iv) - if err != nil { - return nil, err - } - - var streamDump []byte - if skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - mac := macModes[algs.MAC].new(macKey) - return &streamPacketCipher{ - mac: mac, - etm: macModes[algs.MAC].etm, - macResult: make([]byte, mac.Size()), - cipher: stream, - }, nil - } -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, - "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - gcmCipherID: {16, 12, newGCMCipher}, - chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, - - // CBC mode is insecure and so is not included in the default config. - // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. 
- // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. - tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. -type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. - prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readCipherPacket reads and decrypt a single packet from the reader argument. -func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. 
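// Concretely: length is at most maxPacket (256 KiB) and the largest MAC in
// supportedMACs is 32 bytes (hmac-sha2-256), so length-1+macSize stays far
// below the uint32 limit.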
- if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writeCipherPacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil && s.etm) { - // For EtM algorithms, the padding length has already been encrypted - // and the packet length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil { - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. 
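// For AES-GCM the 4-byte length prefix is sent in the clear and only
// authenticated (it is passed to Seal as additional data below), so the
// multiple-of-16 alignment applies to the padding-length byte, payload and
// padding, i.e. 1+len(packet)+padding.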
- padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. - seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. - oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. 
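// readCipherPacket reacts to any cbcError by discarding oracleCamouflage
// further bytes, so a failed length check and a failed MAC check consume a
// similar amount of input; see the CBC caveats in the cipherModes table above.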
-type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readCipherPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. - if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. - if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) - if err != nil { - return nil, err - } - c.oracleCamouflage -= uint32(n) - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). 
- // Enforce minimum padding and packet size. - encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. - c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} - -const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" - -// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com -// AEAD, which is described here: -// -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 -// -// the methods here also implement padding, which RFC4253 Section 6 -// also requires of stream ciphers. -type chacha20Poly1305Cipher struct { - lengthKey [8]uint32 - contentKey [8]uint32 - buf []byte -} - -func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - if len(key) != 64 { - panic(len(key)) - } - - c := &chacha20Poly1305Cipher{ - buf: make([]byte, 256), - } - - for i := range c.contentKey { - c.contentKey[i] = binary.LittleEndian.Uint32(key[i*4 : (i+1)*4]) - } - for i := range c.lengthKey { - c.lengthKey[i] = binary.LittleEndian.Uint32(key[(i+8)*4 : (i+9)*4]) - } - return c, nil -} - -func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} - s := chacha20.New(c.contentKey, nonce) - var polyKey [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.Advance() // skip next 32 bytes - - encryptedLength := c.buf[:4] - if _, err := io.ReadFull(r, encryptedLength); err != nil { - return nil, err - } - - var lenBytes [4]byte - chacha20.New(c.lengthKey, nonce).XORKeyStream(lenBytes[:], encryptedLength) - - length := binary.BigEndian.Uint32(lenBytes[:]) - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - contentEnd := 4 + length - packetEnd := contentEnd + poly1305.TagSize - if uint32(cap(c.buf)) < packetEnd { - c.buf = make([]byte, packetEnd) - copy(c.buf[:], encryptedLength) - } else { - c.buf = c.buf[:packetEnd] - } - - if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { - return nil, err - } - - var mac [poly1305.TagSize]byte - copy(mac[:], c.buf[contentEnd:packetEnd]) - if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { - return nil, errors.New("ssh: MAC failure") - } - - plain := c.buf[4:contentEnd] - s.XORKeyStream(plain, plain) - - padding := plain[0] - 
if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding)+1 >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - - plain = plain[1 : len(plain)-int(padding)] - - return plain, nil -} - -func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} - s := chacha20.New(c.contentKey, nonce) - var polyKey [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.Advance() // skip next 32 bytes - - // There is no blocksize, so fall back to multiple of 8 byte - // padding, as described in RFC 4253, Sec 6. - const packetSizeMultiple = 8 - - padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple - if padding < 4 { - padding += packetSizeMultiple - } - - // size (4 bytes), padding (1), payload, padding, tag. - totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize - if cap(c.buf) < totalLength { - c.buf = make([]byte, totalLength) - } else { - c.buf = c.buf[:totalLength] - } - - binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - chacha20.New(c.lengthKey, nonce).XORKeyStream(c.buf, c.buf[:4]) - c.buf[4] = byte(padding) - copy(c.buf[5:], payload) - packetEnd := 5 + len(payload) + padding - if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { - return err - } - - s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) - - var mac [poly1305.TagSize]byte - poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) - - copy(c.buf[packetEnd:], mac[:]) - - if _, err := w.Write(c.buf); err != nil { - return err - } - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index 7b00bff1c..000000000 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - handleForwardsOnce sync.Once // guards calling (*Client).handleForwards - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. 
-func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. -func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. -func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. - r.Reply(false, nil) - } -} - -// handleChannelOpens channel open messages from the remote side. -func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. 
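// Illustrative sketch of typical use (the address, user and password are
// placeholders, and a production caller should verify the host key rather
// than ignore it):
//
//	config := &ClientConfig{
//		User:            "user",
//		Auth:            []AuthMethod{Password("secret")},
//		HostKeyCallback: InsecureIgnoreHostKey(),
//		Timeout:         30 * time.Second,
//	}
//	client, err := Dial("tcp", "example.com:22", config)
//	if err != nil {
//		// handle error
//	}
//	defer client.Close()
//	session, err := client.NewSession()
//	// ... run commands on session, then session.Close()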
-func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// HostKeyCallback is the function type used for verifying server -// keys. A HostKeyCallback must return nil if the host key is OK, or -// an error to reject it. It receives the hostname as passed to Dial -// or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the SSH connection. -type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - -// BannerCallback is the function type used for treat the banner sent by -// the server. A BannerCallback receives the message sent by the remote server. -type BannerCallback func(message string) error - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. - Config - - // User contains the username to authenticate as. - User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback is called during the cryptographic - // handshake to validate the server's host key. The client - // configuration must supply this callback for the connection - // to succeed. The functions InsecureIgnoreHostKey or - // FixedHostKey can be used for simplistic host key checks. - HostKeyCallback HostKeyCallback - - // BannerCallback is called during the SSH dance to display a custom - // server's message. The client configuration can supply this callback to - // handle it as wished. The function BannerDisplayStderr can be used for - // simplistic display on Stderr. - BannerCallback BannerCallback - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time for the TCP connection to establish. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code. -func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key. 
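// For example (the authorized_keys-style line is a placeholder):
//
//	hostKey, _, _, _, err := ParseAuthorizedKey([]byte("ssh-ed25519 AAAA... host.example.com"))
//	if err != nil {
//		// handle error
//	}
//	config.HostKeyCallback = FixedHostKey(hostKey)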
-func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} - -// BannerDisplayStderr returns a function that can be used for -// ClientConfig.BannerCallback to display banners on os.Stderr. -func BannerDisplayStderr() BannerCallback { - return func(banner string) error { - _, err := os.Stderr.WriteString(banner) - - return err - } -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index 0590070e2..000000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,639 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -type authResult int - -const ( - authFailure authResult = iota - authPartialSuccess - authSuccess -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. -func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. - tried := make(map[string]bool) - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) - if err != nil { - return err - } - if ok == authSuccess { - // success - return nil - } else if ok == authFailure { - tried[auth.method()] = true - } - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if tried[candidateMethod] { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) -} - -func keys(m map[string]bool) []string { - s := make([]string, 0, len(m)) - - for key := range m { - s = append(s, key) - } - return s -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. 
-type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. -type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return authFailure, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. -func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. 
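// The enquiry is the validateKey round trip below: the key is offered with
// HasSig set to false, and only keys the server acknowledges with
// SSH_MSG_USERAUTH_PK_OK are then used to sign and resend the request.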
- - signers, err := cb() - if err != nil { - return authFailure, nil, err - } - var methods []string - for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) - if err != nil { - return authFailure, nil, err - } - if !ok { - continue - } - - pub := signer.PublicKey() - pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, []byte(pub.Type()), pubKey)) - if err != nil { - return authFailure, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: pub.Type(), - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return authFailure, nil, err - } - var success authResult - success, methods, err = handleAuthResponse(c) - if err != nil { - return authFailure, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: key.Type(), - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, c) -} - -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { - pubKey := key.Marshal() - algoname := key.Type() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return false, err - } - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. -func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. 
-func handleAuthResponse(c packetConn) (authResult, []string, error) { - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -func handleBannerResponse(c packetConn, packet []byte) error { - var msg userAuthBannerMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - transport, ok := c.(*handshakeTransport) - if !ok { - return nil - } - - if transport.bannerCallback != nil { - return transport.bannerCallback(msg.Message) - } - - return nil -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. After -// successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be -// printed. RFC 4256 section 3.3 details how the UI should behave for -// both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) - -// KeyboardInteractive returns an AuthMethod using a prompt/response -// sequence controlled by the server. -func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { - return challenge -} - -func (cb KeyboardInteractiveChallenge) method() string { - return "keyboard-interactive" -} - -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - type initiateMsg struct { - User string `sshtype:"50"` - Service string - Method string - Language string - Submethods string - } - - if err := c.writePacket(Marshal(&initiateMsg{ - User: user, - Service: serviceSSH, - Method: "keyboard-interactive", - })); err != nil { - return authFailure, nil, err - } - - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - // like handleAuthResponse, but with less options. - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - continue - case msgUserAuthInfoRequest: - // OK - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - - var msg userAuthInfoRequestMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - - // Manually unpack the prompt/echo pairs. 
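// Wire format per RFC 4256 section 3.2: each pair is an SSH string (uint32
// length followed by that many bytes of prompt text) and one boolean byte
// indicating whether the answer should be echoed.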
- rest := msg.Prompts - var prompts []string - var echos []bool - for i := 0; i < int(msg.NumPrompts); i++ { - prompt, r, ok := parseString(rest) - if !ok || len(r) == 0 { - return authFailure, nil, errors.New("ssh: prompt format error") - } - prompts = append(prompts, string(prompt)) - echos = append(echos, r[0] != 0) - rest = r[1:] - } - - if len(rest) != 0 { - return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") - } - - answers, err := cb(msg.User, msg.Instruction, prompts, echos) - if err != nil { - return authFailure, nil, err - } - - if len(answers) != len(prompts) { - return authFailure, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") - } - responseLength := 1 + 4 - for _, a := range answers { - responseLength += stringLength(len(a)) - } - serialized := make([]byte, responseLength) - p := serialized - p[0] = msgUserAuthInfoResponse - p = p[1:] - p = marshalUint32(p, uint32(len(answers))) - for _, a := range answers { - p = marshalString(p, []byte(a)) - } - - if err := c.writePacket(serialized); err != nil { - return authFailure, nil, err - } - } -} - -type retryableAuthMethod struct { - authMethod AuthMethod - maxTries int -} - -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { - for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) - if ok != authFailure || err != nil { // either success, partial success or error terminate - return ok, methods, err - } - } - return ok, methods, err -} - -func (r *retryableAuthMethod) method() string { - return r.authMethod.method() -} - -// RetryableAuthMethod is a decorator for other auth methods enabling them to -// be retried up to maxTries before considering that AuthMethod itself failed. -// If maxTries is <= 0, will retry indefinitely -// -// This is useful for interactive clients using challenge/response type -// authentication (e.g. Keyboard-Interactive, Password, etc) where the user -// could mistype their response resulting in the server issuing a -// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 -// [keyboard-interactive]); Without this decorator, the non-retryable -// AuthMethod would be removed from future consideration, and never tried again -// (and so the user would never be able to retry their entry). -func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { - return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} -} - -// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. -// See RFC 4462 section 3 -// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. -// target is the server host you want to log in to. -func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { - if gssAPIClient == nil { - panic("gss-api client must be not nil with enable gssapi-with-mic") - } - return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} -} - -type gssAPIWithMICCallback struct { - gssAPIClient GSSAPIClient - target string -} - -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - m := &userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: g.method(), - } - // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. 
- // See RFC 4462 section 3.2. - m.Payload = appendU32(m.Payload, 1) - m.Payload = appendString(m.Payload, string(krb5OID)) - if err := c.writePacket(Marshal(m)); err != nil { - return authFailure, nil, err - } - // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an - // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or - // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. - // See RFC 4462 section 3.3. - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,so I don't want to check - // selected mech if it is valid. - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - userAuthGSSAPIResp := &userAuthGSSAPIResponse{} - if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { - return authFailure, nil, err - } - // Start the loop into the exchange token. - // See RFC 4462 section 3.4. - var token []byte - defer g.gssAPIClient.DeleteSecContext() - for { - // Initiates the establishment of a security context between the application and a remote peer. - nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) - if err != nil { - return authFailure, nil, err - } - if len(nextToken) > 0 { - if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: nextToken, - })); err != nil { - return authFailure, nil, err - } - } - if !needContinue { - break - } - packet, err = c.readPacket() - if err != nil { - return authFailure, nil, err - } - switch packet[0] { - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthGSSAPIError: - userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} - if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { - return authFailure, nil, err - } - return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ - "Major Status: %d\n"+ - "Minor Status: %d\n"+ - "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, - userAuthGSSAPIErrorResp.Message) - case msgUserAuthGSSAPIToken: - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return authFailure, nil, err - } - token = userAuthGSSAPITokenReq.Token - } - } - // Binding Encryption Keys. - // See RFC 4462 section 3.5. - micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") - micToken, err := g.gssAPIClient.GetMIC(micField) - if err != nil { - return authFailure, nil, err - } - if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ - MIC: micToken, - })); err != nil { - return authFailure, nil, err - } - return handleAuthResponse(c) -} - -func (g *gssAPIWithMICCallback) method() string { - return "gssapi-with-mic" -} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index d97415d2d..000000000 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "math" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. 
-const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers lists ciphers we support but might not recommend. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "arcfour256", "arcfour128", "arcfour", - aes128cbcID, - tripledescbcID, -} - -// preferredCiphers specifies the default preference for ciphers. -var preferredCiphers = []string{ - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "aes128-ctr", "aes192-ctr", "aes256-ctr", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. -var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, -} - -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. -var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. -var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertAlgoRSAv01: crypto.SHA1, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, -} - -// unexpectedMessageError results when the SSH message that we received didn't -// match what we wanted. -func unexpectedMessageError(expected, got uint8) error { - return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) -} - -// parseError results from a malformed SSH message. -func parseError(tag uint8) error { - return fmt.Errorf("ssh: parse error in message type %d", tag) -} - -func findCommon(what string, client []string, server []string) (common string, err error) { - for _, c := range client { - for _, s := range server { - if c == s { - return c, nil - } - } - } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) -} - -// directionAlgorithms records algorithm choices in one direction (either read or write) -type directionAlgorithms struct { - Cipher string - MAC string - Compression string -} - -// rekeyBytes returns a rekeying intervals in bytes. -func (a *directionAlgorithms) rekeyBytes() int64 { - // According to RFC4344 block ciphers should rekey after - // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is - // 128. 
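// That is 2^(128/4) = 2^32 blocks of 16 bytes each, i.e. the 16 * (1 << 32)
// bytes returned below for the AES-based modes.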
- switch a.Cipher { - case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: - return 16 * (1 << 32) - - } - - // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. - return 1 << 30 -} - -type algorithms struct { - kex string - hostKey string - w directionAlgorithms - r directionAlgorithms -} - -func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { - result := &algorithms{} - - result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) - if err != nil { - return - } - - result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) - if err != nil { - return - } - - stoc, ctos := &result.w, &result.r - if isClient { - ctos, stoc = stoc, ctos - } - - ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) - if err != nil { - return - } - - stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) - if err != nil { - return - } - - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return - } - - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return - } - - ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) - if err != nil { - return - } - - stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) - if err != nil { - return - } - - return result, nil -} - -// If rekeythreshold is too small, we can't make any progress sending -// stuff. -const minRekeyThreshold uint64 = 256 - -// Config contains configuration data common to both ServerConfig and -// ClientConfig. -type Config struct { - // Rand provides the source of entropy for cryptographic - // primitives. If Rand is nil, the cryptographic random reader - // in package crypto/rand will be used. - Rand io.Reader - - // The maximum number of bytes sent or received after which a - // new key is negotiated. It must be at least 256. If - // unspecified, a size suitable for the chosen cipher is used. - RekeyThreshold uint64 - - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. - KeyExchanges []string - - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. - Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. - MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. 
-func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = preferredCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = supportedKexAlgos - } - - if c.MACs == nil { - c.MACs = supportedMACs - } - - if c.RekeyThreshold == 0 { - // cipher specific default - } else if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } else if c.RekeyThreshold >= math.MaxInt64 { - // Avoid weirdness if somebody uses -1 as a threshold. - c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo []byte - PubKey []byte - }{ - sessionID, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. -func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. 
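
The window type above is unexported, but the flow-control pattern it implements, a sync.Cond guarding a byte counter that writers reserve from and the peer replenishes, is easy to reproduce. A standalone sketch with made-up sizes:

package main

import (
	"fmt"
	"sync"
)

// win mirrors the reserve/add pattern above: reserve blocks while no capacity
// is available and may return less than requested; add replenishes capacity
// and wakes all waiters.
type win struct {
	*sync.Cond
	n uint32
}

func (w *win) reserve(want uint32) uint32 {
	w.L.Lock()
	defer w.L.Unlock()
	for w.n == 0 {
		w.Wait()
	}
	if want > w.n {
		want = w.n
	}
	w.n -= want
	return want
}

func (w *win) add(n uint32) {
	w.L.Lock()
	w.n += n
	w.Broadcast()
	w.L.Unlock()
}

func main() {
	w := &win{Cond: sync.NewCond(new(sync.Mutex))}
	done := make(chan struct{})
	go func() {
		got := w.reserve(4096) // blocks until the peer grants some window
		fmt.Println("reserved", got, "bytes")
		close(done)
	}()
	w.add(1024) // peer granted 1 KiB of window
	<-done
}
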
-func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index fd6b0681b..000000000 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "fmt" - "net" -) - -// OpenChannelError is returned if the other side rejects an -// OpenChannel request. -type OpenChannelError struct { - Reason RejectionReason - Message string -} - -func (e *OpenChannelError) Error() string { - return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) -} - -// ConnMetadata holds metadata for the connection. -type ConnMetadata interface { - // User returns the user ID for this connection. - User() string - - // SessionID returns the session hash, also denoted by H. - SessionID() []byte - - // ClientVersion returns the client's version string as hashed - // into the session ID. - ClientVersion() []byte - - // ServerVersion returns the server's version string as hashed - // into the session ID. - ServerVersion() []byte - - // RemoteAddr returns the remote address for this connection. - RemoteAddr() net.Addr - - // LocalAddr returns the local address for this connection. - LocalAddr() net.Addr -} - -// Conn represents an SSH connection for both server and client roles. -// Conn is the basis for implementing an application layer, such -// as ClientConn, which implements the traditional shell access for -// clients. -type Conn interface { - ConnMetadata - - // SendRequest sends a global request, and returns the - // reply. If wantReply is true, it returns the response status - // and payload. See also RFC4254, section 4. - SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) - - // OpenChannel tries to open an channel. If the request is - // rejected, it returns *OpenChannelError. On success it returns - // the SSH Channel and a Go channel for incoming, out-of-band - // requests. The Go channel must be serviced, or the - // connection will hang. - OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) - - // Close closes the underlying network connection - Close() error - - // Wait blocks until the connection has shut down, and returns the - // error causing the shutdown. - Wait() error - - // TODO(hanwen): consider exposing: - // RequestKeyChange - // Disconnect -} - -// DiscardRequests consumes and rejects all requests from the -// passed-in channel. -func DiscardRequests(in <-chan *Request) { - for req := range in { - if req.WantReply { - req.Reply(false, nil) - } - } -} - -// A connection represents an incoming connection. -type connection struct { - transport *handshakeTransport - sshConn - - // The connection protocol. 
- *mux -} - -func (c *connection) Close() error { - return c.sshConn.conn.Close() -} - -// sshconn provides net.Conn metadata, but disallows direct reads and -// writes. -type sshConn struct { - conn net.Conn - - user string - sessionID []byte - clientVersion []byte - serverVersion []byte -} - -func dup(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -func (c *sshConn) User() string { - return c.user -} - -func (c *sshConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *sshConn) Close() error { - return c.conn.Close() -} - -func (c *sshConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *sshConn) SessionID() []byte { - return dup(c.sessionID) -} - -func (c *sshConn) ClientVersion() []byte { - return dup(c.clientVersion) -} - -func (c *sshConn) ServerVersion() []byte { - return dup(c.serverVersion) -} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go deleted file mode 100644 index 67b7322c0..000000000 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package ssh implements an SSH client and server. - -SSH is a transport security protocol, an authentication protocol and a -family of application protocols. The most typical application level -protocol is a remote shell and this is specifically implemented. However, -the multiplexed nature of SSH is exposed to users that wish to support -others. - -References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 - -This package does not fall under the stability promise of the Go language itself, -so its API may be changed when pressing needs arise. -*/ -package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go deleted file mode 100644 index 2b10b05a4..000000000 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "log" - "net" - "sync" -) - -// debugHandshake, if set, prints messages sent and received. Key -// exchange messages are printed as if DH were used, so the debug -// messages are wrong when using ECDH. -const debugHandshake = false - -// chanSize sets the amount of buffering SSH connections. This is -// primarily for testing: setting chanSize=0 uncovers deadlocks more -// quickly. -const chanSize = 16 - -// keyingTransport is a packet based transport that supports key -// changes. It need not be thread-safe. It should pass through -// msgNewKeys in both directions. -type keyingTransport interface { - packetConn - - // prepareKeyChange sets up a key change. The key change for a - // direction will be effected if a msgNewKeys message is sent - // or received. - prepareKeyChange(*algorithms, *kexResult) error -} - -// handshakeTransport implements rekeying on top of a keyingTransport -// and offers a thread-safe writePacket() interface. 
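
As a usage reference for the Conn, channel and request plumbing defined in connection.go above, a sketch that runs the handshake over an existing net.Conn and opens a session; the address and credentials are placeholders.

package main

import (
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	raw, err := net.Dial("tcp", "example.com:22") // hypothetical host
	if err != nil {
		log.Fatal(err)
	}

	cfg := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // demo only
	}

	// NewClientConn runs the handshake and returns the multiplexed connection
	// plus channels for incoming channel opens and global requests, which
	// NewClient then services.
	conn, chans, reqs, err := ssh.NewClientConn(raw, "example.com:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	client := ssh.NewClient(conn, chans, reqs)
	defer client.Close()

	sess, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()

	out, err := sess.Output("uptime")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", out)
}
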
-type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. - - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // bannerCallback is non-empty if we are the client and it has been set in - // ClientConfig. In that case it is called during the user authentication - // dance to handle a custom server's message. - bannerCallback BannerCallback - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - readPacketsLeft uint32 - readBytesLeft int64 - - writePacketsLeft uint32 - writeBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex, 1), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. - t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - t.bannerCallback = config.BannerCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. 
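
The client transport above takes its HostKeyCallback, HostKeyAlgorithms and BannerCallback straight from ClientConfig. A sketch of populating those fields, assuming a known_hosts file at a hypothetical path:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func main() {
	// Build a HostKeyCallback from an OpenSSH known_hosts file.
	hostKeyCB, err := knownhosts.New("/home/demo/.ssh/known_hosts") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	cfg := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: hostKeyCB,
		// Only accept these host key types from the server.
		HostKeyAlgorithms: []string{ssh.KeyAlgoED25519, ssh.KeyAlgoECDSA256},
		// Log any pre-authentication banner the server sends.
		BannerCallback: func(banner string) error {
			log.Printf("server banner: %s", banner)
			return nil
		},
	}
	_ = cfg // hand cfg to ssh.Dial or ssh.NewClientConn as usual
}
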
-func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. - } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. 
This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - go func() { - for init := range t.startKex { - init.done <- t.writeError - } - }() - - // Unblock reader. - t.conn.Close() -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. -const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex <- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. 
- return nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - if len(t.hostKeys) > 0 { - for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - } - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. - packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.pushPacket(packetCopy); err != nil { - return err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - - return nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - } - - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError != nil { - return t.writeError - } - - if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil - } - - if t.writeBytesLeft > 0 { - t.writeBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if t.writePacketsLeft > 0 { - t.writePacketsLeft-- - } else { - t.requestKeyExchange() - } - - if err := t.pushPacket(p); err != nil { - t.writeError = err - } - - return nil -} - -func (t *handshakeTransport) Close() error { - return t.conn.Close() -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - isClient := len(t.hostKeys) == 0 - if isClient { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. 
- if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[t.algorithms.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) - } else { - result, err = t.client(kex, t.algorithms, &magics) - } - - if err != nil { - return err - } - - if t.sessionID == nil { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { - return err - } - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { - hostKey = k - } - } - - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) - return r, err -} - -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - result, err := kex.Client(t.conn, t.config.Rand, magics) - if err != nil { - return nil, err - } - - hostKey, err := ParsePublicKey(result.HostKey) - if err != nil { - return nil, err - } - - if err := verifyHostKeySignature(hostKey, result); err != nil { - return nil, err - } - - err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) - if err != nil { - return nil, err - } - - return result, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index f34bcc013..000000000 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,540 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. It is used for - // calculating H, and for deriving keys from H and K. - Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. 
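
The ecdh-sha2-nistp* and curve25519 kex methods named above are ephemeral Diffie-Hellman agreements. A rough standalone illustration of the ECDH half using the standard library's crypto/ecdh, which requires Go 1.20 or newer (later than this module's declared Go version):

package main

import (
	"bytes"
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := ecdh.P256()

	// Each side generates an ephemeral key pair, as the kex above does per handshake.
	clientKey, _ := curve.GenerateKey(rand.Reader)
	serverKey, _ := curve.GenerateKey(rand.Reader)

	// Each side combines its own private key with the peer's public key.
	clientSecret, err := clientKey.ECDH(serverKey.PublicKey())
	if err != nil {
		panic(err)
	}
	serverSecret, err := serverKey.ECDH(clientKey.PublicKey())
	if err != nil {
		panic(err)
	}

	fmt.Println("shared secrets match:", bytes.Equal(clientSecret, serverSecret))
}
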
-type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. -type dhGroup struct { - g, p, pMinus1 *big.Int -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - ki, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: crypto.SHA1, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - ki, err := group.diffieHellman(kexDHInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA1, - }, nil -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} -} - -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
-var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index 969804794..000000000 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1100 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
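
A corresponding sketch of the raw X25519 agreement behind curve25519-sha256@libssh.org, using the same golang.org/x/crypto/curve25519 package imported above; X25519 itself rejects the all-zero result that the code above also guards against.

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each side draws a 32-byte private scalar and derives its public point.
	var aPriv, bPriv [32]byte
	if _, err := rand.Read(aPriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bPriv[:]); err != nil {
		panic(err)
	}

	aPub, err := curve25519.X25519(aPriv[:], curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	bPub, err := curve25519.X25519(bPriv[:], curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	// Both sides arrive at the same shared secret.
	aShared, _ := curve25519.X25519(aPriv[:], bPub)
	bShared, _ := curve25519.X25519(bPriv[:], aPub)
	fmt.Println("shared secrets match:", bytes.Equal(aShared, bShared))
}
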
- -package ssh - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" -) - -// These constants represent the algorithm names for key types supported by this -// package. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" -) - -// These constants represent non-default signature algorithms that are supported -// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See -// [PROTOCOL.agent] section 4.5.1 and -// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 -const ( - SigAlgoRSA = "ssh-rsa" - SigAlgoRSASHA2256 = "rsa-sha2-256" - SigAlgoRSASHA2512 = "rsa-sha2-512" -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. -// -// The known_hosts format is documented in the sshd(8) manual page. This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. -// -// If no entries were found in the input then err will be io.EOF. Otherwise a -// non-nil err value indicates a parse error. 
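
As a consumer of the known_hosts format described above, a short sketch that walks a file entry by entry; the path is a placeholder.

package main

import (
	"io"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	data, err := os.ReadFile("/home/demo/.ssh/known_hosts") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	rest := data
	for {
		marker, hosts, pubKey, comment, next, err := ssh.ParseKnownHosts(rest)
		if err == io.EOF {
			break // no further entries
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("marker=%q hosts=%v type=%s comment=%q", marker, hosts, pubKey.Type(), comment)
		rest = next
	}
}
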
-func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - // Strip out the beginning of the known_host key. - // This is either an optional marker or a (set of) hostname(s). - keyFields := bytes.Fields(in) - if len(keyFields) < 3 || len(keyFields) > 5 { - return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") - } - - // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated - // list of hosts - marker := "" - if keyFields[0][0] == '@' { - marker = string(keyFields[0][1:]) - keyFields = keyFields[1:] - } - - hosts := string(keyFields[0]) - // keyFields[1] contains the key type (e.g. “ssh-rsa”). - // However, that information is duplicated inside the - // base64-encoded key and so is ignored here. - - key := bytes.Join(keyFields[2:], []byte(" ")) - if pubKey, comment, err = parseAuthorizedKey(key); err != nil { - return "", nil, nil, "", nil, err - } - - return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil - } - - return "", nil, nil, "", nil, io.EOF -} - -// ParseAuthorizedKeys parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. -func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. - var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. 
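
The same idea for the authorized_keys parser above, printing each key's type and SHA-256 fingerprint; again the path is a placeholder.

package main

import (
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	data, err := os.ReadFile("/home/demo/.ssh/authorized_keys") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	for len(data) > 0 {
		pubKey, comment, options, rest, err := ssh.ParseAuthorizedKey(data)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("%s %s options=%v comment=%q",
			pubKey.Type(), ssh.FingerprintSHA256(pubKey), options, comment)
		data = rest
	}
}
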
-func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey is an abstraction of different types of public keys. -type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". - Type() string - - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. To unmarshal the returned data, use - // the ParsePublicKey function. - Marshal() []byte - - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -type Signer interface { - // PublicKey returns an associated PublicKey instance. - PublicKey() PublicKey - - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -// A AlgorithmSigner is a Signer that also supports specifying a specific -// algorithm to use for signing. -type AlgorithmSigner interface { - Signer - - // SignWithAlgorithm is like Signer.Sign, but allows specification of a - // non-default signing algorithm. See the SigAlgo* constants in this - // package for signature algorithms supported by this package. Callers may - // pass an empty string for the algorithm in which case the AlgorithmSigner - // will use its default algorithm. - SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. -func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. 
- wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - var hash crypto.Hash - switch sig.Format { - case SigAlgoRSA: - hash = crypto.SHA1 - case SigAlgoRSASHA2256: - hash = crypto.SHA256 - case SigAlgoRSASHA2512: - hash = crypto.SHA512 - default: - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - h := hash.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (k *dsaPublicKey) Type() string { - return "ssh-dss" -} - -func checkDSAParams(param *dsa.Parameters) error { - // SSH specifies FIPS 186-2, which only provided a single size - // (1024 bits) DSA key. FIPS 186-3 allows for larger key - // sizes, which would confuse SSH. - if l := param.P.BitLen(); l != 1024 { - return fmt.Errorf("ssh: unsupported DSA key size %d", l) - } - - return nil -} - -// parseDSA parses an DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - param := dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - } - if err := checkDSAParams(¶m); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: param, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
- if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, "") -} - -func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm != "" && algorithm != k.PublicKey().Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (k *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + k.nistID() -} - -func (k *ecdsaPublicKey) nistID() string { - switch k.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (k ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := ed25519.PublicKey(w.KeyBytes) - - return (ed25519PublicKey)(key), w.Rest, nil -} - -func (k ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(k), - } - return Marshal(&w) -} - -func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - edKey := (ed25519.PublicKey)(k) - if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
-func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (k *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - k.Type(), - k.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := ecHash(k.Curve).New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a -// corresponding Signer instance. ECDSA keys must use P-256, P-384 or -// P-521. DSA keys must use parameter size L1024N160. -func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return newDSAPrivateKey(key) - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { - if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { - return nil, err - } - - return &dsaPrivateKey{key}, nil -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. 
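
A sketch of the wrapping described above: turn a standard-library crypto.Signer (here a freshly generated ECDSA P-256 key, standing in for, say, an HSM-backed key) into an ssh.Signer and round-trip a signature.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Any crypto.Signer works here; we generate an in-memory key for the sketch.
	ecKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	signer, err := ssh.NewSignerFromSigner(ecKey)
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("payload to authenticate")
	sig, err := signer.Sign(rand.Reader, data)
	if err != nil {
		log.Fatal(err)
	}
	if err := signer.PublicKey().Verify(data, sig); err != nil {
		log.Fatal(err)
	}
	log.Printf("%s signature verified", signer.PublicKey().Type())
}
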
-func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, "") -} - -func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - var hashFunc crypto.Hash - - if _, ok := s.pubKey.(*rsaPublicKey); ok { - // RSA keys support a few hash functions determined by the requested signature algorithm - switch algorithm { - case "", SigAlgoRSA: - algorithm = SigAlgoRSA - hashFunc = crypto.SHA1 - case SigAlgoRSASHA2256: - hashFunc = crypto.SHA256 - case SigAlgoRSASHA2512: - hashFunc = crypto.SHA512 - default: - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - } else { - // The only supported algorithm for all other key types is the same as the type of the key - if algorithm == "" { - algorithm = s.pubKey.Type() - } else if algorithm != s.pubKey.Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - switch key := s.pubKey.(type) { - case *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } - } - - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: algorithm, - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. -// ECDSA keys must use P-256, P-384 or P-521. -func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - return (ed25519PublicKey)(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. 
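// Illustration (not part of the patch): ParsePrivateKey, documented above, is
// the usual entry point for turning a PEM-encoded key on disk into a Signer.
// The file path is a placeholder; any unencrypted PEM key supported by
// ParseRawPrivateKey would do.
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	pemBytes, err := os.ReadFile("/home/demo/.ssh/id_ed25519") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key type:   ", signer.PublicKey().Type())
	fmt.Println("fingerprint:", ssh.FingerprintSHA256(signer.PublicKey()))
}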
-func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. -func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, errors.New("ssh: cannot decode encrypted private keys") - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - // RFC5208 - https://tools.ietf.org/html/rfc5208 - case "PRIVATE KEY": - return x509.ParsePKCS8PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If wrong passphrase, return -// x509.IncorrectPasswordError. -func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - buf := block.Bytes - - if encryptedBlock(block) { - if x509.IsEncryptedPEMBlock(block) { - var err error - buf, err = x509.DecryptPEMBlock(block, passPhrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } - } - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(buf) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. 
-func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -// Implemented based on the documentation at -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key -func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { - const magic = "openssh-key-v1\x00" - if len(key) < len(magic) || string(key[:len(magic)]) != magic { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - - if w.KdfName != "none" || w.CipherName != "none" { - return nil, errors.New("ssh: cannot decode encrypted private keys") - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { - return nil, err - } - - if pk1.Check1 != pk1.Check2 { - return nil, errors.New("ssh: checkint mismatch") - } - - // we only handle ed25519 and rsa keys currently - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return &pk, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. 
-// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index c07a06285..000000000 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. -type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index db914d803..000000000 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,836 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. 
-const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Helman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. -type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4252, section 5.1 -const msgUserAuthSuccess = 52 - -// See RFC 4252, section 5.4 -const msgUserAuthBanner = 53 - -type userAuthBannerMsg struct { - Message string `sshtype:"53"` - // unused, but required to allow message parsing - Language string -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersID uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersID uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersID uint32 `sshtype:"91"` - MyID uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersID uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersID uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersID uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersID uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersID uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersID uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersID uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// See RFC 4462, section 3 -const msgUserAuthGSSAPIResponse = 60 - -type userAuthGSSAPIResponse struct { - SupportMech []byte `sshtype:"60"` -} - -const msgUserAuthGSSAPIToken = 61 - -type userAuthGSSAPIToken struct { - Token []byte `sshtype:"61"` -} - -const msgUserAuthGSSAPIMIC = 66 - -type userAuthGSSAPIMIC struct { - MIC []byte `sshtype:"66"` -} - -// See RFC 4462, section 3.9 -const msgUserAuthGSSAPIErrTok = 64 - -type userAuthGSSAPIErrTok struct { - ErrorToken []byte `sshtype:"64"` -} - -// See RFC 4462, section 3.8 -const msgUserAuthGSSAPIError = 65 - -type userAuthGSSAPIError struct { - MajorStatus uint32 `sshtype:"65"` - MinorStatus uint32 - Message string - LanguageTag string -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. -func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. 
-func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
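// Illustration (not part of the patch): Marshal and Unmarshal are exported,
// so the reflection-based wire format described above can be exercised
// directly. The message type below is made up for the demo; the sshtype tag
// value (200) is an arbitrary type byte, not a real SSH message number.
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

type demoMsg struct {
	Sequence  uint32 `sshtype:"200"`
	Request   string
	WantReply bool
}

func main() {
	in := demoMsg{Sequence: 7, Request: "exec", WantReply: true}
	wire := ssh.Marshal(&in) // type byte 200, then uint32, string, bool

	var out demoMsg
	if err := ssh.Unmarshal(wire, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out) // {Sequence:7 Request:exec WantReply:true}
}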
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) - } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, 
bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. - to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - case msgUserAuthGSSAPIToken: - msg = new(userAuthGSSAPIToken) - case msgUserAuthGSSAPIMIC: - msg = new(userAuthGSSAPIMIC) - case msgUserAuthGSSAPIErrTok: - msg = new(userAuthGSSAPIErrTok) - case msgUserAuthGSSAPIError: - msg = new(userAuthGSSAPIError) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -var packetTypeNames = map[byte]string{ - msgDisconnect: "disconnectMsg", - msgServiceRequest: "serviceRequestMsg", - msgServiceAccept: "serviceAcceptMsg", - msgKexInit: "kexInitMsg", - msgKexDHInit: "kexDHInitMsg", - msgKexDHReply: "kexDHReplyMsg", - msgUserAuthRequest: "userAuthRequestMsg", - msgUserAuthSuccess: "userAuthSuccessMsg", - msgUserAuthFailure: "userAuthFailureMsg", - msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", - msgGlobalRequest: "globalRequestMsg", - msgRequestSuccess: "globalRequestSuccessMsg", - msgRequestFailure: "globalRequestFailureMsg", - msgChannelOpen: "channelOpenMsg", - msgChannelData: "channelDataMsg", - msgChannelOpenConfirm: "channelOpenConfirmMsg", - msgChannelOpenFailure: "channelOpenFailureMsg", - msgChannelWindowAdjust: "windowAdjustMsg", - msgChannelEOF: "channelEOFMsg", - msgChannelClose: "channelCloseMsg", - msgChannelRequest: "channelRequestMsg", - msgChannelSuccess: "channelRequestSuccessMsg", - msgChannelFailure: "channelRequestFailureMsg", -} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index f19016270..000000000 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. 
-type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. -func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. -func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. 
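// Illustration (not part of the patch): the mux internals above back the
// exported Conn.SendRequest API. A common use of global requests is an
// OpenSSH-style keepalive; the package name, interval handling and logging
// below are assumptions made for this sketch.
package sshutil

import (
	"context"
	"log"
	"time"

	"golang.org/x/crypto/ssh"
)

// keepAlive sends a global "keepalive@openssh.com" request until ctx is done.
// The peer will typically refuse the unknown request, which is still enough
// to detect a dead connection via the returned error.
func keepAlive(ctx context.Context, conn ssh.Conn, interval time.Duration) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			if _, _, err := conn.SendRequest("keepalive@openssh.com", true, nil); err != nil {
				log.Printf("keepalive failed: %v", err)
				return
			}
		}
	}
}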
-func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. -func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. -func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. - if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return fmt.Errorf("ssh: invalid channel %d", id) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. 
-func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersID: msg.PeersID, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersID - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersID: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index ac7f8073c..000000000 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,710 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. In general, SSH servers should reject - // connections that specify critical options that are unknown - // or not supported. - CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. Lack of support for an - // extension does not preclude authenticating a user. Common - // extensions are "permit-agent-forwarding", - // "permit-X11-forwarding". The Go SSH library currently does - // not act on any extension, and it is up to server - // implementations to honor them. 
Extensions can be used to - // pass data from the authentication callbacks to the server - // application layer. - Extensions map[string]string -} - -type GSSAPIWithMICConfig struct { - // AllowLogin, must be set, is called when gssapi-with-mic - // authentication is selected (RFC 4462 section 3). The srcName is from the - // results of the GSS-API authentication. The format is username@DOMAIN. - // GSSAPI just guarantees to the server who the user is, but not if they can log in, and with what permissions. - // This callback is called after the user identity is established with GSSAPI to decide if the user can login with - // which permissions. If the user is allowed to login, it should return a nil error. - AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) - - // Server must be set. It's the implementation - // of the GSSAPIServer interface. See GSSAPIServer interface for details. - Server GSSAPIServer -} - -// ServerConfig holds server specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. - NoClientAuth bool - - // MaxAuthTries specifies the maximum number of authentication attempts - // permitted per connection. If set to a negative number, the number of - // attempts are unlimited. If set to zero, the number of attempts are limited - // to 6. - MaxAuthTries int - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. - PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client - // offers a public key for authentication. It must return a nil error - // if the given public key can be used to authenticate the - // given user. For example, see CertChecker.Authenticate. A - // call to this function does not guarantee that the key - // offered is in fact used to authenticate. To record any data - // depending on the public key, store it inside a - // Permissions.Extensions entry. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds. To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown. - KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. - // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string - - // BannerCallback, if present, is called and the return string is sent to - // the client after key exchange completed but before authentication. - BannerCallback func(conn ConnMetadata) string - - // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used - // when gssapi-with-mic authentication is selected (RFC 4462 section 3). 
- GSSAPIWithMICConfig *GSSAPIWithMICConfig -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. -func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -// -// The returned error may be of type *ServerAuthError for -// authentication errors. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. -func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. 
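// Illustration (not part of the patch): a minimal sketch of driving the
// server-side handshake below through the exported API. The host key path,
// port and credential check are placeholders; a real server would verify
// credentials properly and keep accepting connections in a loop.
package main

import (
	"fmt"
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	keyBytes, err := os.ReadFile("/etc/demo/ssh_host_ed25519_key") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	hostSigner, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			if c.User() == "demo" && string(pass) == "secret" { // demo-only check
				return nil, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}
	config.AddHostKey(hostSigner)

	ln, err := net.Listen("tcp", ":2022")
	if err != nil {
		log.Fatal(err)
	}
	nConn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}

	// NewServerConn runs the handshake; the returned channels must be serviced
	// or the connection stalls.
	_, chans, reqs, err := ssh.NewServerConn(nConn, config)
	if err != nil {
		log.Fatal(err)
	}
	go ssh.DiscardRequests(reqs)
	for newCh := range chans {
		newCh.Reject(ssh.UnknownChannelType, "demo server accepts no channels")
	}
}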
-func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && - config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || - config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.waitSession(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. - s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func isAcceptableAlgo(algo string) bool { - switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: - return true - } - return false -} - -func checkSourceAddress(addr net.Addr, sourceAddrs string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) - } - - for _, sourceAddr := range strings.Split(sourceAddrs, ",") { - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if allowedIP.Equal(tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) -} - -func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection, - sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) { - gssAPIServer := gssapiConfig.Server - defer gssAPIServer.DeleteSecContext() - var srcName string - for { - var ( - outToken []byte - needContinue bool - ) - outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(firstToken) - if err != nil { - return err, nil, nil - } - if len(outToken) != 0 
{ - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: outToken, - })); err != nil { - return nil, nil, err - } - } - if !needContinue { - break - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, nil, err - } - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} - if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { - return nil, nil, err - } - mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) - if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { - return err, nil, nil - } - perms, authErr = gssapiConfig.AllowLogin(s, srcName) - return authErr, perms, nil -} - -// ServerAuthError represents server authentication errors and is -// sometimes returned by NewServerConn. It appends any authentication -// errors that may occur, and is returned if all of the authentication -// methods provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods. The first entry is typically ErrNoAuth. - Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -// ErrNoAuth is the error value returned if no -// authentication method has been passed yet. This happens as a normal -// part of the authentication loop, since the client first tries -// 'none' authentication to discover available methods. -// It is returned in ServerAuthError.Errors from NewServerConn. 
-var ErrNoAuth = errors.New("ssh: no auth passed yet") - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - var displayedBanner bool - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { - return nil, err - } - } - } - - perms = nil - authErr := ErrNoAuth - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - authErr = nil - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configured") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !isAcceptableAlgo(algo) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - 
candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) - break - } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - case "gssapi-with-mic": - gssapiConfig := config.GSSAPIWithMICConfig - userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) - if err != nil { - return nil, parseError(msgUserAuthRequest) - } - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. - if userAuthRequestGSSAPI.N == 0 { - authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") - break - } - var i uint32 - present := false - for i = 0; i < userAuthRequestGSSAPI.N; i++ { - if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { - present = true - break - } - } - if !present { - authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") - break - } - // Initial server response, see RFC 4462 section 3.3. - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ - SupportMech: krb5OID, - })); err != nil { - return nil, err - } - // Exchange token, see RFC 4462 section 3.4. 
- packet, err := s.transport.readPacket() - if err != nil { - return nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, err - } - authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, - userAuthReq) - if err != nil { - return nil, err - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { - failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. -type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index d3321f6b7..000000000 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
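// The serverAuthenticate loop removed above dispatches each userauth request to
// callbacks on ssh.ServerConfig. A minimal sketch of wiring those callbacks for the
// "password" and "publickey" branches of that loop; the user name, secret, and
// authorizedKey value are hypothetical placeholders, and a host key must still be
// added via AddHostKey before the config is handed to ssh.NewServerConn.
package sshauthexample

import (
	"bytes"
	"crypto/subtle"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func newServerConfig(authorizedKey ssh.PublicKey) *ssh.ServerConfig {
	return &ssh.ServerConfig{
		// After MaxAuthTries failures the loop above disconnects with
		// "too many authentication failures".
		MaxAuthTries: 3,
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			if c.User() == "demo" && subtle.ConstantTimeCompare(pass, []byte("demo-secret")) == 1 {
				return &ssh.Permissions{}, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
		PublicKeyCallback: func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
			if bytes.Equal(key.Marshal(), authorizedKey.Marshal()) {
				return &ssh.Permissions{
					Extensions: map[string]string{"pubkey-fp": ssh.FingerprintSHA256(key)},
				}, nil
			}
			return nil, fmt.Errorf("unknown public key for user %q", c.User())
		},
	}
}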
- -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. -type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. - // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. 
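// The TerminalModes map and RFC 4254 mode constants above are what callers pass to
// Session.RequestPty later in this file. A small sketch, assuming an established
// *ssh.Session; the terminal name, dimensions, and speeds are arbitrary placeholders.
package ptyexample

import "golang.org/x/crypto/ssh"

func requestInteractivePty(sess *ssh.Session) error {
	modes := ssh.TerminalModes{
		ssh.ECHO:          0,     // disable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed hint
		ssh.TTY_OP_OSPEED: 14400, // output speed hint
	}
	// RequestPty marshals each key/value pair and terminates the list with tty_OP_END.
	return sess.RequestPty("xterm", 40, 80, modes)
}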
-func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. -func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.7. -type ptyWindowChangeMsg struct { - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 -} - -// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. -func (s *Session) WindowChange(h, w int) error { - req := ptyWindowChangeMsg{ - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - } - _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. -func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. -type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. 
If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. -func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. -func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. 
- if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. -type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. -func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. 
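// Run, Output, Wait, and the Exit* error types above make up the high-level command
// API of this file. A sketch of typical use, assuming an already-connected
// *ssh.Client; the command string is an arbitrary placeholder.
package runexample

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func runUptime(client *ssh.Client) (string, error) {
	// A Session accepts only one Run/Output/Shell call, so open one per command.
	sess, err := client.NewSession()
	if err != nil {
		return "", err
	}
	defer sess.Close()

	out, err := sess.Output("uptime")
	var exitErr *ssh.ExitError
	var missing *ssh.ExitMissingError
	switch {
	case err == nil:
		return string(out), nil
	case errors.As(err, &exitErr):
		return "", fmt.Errorf("remote command exited with status %d", exitErr.ExitStatus())
	case errors.As(err, &missing):
		return "", errors.New("remote side closed the session without an exit status")
	default:
		return "", err
	}
}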
-func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. -type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. -func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go deleted file mode 100644 index 24bd7c8e8..000000000 --- a/vendor/golang.org/x/crypto/ssh/ssh_gss.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/asn1" - "errors" -) - -var krb5OID []byte - -func init() { - krb5OID, _ = asn1.Marshal(krb5Mesh) -} - -// GSSAPIClient provides the API to plug-in GSSAPI authentication for client logins. -type GSSAPIClient interface { - // InitSecContext initiates the establishment of a security context for GSS-API between the - // ssh client and ssh server. Initially the token parameter should be specified as nil. - // The routine may return a outputToken which should be transferred to - // the ssh server, where the ssh server will present it to - // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting - // needContinue to false. To complete the context - // establishment, one or more reply tokens may be required from the ssh - // server;if so, InitSecContext will return a needContinue which is true. - // In this case, InitSecContext should be called again when the - // reply token is received from the ssh server, passing the reply - // token to InitSecContext via the token parameters. - // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4. - InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) - // GetMIC generates a cryptographic MIC for the SSH2 message, and places - // the MIC in a token for transfer to the ssh server. 
- // The contents of the MIC field are obtained by calling GSS_GetMIC() - // over the following, using the GSS-API context that was just - // established: - // string session identifier - // byte SSH_MSG_USERAUTH_REQUEST - // string user name - // string service - // string "gssapi-with-mic" - // See RFC 2743 section 2.3.1 and RFC 4462 3.5. - GetMIC(micFiled []byte) ([]byte, error) - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins. -type GSSAPIServer interface { - // AcceptSecContext allows a remotely initiated security context between the application - // and a remote peer to be established by the ssh client. The routine may return a - // outputToken which should be transferred to the ssh client, - // where the ssh client will present it to InitSecContext. - // If no token need be sent, AcceptSecContext will indicate this - // by setting the needContinue to false. To - // complete the context establishment, one or more reply tokens may be - // required from the ssh client. if so, AcceptSecContext - // will return a needContinue which is true, in which case it - // should be called again when the reply token is received from the ssh - // client, passing the token to AcceptSecContext via the - // token parameters. - // The srcName return value is the authenticated username. - // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4. - AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) - // VerifyMIC verifies that a cryptographic MIC, contained in the token parameter, - // fits the supplied message is received from the ssh client. - // See RFC 2743 section 2.3.2. - VerifyMIC(micField []byte, micToken []byte) error - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -var ( - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication, - // so we also support the krb5 mechanism only. - // See RFC 1964 section 1. - krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} -) - -// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST -// See RFC 4462 section 3.2. 
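// parseGSSAPIPayload just below reads a uint32 mechanism count followed by that many
// SSH strings, each carrying a DER-encoded OID. A standalone sketch of the matching
// encoder for that wire format; the helper name is hypothetical.
package gssexample

import (
	"encoding/asn1"
	"encoding/binary"
)

// krb5 is the only mechanism the server-side check above accepts (RFC 1964 section 1).
var krb5 = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2}

// buildGSSAPIMechList frames OIDs the way the parser reads them back:
// a uint32 count, then a uint32 length prefix plus DER bytes per mechanism.
func buildGSSAPIMechList(oids ...asn1.ObjectIdentifier) ([]byte, error) {
	out := make([]byte, 4)
	binary.BigEndian.PutUint32(out, uint32(len(oids)))
	for _, oid := range oids {
		der, err := asn1.Marshal(oid)
		if err != nil {
			return nil, err
		}
		var length [4]byte
		binary.BigEndian.PutUint32(length[:], uint32(len(der)))
		out = append(out, length[:]...)
		out = append(out, der...)
	}
	return out, nil
}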
-type userAuthRequestGSSAPI struct { - N uint32 - OIDS []asn1.ObjectIdentifier -} - -func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { - n, rest, ok := parseUint32(payload) - if !ok { - return nil, errors.New("parse uint32 failed") - } - s := &userAuthRequestGSSAPI{ - N: n, - OIDS: make([]asn1.ObjectIdentifier, n), - } - for i := 0; i < int(n); i++ { - var ( - desiredMech []byte - err error - ) - desiredMech, rest, ok = parseString(rest) - if !ok { - return nil, errors.New("parse string failed") - } - if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { - return nil, err - } - - } - return s, nil -} - -// See RFC 4462 section 3.6. -func buildMIC(sessionID string, username string, service string, authMethod string) []byte { - out := make([]byte, 0, 0) - out = appendString(out, sessionID) - out = append(out, msgUserAuthRequest) - out = appendString(out, username) - out = appendString(out, service) - out = appendString(out, authMethod) - return out -} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index b171b330b..000000000 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,116 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. -type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. -func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. 
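// ListenUnix above requests "streamlocal-forward@openssh.com", so connections to a
// Unix socket on the remote host arrive back over the SSH connection. A sketch,
// assuming an established *ssh.Client; the socket path and the echo handling are
// placeholders.
package unixfwdexample

import (
	"io"
	"net"

	"golang.org/x/crypto/ssh"
)

func forwardRemoteSocket(client *ssh.Client) error {
	ln, err := client.ListenUnix("/tmp/demo.sock")
	if err != nil {
		return err // e.g. the peer denied the streamlocal-forward request
	}
	defer ln.Close()

	for {
		conn, err := ln.Accept() // blocks until the remote side forwards a connection
		if err != nil {
			return err // io.EOF once the forward channel is closed
		}
		go func(c net.Conn) {
			defer c.Close()
			io.Copy(c, c) // trivial echo handler
		}(conn)
	}
}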
-func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. - l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index 80d35f5ec..000000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". -func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. 
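// Client.Listen above implements RFC 4254 "tcpip-forward": the remote sshd binds the
// port and streams each incoming connection back as a channel. A sketch that serves
// HTTP on a remote port; the bind address is an arbitrary placeholder.
package tcpfwdexample

import (
	"net/http"

	"golang.org/x/crypto/ssh"
)

func serveOnRemotePort(client *ssh.Client, handler http.Handler) error {
	// Port 0 would exercise the pre-OpenSSH-6.0 workaround described above.
	ln, err := client.Listen("tcp", "0.0.0.0:8080")
	if err != nil {
		return err // "tcpip-forward request denied by peer" if sshd disallows it
	}
	defer ln.Close()
	return http.Serve(ln, handler)
}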
-func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// handleForwards starts goroutines handling forwarded connections. -// It's called on first use by (*Client).ListenTCP to not launch -// goroutines until needed. -func (c *Client) handleForwards() { - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. -func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. -type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. 
-func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. - laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. 
- l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. - zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. -func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. 
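// Dial and DialTCP above open "direct-tcpip" channels, so the returned net.Conn is
// tunnelled through the SSH connection. A sketch that reads a banner from a service
// reachable only from the remote side; the target address is a placeholder.
package dialexample

import (
	"bufio"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func readBanner(client *ssh.Client) (string, error) {
	conn, err := client.Dial("tcp", "10.0.0.5:25") // resolved and connected by the remote host
	if err != nil {
		return "", err
	}
	defer conn.Close()
	// chanConn deadlines are unsupported (see SetReadDeadline below), so apply
	// timeouts externally.
	line, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		return "", fmt.Errorf("reading banner: %w", err)
	}
	return line, nil
}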
-func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index 49ddc2e7d..000000000 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "bytes" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. -const debugTransport = false - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writeCipherPacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. - writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readCipherPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. 
-func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) - if err != nil { - return err - } - t.reader.pendingKeyChange <- ciph - - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) - if err != nil { - return err - } - t.writer.pendingKeyChange <- ciph - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. -func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. - var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. 
- fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] - - iv := make([]byte, cipherMode.ivSize) - key := make([]byte, cipherMode.keySize) - macKey := make([]byte, macMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) - - return cipherModes[algs.Cipher].create(key, iv, macKey, algs) -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. -func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. 
- if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for length := 0; length < maxVersionStringBytes; length++ { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - if !bytes.HasPrefix(versionString, []byte("SSH-")) { - // RFC 4253 says we need to ignore all version string lines - // except the one containing the SSH version (provided that - // all the lines do not exceed 255 bytes in total). - versionString = versionString[:0] - continue - } - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. - if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index a3c021d3f..000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. 
-// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index d20f52b7d..000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. 
The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index d88bd1db1..000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 0f35592df..000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. 
- - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index b105f80be..000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. 
- // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/http/httpguts/guts.go b/vendor/golang.org/x/net/http/httpguts/guts.go deleted file mode 100644 index e6cd0ced3..000000000 --- a/vendor/golang.org/x/net/http/httpguts/guts.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package httpguts provides functions implementing various details -// of the HTTP specification. -// -// This package is shared by the standard library (which vendors it) -// and x/net/http2. It comes with no API stability promise. -package httpguts - -import ( - "net/textproto" - "strings" -) - -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. 
-// See RFC 7230, Section 4.1.2 -func ValidTrailerHeader(name string) bool { - name = textproto.CanonicalMIMEHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go deleted file mode 100644 index e7de24ee6..000000000 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ /dev/null @@ -1,346 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package httpguts - -import ( - "net" - "strings" - "unicode/utf8" - - "golang.org/x/net/idna" -) - -var isTokenTable = [127]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) -} - -// HeaderValuesContainsToken reports whether any string in values -// contains the provided token, ASCII case-insensitively. -func HeaderValuesContainsToken(values []string, token string) bool { - for _, v := range values { - if headerValueContainsToken(v, token) { - return true - } - } - return false -} - -// isOWS reports whether b is an optional whitespace byte, as defined -// by RFC 7230 section 3.2.3. -func isOWS(b byte) bool { return b == ' ' || b == '\t' } - -// trimOWS returns x with all optional whitespace removes from the -// beginning and end. -func trimOWS(x string) string { - // TODO: consider using strings.Trim(x, " \t") instead, - // if and when it's fast enough. See issue 10292. - // But this ASCII-only code will probably always beat UTF-8 - // aware code. 
- for len(x) > 0 && isOWS(x[0]) { - x = x[1:] - } - for len(x) > 0 && isOWS(x[len(x)-1]) { - x = x[:len(x)-1] - } - return x -} - -// headerValueContainsToken reports whether v (assumed to be a -// 0#element, in the ABNF extension described in RFC 7230 section 7) -// contains token amongst its comma-separated tokens, ASCII -// case-insensitively. -func headerValueContainsToken(v string, token string) bool { - v = trimOWS(v) - if comma := strings.IndexByte(v, ','); comma != -1 { - return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) - } - return tokenEqual(v, token) -} - -// lowerASCII returns the ASCII lowercase version of b. -func lowerASCII(b byte) byte { - if 'A' <= b && b <= 'Z' { - return b + ('a' - 'A') - } - return b -} - -// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. -func tokenEqual(t1, t2 string) bool { - if len(t1) != len(t2) { - return false - } - for i, b := range t1 { - if b >= utf8.RuneSelf { - // No UTF-8 or non-ASCII allowed in tokens. - return false - } - if lowerASCII(byte(b)) != lowerASCII(t2[i]) { - return false - } - } - return true -} - -// isLWS reports whether b is linear white space, according -// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// LWS = [CRLF] 1*( SP | HT ) -func isLWS(b byte) bool { return b == ' ' || b == '\t' } - -// isCTL reports whether b is a control byte, according -// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// CTL = -func isCTL(b byte) bool { - const del = 0x7f // a CTL - return b < ' ' || b == del -} - -// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. -// HTTP/2 imposes the additional restriction that uppercase ASCII -// letters are not allowed. -// -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA -func ValidHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !IsTokenRune(r) { - return false - } - } - return true -} - -// ValidHostHeader reports whether h is a valid host header. -func ValidHostHeader(h string) bool { - // The latest spec is actually this: - // - // http://tools.ietf.org/html/rfc7230#section-5.4 - // Host = uri-host [ ":" port ] - // - // Where uri-host is: - // http://tools.ietf.org/html/rfc3986#section-3.2.2 - // - // But we're going to be much more lenient for now and just - // search for any byte that's not a valid byte in any of those - // expressions. - for i := 0; i < len(h); i++ { - if !validHostByte[h[i]] { - return false - } - } - return true -} - -// See the validHostHeader comment. 
-var validHostByte = [256]bool{ - '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, - '8': true, '9': true, - - 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, - 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, - 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, - 'y': true, 'z': true, - - 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, - 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, - 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, - 'Y': true, 'Z': true, - - '!': true, // sub-delims - '$': true, // sub-delims - '%': true, // pct-encoded (and used in IPv6 zones) - '&': true, // sub-delims - '(': true, // sub-delims - ')': true, // sub-delims - '*': true, // sub-delims - '+': true, // sub-delims - ',': true, // sub-delims - '-': true, // unreserved - '.': true, // unreserved - ':': true, // IPv6address + Host expression's optional port - ';': true, // sub-delims - '=': true, // sub-delims - '[': true, - '\'': true, // sub-delims - ']': true, - '_': true, // unreserved - '~': true, // unreserved -} - -// ValidHeaderFieldValue reports whether v is a valid "field-value" according to -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : -// -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = -// -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : -// -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = -// -// RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" -// -// http2 further says: "Similarly, HTTP/2 allows header field values -// that are not valid. While most of the values that can be encoded -// will not alter header field parsing, carriage return (CR, ASCII -// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII -// 0x0) might be exploited by an attacker if they are translated -// verbatim. Any request or response that contains a character not -// permitted in a header field value MUST be treated as malformed -// (Section 8.1.2.6). Valid characters are defined by the -// field-content ABNF rule in Section 3.2 of [RFC7230]." -// -// This function does not (yet?) properly handle the rejection of -// strings that begin or end with SP or HTAB. -func ValidHeaderFieldValue(v string) bool { - for i := 0; i < len(v); i++ { - b := v[i] - if isCTL(b) && !isLWS(b) { - return false - } - } - return true -} - -func isASCII(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} - -// PunycodeHostPort returns the IDNA Punycode version -// of the provided "host" or "host:port" string. -func PunycodeHostPort(v string) (string, error) { - if isASCII(v) { - return v, nil - } - - host, port, err := net.SplitHostPort(v) - if err != nil { - // The input 'v' argument was just a "host" argument, - // without a port. This error should not be returned - // to the caller. - host = v - port = "" - } - host, err = idna.ToASCII(host) - if err != nil { - // Non-UTF-8? Not representable in Punycode, in any - // case. 
- return "", err - } - if port == "" { - return host, nil - } - return net.JoinHostPort(host, port), nil -} diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore deleted file mode 100644 index 190f12234..000000000 --- a/vendor/golang.org/x/net/http2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*~ -h2i/h2i diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 53fc52579..000000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. -# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile deleted file mode 100644 index 55fd826f7..000000000 --- a/vendor/golang.org/x/net/http2/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README deleted file mode 100644 index 360d5aa37..000000000 --- a/vendor/golang.org/x/net/http2/README +++ /dev/null @@ -1,20 +0,0 @@ -This is a work-in-progress HTTP/2 implementation for Go. - -It will eventually live in the Go standard library and won't require -any changes to your code to use. It will just be automatic. - -Status: - -* The server support is pretty good. A few things are missing - but are being worked on. -* The client work has just started but shares a lot of code - is coming along much quicker. - -Docs are at https://godoc.org/golang.org/x/net/http2 - -Demo test server at https://http2.golang.org/ - -Help & bug reports welcome! - -Contributing: https://golang.org/doc/contribute.html -Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go deleted file mode 100644 index c9a0cf3b4..000000000 --- a/vendor/golang.org/x/net/http2/ciphers.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -// A list of the possible cipher suite ids. Taken from -// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt - -const ( - cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 - cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 - cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 - cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 - cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 - cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 - cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 - cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 - cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 - cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 - cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A - cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B - cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C - cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D - cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E - cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F - cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 - cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 - cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 - cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 - cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 - cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 - cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 - cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 - cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 - cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 - cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A - cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B - // Reserved uint16 = 0x001C-1D - cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F - cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 - cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 - cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 - cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 - cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B - cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C - cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D - cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E - cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 - cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A - cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B - 
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C - cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 - // Reserved uint16 = 0x0047-4F - // Reserved uint16 = 0x0050-58 - // Reserved uint16 = 0x0059-5C - // Unassigned uint16 = 0x005D-5F - // Reserved uint16 = 0x0060-66 - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D - // Unassigned uint16 = 0x006E-83 - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 - cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A - cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B - cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C - cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D - cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E - cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 - cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 - cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 - cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 - cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 - cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 - cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 - cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A - cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B - cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C - cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D - cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E - cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F - cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 - cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 - cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 - cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 - cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 - cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 - cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 - cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 - cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 - cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 - cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA 
- cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB - cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC - cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD - cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE - cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF - cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 - cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 - cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 - cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 - cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 - cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 - // Unassigned uint16 = 0x00C6-FE - cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF - // Unassigned uint16 = 0x01-55,* - cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 - // Unassigned uint16 = 0x5601 - 0xC000 - cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 - cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 - cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 - cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 - cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 - cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A - cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B - cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C - cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F - cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 - cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 - cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 - cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 - cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 - cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 - cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 - cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 - cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A - cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B - cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C - cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D - 
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E - cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F - cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 - cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 - cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C - cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D - cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E - cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F - cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 - cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 - cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 - cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 - cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B - cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C - cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D - cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E - cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F - cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 - cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 - cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 - cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 - cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 - cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 - cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 - cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B - cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C - cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D - cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E - cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F - cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 - cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 - cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 - cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 - cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 - cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 - cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 - 
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 - cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 - cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 - cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A - cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F - cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 - cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 - cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 - cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 - cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 - cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 - cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 - cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 - cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 - cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 - cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A - cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B - cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C - cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D - cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E - cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F - cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 - cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 - cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A - cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 - cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 - cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D 
- cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E - cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F - cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 - cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 - cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 - cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 - cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 - cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B - cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C - cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D - cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E - cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F - cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 - cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 - cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 - cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 - cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 - cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 - cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 - cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 - cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 - cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 - cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA - cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF - // Unassigned uint16 = 0xC0B0-FF - // Unassigned uint16 = 0xC1-CB,* - // Unassigned uint16 = 0xCC00-A7 - cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 - cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 - cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA - cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB - cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC - cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD - cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE -) - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -// References: -// https://tools.ietf.org/html/rfc7540#appendix-A -// Reject cipher suites from Appendix A. 
-// "This list includes those cipher suites that do not -// offer an ephemeral key exchange and those that are -// based on the TLS null, stream or block cipher type" -func isBadCipher(cipher uint16) bool { - switch cipher { - case cipher_TLS_NULL_WITH_NULL_NULL, - cipher_TLS_RSA_WITH_NULL_MD5, - cipher_TLS_RSA_WITH_NULL_SHA, - cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, - cipher_TLS_RSA_WITH_RC4_128_MD5, - cipher_TLS_RSA_WITH_RC4_128_SHA, - cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, - cipher_TLS_RSA_WITH_IDEA_CBC_SHA, - cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_RSA_WITH_DES_CBC_SHA, - cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, - cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, - cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, - cipher_TLS_DH_anon_WITH_RC4_128_MD5, - cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, - cipher_TLS_DH_anon_WITH_DES_CBC_SHA, - cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_KRB5_WITH_DES_CBC_SHA, - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_KRB5_WITH_RC4_128_SHA, - cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, - cipher_TLS_KRB5_WITH_DES_CBC_MD5, - cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, - cipher_TLS_KRB5_WITH_RC4_128_MD5, - cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, - cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, - cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, - cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, - cipher_TLS_PSK_WITH_NULL_SHA, - cipher_TLS_DHE_PSK_WITH_NULL_SHA, - cipher_TLS_RSA_PSK_WITH_NULL_SHA, - cipher_TLS_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, - cipher_TLS_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_WITH_NULL_SHA256, - cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, - cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, - cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, - 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, - cipher_TLS_PSK_WITH_RC4_128_SHA, - cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, - cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, - cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_WITH_SEED_CBC_SHA, - cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, - cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, - cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, - cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, - cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, - cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, - cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, - cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, - cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, - cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, - cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, - cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, - cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, - cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_PSK_WITH_NULL_SHA256, - cipher_TLS_PSK_WITH_NULL_SHA384, - cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_DHE_PSK_WITH_NULL_SHA256, - cipher_TLS_DHE_PSK_WITH_NULL_SHA384, - cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_RSA_PSK_WITH_NULL_SHA256, - cipher_TLS_RSA_PSK_WITH_NULL_SHA384, - cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, - cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, - cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, - cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, - cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDH_RSA_WITH_NULL_SHA, - cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, - cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, - cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, - cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDH_anon_WITH_NULL_SHA, - cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, - cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, - 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, - cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, - cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, - cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, - cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, - cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, - cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, - cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, - cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, - cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, - cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, - cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, - cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, - cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, - cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, - cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, - cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, - 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, - cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, - cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, - cipher_TLS_RSA_WITH_AES_128_CCM, - cipher_TLS_RSA_WITH_AES_256_CCM, - cipher_TLS_RSA_WITH_AES_128_CCM_8, - cipher_TLS_RSA_WITH_AES_256_CCM_8, - cipher_TLS_PSK_WITH_AES_128_CCM, - cipher_TLS_PSK_WITH_AES_256_CCM, - cipher_TLS_PSK_WITH_AES_128_CCM_8, - cipher_TLS_PSK_WITH_AES_256_CCM_8: - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go deleted file mode 100644 index f4d9b5ece..000000000 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code's client connection pooling. - -package http2 - -import ( - "crypto/tls" - "net/http" - "sync" -) - -// ClientConnPool manages a pool of HTTP/2 client connections. -type ClientConnPool interface { - GetClientConn(req *http.Request, addr string) (*ClientConn, error) - MarkDead(*ClientConn) -} - -// clientConnPoolIdleCloser is the interface implemented by ClientConnPool -// implementations which can close their idle connections. -type clientConnPoolIdleCloser interface { - ClientConnPool - closeIdleConnections() -} - -var ( - _ clientConnPoolIdleCloser = (*clientConnPool)(nil) - _ clientConnPoolIdleCloser = noDialClientConnPool{} -) - -// TODO: use singleflight for dialing and addConnCalls? -type clientConnPool struct { - t *Transport - - mu sync.Mutex // TODO: maybe switch to RWMutex - // TODO: add support for sharing conns based on cert names - // (e.g. 
share conn for googleapis.com and appspot.com) - conns map[string][]*ClientConn // key is host:port - dialing map[string]*dialCall // currently in-flight dials - keys map[*ClientConn][]string - addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls -} - -func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, dialOnMiss) -} - -const ( - dialOnMiss = true - noDialOnMiss = false -) - -// shouldTraceGetConn reports whether getClientConn should call any -// ClientTrace.GetConn hook associated with the http.Request. -// -// This complexity is needed to avoid double calls of the GetConn hook -// during the back-and-forth between net/http and x/net/http2 (when the -// net/http.Transport is upgraded to also speak http2), as well as support -// the case where x/net/http2 is being used directly. -func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { - // If our Transport wasn't made via ConfigureTransport, always - // trace the GetConn hook if provided, because that means the - // http2 package is being used directly and it's the one - // dialing, as opposed to net/http. - if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { - return true - } - // Otherwise, only use the GetConn hook if this connection has - // been used previously for other requests. For fresh - // connections, the net/http package does the dialing. - return !st.freshConn -} - -func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { - if isConnectionCloseRequest(req) && dialOnMiss { - // It gets its own connection. - traceGetConn(req, addr) - const singleUse = true - cc, err := p.t.dialClientConn(addr, singleUse) - if err != nil { - return nil, err - } - return cc, nil - } - p.mu.Lock() - for _, cc := range p.conns[addr] { - if st := cc.idleState(); st.canTakeNewRequest { - if p.shouldTraceGetConn(st) { - traceGetConn(req, addr) - } - p.mu.Unlock() - return cc, nil - } - } - if !dialOnMiss { - p.mu.Unlock() - return nil, ErrNoCachedConn - } - traceGetConn(req, addr) - call := p.getStartDialLocked(addr) - p.mu.Unlock() - <-call.done - return call.res, call.err -} - -// dialCall is an in-flight Transport dial call to a host. -type dialCall struct { - p *clientConnPool - done chan struct{} // closed when done - res *ClientConn // valid after done is closed - err error // valid after done is closed -} - -// requires p.mu is held. -func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { - if call, ok := p.dialing[addr]; ok { - // A dial is already in-flight. Don't start another. - return call - } - call := &dialCall{p: p, done: make(chan struct{})} - if p.dialing == nil { - p.dialing = make(map[string]*dialCall) - } - p.dialing[addr] = call - go call.dial(addr) - return call -} - -// run in its own goroutine. -func (c *dialCall) dial(addr string) { - const singleUse = false // shared conn - c.res, c.err = c.p.t.dialClientConn(addr, singleUse) - close(c.done) - - c.p.mu.Lock() - delete(c.p.dialing, addr) - if c.err == nil { - c.p.addConnLocked(addr, c.res) - } - c.p.mu.Unlock() -} - -// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't -// already exist. It coalesces concurrent calls with the same key. -// This is used by the http1 Transport code when it creates a new connection. 
Because -// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know -// the protocol), it can get into a situation where it has multiple TLS connections. -// This code decides which ones live or die. -// The return value used is whether c was used. -// c is never closed. -func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { - p.mu.Lock() - for _, cc := range p.conns[key] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return false, nil - } - } - call, dup := p.addConnCalls[key] - if !dup { - if p.addConnCalls == nil { - p.addConnCalls = make(map[string]*addConnCall) - } - call = &addConnCall{ - p: p, - done: make(chan struct{}), - } - p.addConnCalls[key] = call - go call.run(t, key, c) - } - p.mu.Unlock() - - <-call.done - if call.err != nil { - return false, call.err - } - return !dup, nil -} - -type addConnCall struct { - p *clientConnPool - done chan struct{} // closed when done - err error -} - -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) - - p := c.p - p.mu.Lock() - if err != nil { - c.err = err - } else { - p.addConnLocked(key, cc) - } - delete(p.addConnCalls, key) - p.mu.Unlock() - close(c.done) -} - -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - -// p.mu must be held -func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { - for _, v := range p.conns[key] { - if v == cc { - return - } - } - if p.conns == nil { - p.conns = make(map[string][]*ClientConn) - } - if p.keys == nil { - p.keys = make(map[*ClientConn][]string) - } - p.conns[key] = append(p.conns[key], cc) - p.keys[cc] = append(p.keys[cc], key) -} - -func (p *clientConnPool) MarkDead(cc *ClientConn) { - p.mu.Lock() - defer p.mu.Unlock() - for _, key := range p.keys[cc] { - vv, ok := p.conns[key] - if !ok { - continue - } - newList := filterOutClientConn(vv, cc) - if len(newList) > 0 { - p.conns[key] = newList - } else { - delete(p.conns, key) - } - } - delete(p.keys, cc) -} - -func (p *clientConnPool) closeIdleConnections() { - p.mu.Lock() - defer p.mu.Unlock() - // TODO: don't close a cc if it was just added to the pool - // milliseconds ago and has never been used. There's currently - // a small race window with the HTTP/1 Transport's integration - // where it can add an idle conn just before using it, and - // somebody else can concurrently call CloseIdleConns and - // break some caller's RoundTrip. - for _, vv := range p.conns { - for _, cc := range vv { - cc.closeIfIdle() - } - } -} - -func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { - out := in[:0] - for _, v := range in { - if v != exclude { - out = append(out, v) - } - } - // If we filtered it out, zero out the last item to prevent - // the GC from seeing it. - if len(in) != len(out) { - in[len(in)-1] = nil - } - return out -} - -// noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS -// connection instead. 
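For orientation while reading this removed pool: callers are not expected to construct a ClientConnPool themselves. Below is a minimal sketch, assuming only the package's exported ConfigureTransport helper and a placeholder URL, of how requests normally reach the pooling above once an http.Transport has been upgraded to speak h2:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{}
	// Teach the HTTP/1.1 transport to negotiate h2 on HTTPS; per-host:port
	// connection reuse is then handled by the pool defined in this file.
	if err := http2.ConfigureTransport(t1); err != nil {
		panic(err)
	}
	c := &http.Client{Transport: t1}
	resp, err := c.Get("https://example.com/") // placeholder host
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/2.0" when the server speaks h2
}

The pool then shares one ClientConn per host:port and coalesces concurrent dials, which is what the conns, dialing and addConnCalls maps above coordinate.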
-type noDialClientConnPool struct{ *clientConnPool } - -func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, noDialOnMiss) -} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go deleted file mode 100644 index a3067f8de..000000000 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "fmt" - "sync" -) - -// Buffer chunks are allocated from a pool to reduce pressure on GC. -// The maximum wasted space per dataBuffer is 2x the largest size class, -// which happens when the dataBuffer has multiple chunks and there is -// one unread byte in both the first and last chunks. We use a few size -// classes to minimize overheads for servers that typically receive very -// small request bodies. -// -// TODO: Benchmark to determine if the pools are necessary. The GC may have -// improved enough that we can instead allocate chunks like this: -// make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) - -func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } - } - return dataChunkPools[i].Get().([]byte) -} - -func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } - } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) -} - -// dataBuffer is an io.ReadWriter backed by a list of data chunks. -// Each dataBuffer is used to read DATA frames on a single stream. -// The buffer is divided into chunks so the server can limit the -// total memory used by a single connection without limiting the -// request body size on any single stream. -type dataBuffer struct { - chunks [][]byte - r int // next byte to read is chunks[0][r] - w int // next byte to write is chunks[len(chunks)-1][w] - size int // total buffered bytes - expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) -} - -var errReadEmpty = errors.New("read from empty dataBuffer") - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *dataBuffer) Read(p []byte) (int, error) { - if b.size == 0 { - return 0, errReadEmpty - } - var ntotal int - for len(p) > 0 && b.size > 0 { - readFrom := b.bytesFromFirstChunk() - n := copy(p, readFrom) - p = p[n:] - ntotal += n - b.r += n - b.size -= n - // If the first chunk has been consumed, advance to the next chunk. 
- if b.r == len(b.chunks[0]) { - putDataBufferChunk(b.chunks[0]) - end := len(b.chunks) - 1 - copy(b.chunks[:end], b.chunks[1:]) - b.chunks[end] = nil - b.chunks = b.chunks[:end] - b.r = 0 - } - } - return ntotal, nil -} - -func (b *dataBuffer) bytesFromFirstChunk() []byte { - if len(b.chunks) == 1 { - return b.chunks[0][b.r:b.w] - } - return b.chunks[0][b.r:] -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *dataBuffer) Len() int { - return b.size -} - -// Write appends p to the buffer. -func (b *dataBuffer) Write(p []byte) (int, error) { - ntotal := len(p) - for len(p) > 0 { - // If the last chunk is empty, allocate a new chunk. Try to allocate - // enough to fully copy p plus any additional bytes we expect to - // receive. However, this may allocate less than len(p). - want := int64(len(p)) - if b.expected > want { - want = b.expected - } - chunk := b.lastChunkOrAlloc(want) - n := copy(chunk[b.w:], p) - p = p[n:] - b.w += n - b.size += n - b.expected -= int64(n) - } - return ntotal, nil -} - -func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { - if len(b.chunks) != 0 { - last := b.chunks[len(b.chunks)-1] - if b.w < len(last) { - return last - } - } - chunk := getDataBufferChunk(want) - b.chunks = append(b.chunks, chunk) - b.w = 0 - return chunk -} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go deleted file mode 100644 index 71f2c4631..000000000 --- a/vendor/golang.org/x/net/http2/errors.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "fmt" -) - -// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. -type ErrCode uint32 - -const ( - ErrCodeNo ErrCode = 0x0 - ErrCodeProtocol ErrCode = 0x1 - ErrCodeInternal ErrCode = 0x2 - ErrCodeFlowControl ErrCode = 0x3 - ErrCodeSettingsTimeout ErrCode = 0x4 - ErrCodeStreamClosed ErrCode = 0x5 - ErrCodeFrameSize ErrCode = 0x6 - ErrCodeRefusedStream ErrCode = 0x7 - ErrCodeCancel ErrCode = 0x8 - ErrCodeCompression ErrCode = 0x9 - ErrCodeConnect ErrCode = 0xa - ErrCodeEnhanceYourCalm ErrCode = 0xb - ErrCodeInadequateSecurity ErrCode = 0xc - ErrCodeHTTP11Required ErrCode = 0xd -) - -var errCodeName = map[ErrCode]string{ - ErrCodeNo: "NO_ERROR", - ErrCodeProtocol: "PROTOCOL_ERROR", - ErrCodeInternal: "INTERNAL_ERROR", - ErrCodeFlowControl: "FLOW_CONTROL_ERROR", - ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", - ErrCodeStreamClosed: "STREAM_CLOSED", - ErrCodeFrameSize: "FRAME_SIZE_ERROR", - ErrCodeRefusedStream: "REFUSED_STREAM", - ErrCodeCancel: "CANCEL", - ErrCodeCompression: "COMPRESSION_ERROR", - ErrCodeConnect: "CONNECT_ERROR", - ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", - ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", - ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", -} - -func (e ErrCode) String() string { - if s, ok := errCodeName[e]; ok { - return s - } - return fmt.Sprintf("unknown error code 0x%x", uint32(e)) -} - -// ConnectionError is an error that results in the termination of the -// entire connection. -type ConnectionError ErrCode - -func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } - -// StreamError is an error that only affects one stream within an -// HTTP/2 connection. 
-type StreamError struct { - StreamID uint32 - Code ErrCode - Cause error // optional additional detail -} - -func streamError(id uint32, code ErrCode) StreamError { - return StreamError{StreamID: id, Code: code} -} - -func (e StreamError) Error() string { - if e.Cause != nil { - return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) - } - return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) -} - -// 6.9.1 The Flow Control Window -// "If a sender receives a WINDOW_UPDATE that causes a flow control -// window to exceed this maximum it MUST terminate either the stream -// or the connection, as appropriate. For streams, [...]; for the -// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." -type goAwayFlowError struct{} - -func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } - -// connError represents an HTTP/2 ConnectionError error code, along -// with a string (for debugging) explaining why. -// -// Errors of this type are only returned by the frame parser functions -// and converted into ConnectionError(Code), after stashing away -// the Reason into the Framer's errDetail field, accessible via -// the (*Framer).ErrorDetail method. -type connError struct { - Code ErrCode // the ConnectionError error code - Reason string // additional reason -} - -func (e connError) Error() string { - return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) -} - -type pseudoHeaderError string - -func (e pseudoHeaderError) Error() string { - return fmt.Sprintf("invalid pseudo-header %q", string(e)) -} - -type duplicatePseudoHeaderError string - -func (e duplicatePseudoHeaderError) Error() string { - return fmt.Sprintf("duplicate pseudo-header %q", string(e)) -} - -type headerFieldNameError string - -func (e headerFieldNameError) Error() string { - return fmt.Sprintf("invalid header field name %q", string(e)) -} - -type headerFieldValueError string - -func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) -} - -var ( - errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") - errPseudoAfterRegular = errors.New("pseudo header field after regular") -) diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go deleted file mode 100644 index cea601fcd..000000000 --- a/vendor/golang.org/x/net/http2/flow.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Flow control - -package http2 - -// flow is the flow control window's size. -type flow struct { - // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. - n int32 - - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. It is nil for the flow - // that's on the conn directly. - conn *flow -} - -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } - -func (f *flow) available() int32 { - n := f.n - if f.conn != nil && f.conn.n < n { - n = f.conn.n - } - return n -} - -func (f *flow) take(n int32) { - if n > f.available() { - panic("internal error: took too much") - } - f.n -= n - if f.conn != nil { - f.conn.n -= n - } -} - -// add adds n bytes (positive or negative) to the flow control window. -// It returns false if the sum would exceed 2^31-1. 
-func (f *flow) add(n int32) bool { - sum := f.n + n - if (sum > n) == (f.n > 0) { - f.n = sum - return true - } - return false -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go deleted file mode 100644 index 514c126c5..000000000 --- a/vendor/golang.org/x/net/http2/frame.go +++ /dev/null @@ -1,1614 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "strings" - "sync" - - "golang.org/x/net/http/httpguts" - "golang.org/x/net/http2/hpack" -) - -const frameHeaderLen = 9 - -var padZeros = make([]byte, 255) // zeros for padding - -// A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 -type FrameType uint8 - -const ( - FrameData FrameType = 0x0 - FrameHeaders FrameType = 0x1 - FramePriority FrameType = 0x2 - FrameRSTStream FrameType = 0x3 - FrameSettings FrameType = 0x4 - FramePushPromise FrameType = 0x5 - FramePing FrameType = 0x6 - FrameGoAway FrameType = 0x7 - FrameWindowUpdate FrameType = 0x8 - FrameContinuation FrameType = 0x9 -) - -var frameName = map[FrameType]string{ - FrameData: "DATA", - FrameHeaders: "HEADERS", - FramePriority: "PRIORITY", - FrameRSTStream: "RST_STREAM", - FrameSettings: "SETTINGS", - FramePushPromise: "PUSH_PROMISE", - FramePing: "PING", - FrameGoAway: "GOAWAY", - FrameWindowUpdate: "WINDOW_UPDATE", - FrameContinuation: "CONTINUATION", -} - -func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s - } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) -} - -// Flags is a bitmask of HTTP/2 flags. -// The meaning of flags varies depending on the frame type. -type Flags uint8 - -// Has reports whether f contains all (0 or more) flags in v. -func (f Flags) Has(v Flags) bool { - return (f & v) == v -} - -// Frame-specific FrameHeader flag bits. -const ( - // Data Frame - FlagDataEndStream Flags = 0x1 - FlagDataPadded Flags = 0x8 - - // Headers Frame - FlagHeadersEndStream Flags = 0x1 - FlagHeadersEndHeaders Flags = 0x4 - FlagHeadersPadded Flags = 0x8 - FlagHeadersPriority Flags = 0x20 - - // Settings Frame - FlagSettingsAck Flags = 0x1 - - // Ping Frame - FlagPingAck Flags = 0x1 - - // Continuation Frame - FlagContinuationEndHeaders Flags = 0x4 - - FlagPushPromiseEndHeaders Flags = 0x4 - FlagPushPromisePadded Flags = 0x8 -) - -var flagName = map[FrameType]map[Flags]string{ - FrameData: { - FlagDataEndStream: "END_STREAM", - FlagDataPadded: "PADDED", - }, - FrameHeaders: { - FlagHeadersEndStream: "END_STREAM", - FlagHeadersEndHeaders: "END_HEADERS", - FlagHeadersPadded: "PADDED", - FlagHeadersPriority: "PRIORITY", - }, - FrameSettings: { - FlagSettingsAck: "ACK", - }, - FramePing: { - FlagPingAck: "ACK", - }, - FrameContinuation: { - FlagContinuationEndHeaders: "END_HEADERS", - }, - FramePushPromise: { - FlagPushPromiseEndHeaders: "END_HEADERS", - FlagPushPromisePadded: "PADDED", - }, -} - -// a frameParser parses a frame given its FrameHeader and payload -// bytes. The length of payload will always equal fh.Length (which -// might be 0). 
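The parser table declared next only covers the ten registered frame types; anything else falls through to parseUnknownFrame. A small sketch of that path, assuming an in-memory bytes.Buffer as the wire and an arbitrary unregistered type value of 0xfa:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var wire bytes.Buffer
	w := http2.NewFramer(&wire, nil) // write side only
	r := http2.NewFramer(nil, &wire) // read side only

	// 0xfa is not a registered FrameType, so it has no parser entry
	// and is returned by ReadFrame as *UnknownFrame.
	if err := w.WriteRawFrame(http2.FrameType(0xfa), 0, 0, []byte("ext")); err != nil {
		panic(err)
	}
	f, err := r.ReadFrame()
	if err != nil {
		panic(err)
	}
	if uf, ok := f.(*http2.UnknownFrame); ok {
		fmt.Printf("%v len=%d payload=%q\n", uf.Type, uf.Length, uf.Payload())
	}
}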
-type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) - -var frameParsers = map[FrameType]frameParser{ - FrameData: parseDataFrame, - FrameHeaders: parseHeadersFrame, - FramePriority: parsePriorityFrame, - FrameRSTStream: parseRSTStreamFrame, - FrameSettings: parseSettingsFrame, - FramePushPromise: parsePushPromise, - FramePing: parsePingFrame, - FrameGoAway: parseGoAwayFrame, - FrameWindowUpdate: parseWindowUpdateFrame, - FrameContinuation: parseContinuationFrame, -} - -func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f - } - return parseUnknownFrame -} - -// A FrameHeader is the 9 byte header of all HTTP/2 frames. -// -// See http://http2.github.io/http2-spec/#FrameHeader -type FrameHeader struct { - valid bool // caller can access []byte fields in the Frame - - // Type is the 1 byte frame type. There are ten standard frame - // types, but extension frame types may be written by WriteRawFrame - // and will be returned by ReadFrame (as UnknownFrame). - Type FrameType - - // Flags are the 1 byte of 8 potential bit flags per frame. - // They are specific to the frame type. - Flags Flags - - // Length is the length of the frame, not including the 9 byte header. - // The maximum size is one byte less than 16MB (uint24), but only - // frames up to 16KB are allowed without peer agreement. - Length uint32 - - // StreamID is which stream this frame is for. Certain frames - // are not stream-specific, in which case this field is 0. - StreamID uint32 -} - -// Header returns h. It exists so FrameHeaders can be embedded in other -// specific frame types and implement the Frame interface. -func (h FrameHeader) Header() FrameHeader { return h } - -func (h FrameHeader) String() string { - var buf bytes.Buffer - buf.WriteString("[FrameHeader ") - h.writeDebug(&buf) - buf.WriteByte(']') - return buf.String() -} - -func (h FrameHeader) writeDebug(buf *bytes.Buffer) { - buf.WriteString(h.Type.String()) - if h.Flags != 0 { - buf.WriteString(" flags=") - set := 0 - for i := uint8(0); i < 8; i++ { - if h.Flags&(1< 1 { - buf.WriteByte('|') - } - name := flagName[h.Type][Flags(1<>24), - byte(streamID>>16), - byte(streamID>>8), - byte(streamID)) -} - -func (f *Framer) endWrite() error { - // Now that we know the final size, fill in the FrameHeader in - // the space previously reserved for it. Abuse append. - length := len(f.wbuf) - frameHeaderLen - if length >= (1 << 24) { - return ErrFrameTooLarge - } - _ = append(f.wbuf[:0], - byte(length>>16), - byte(length>>8), - byte(length)) - if f.logWrites { - f.logWrite() - } - - n, err := f.w.Write(f.wbuf) - if err == nil && n != len(f.wbuf) { - err = io.ErrShortWrite - } - return err -} - -func (f *Framer) logWrite() { - if f.debugFramer == nil { - f.debugFramerBuf = new(bytes.Buffer) - f.debugFramer = NewFramer(nil, f.debugFramerBuf) - f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below - // Let us read anything, even if we accidentally wrote it - // in the wrong order: - f.debugFramer.AllowIllegalReads = true - } - f.debugFramerBuf.Write(f.wbuf) - fr, err := f.debugFramer.ReadFrame() - if err != nil { - f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) - return - } - f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) -} - -func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } -func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) 
} -func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } -func (f *Framer) writeUint32(v uint32) { - f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -const ( - minMaxFrameSize = 1 << 14 - maxFrameSize = 1<<24 - 1 -) - -// SetReuseFrames allows the Framer to reuse Frames. -// If called on a Framer, Frames returned by calls to ReadFrame are only -// valid until the next call to ReadFrame. -func (fr *Framer) SetReuseFrames() { - if fr.frameCache != nil { - return - } - fr.frameCache = &frameCache{} -} - -type frameCache struct { - dataFrame DataFrame -} - -func (fc *frameCache) getDataFrame() *DataFrame { - if fc == nil { - return &DataFrame{} - } - return &fc.dataFrame -} - -// NewFramer returns a Framer that writes frames to w and reads them from r. -func NewFramer(w io.Writer, r io.Reader) *Framer { - fr := &Framer{ - w: w, - r: r, - logReads: logFrameReads, - logWrites: logFrameWrites, - debugReadLoggerf: log.Printf, - debugWriteLoggerf: log.Printf, - } - fr.getReadBuf = func(size uint32) []byte { - if cap(fr.readBuf) >= int(size) { - return fr.readBuf[:size] - } - fr.readBuf = make([]byte, size) - return fr.readBuf - } - fr.SetMaxReadFrameSize(maxFrameSize) - return fr -} - -// SetMaxReadFrameSize sets the maximum size of a frame -// that will be read by a subsequent call to ReadFrame. -// It is the caller's responsibility to advertise this -// limit with a SETTINGS frame. -func (fr *Framer) SetMaxReadFrameSize(v uint32) { - if v > maxFrameSize { - v = maxFrameSize - } - fr.maxReadSize = v -} - -// ErrorDetail returns a more detailed error of the last error -// returned by Framer.ReadFrame. For instance, if ReadFrame -// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail -// will say exactly what was invalid. ErrorDetail is not guaranteed -// to return a non-nil value and like the rest of the http2 package, -// its return value is not protected by an API compatibility promise. -// ErrorDetail is reset after the next call to ReadFrame. -func (fr *Framer) ErrorDetail() error { - return fr.errDetail -} - -// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer -// sends a frame that is larger than declared with SetMaxReadFrameSize. -var ErrFrameTooLarge = errors.New("http2: frame too large") - -// terminalReadFrameError reports whether err is an unrecoverable -// error from ReadFrame and no other frames should be read. -func terminalReadFrameError(err error) bool { - if _, ok := err.(StreamError); ok { - return false - } - return err != nil -} - -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. -// -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. 
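A minimal sketch of driving the exported read/write API declared in this file over a bytes.Buffer; SettingMaxFrameSize is assumed from the package's settings definitions elsewhere in the tree:

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/net/http2"
)

func main() {
	var wire bytes.Buffer

	w := http2.NewFramer(&wire, nil)
	if err := w.WriteSettings(http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14}); err != nil {
		panic(err)
	}
	if err := w.WriteData(1, true, []byte("hello")); err != nil { // stream 1, END_STREAM set
		panic(err)
	}

	r := http2.NewFramer(nil, &wire)
	r.SetMaxReadFrameSize(1 << 20) // caller-advertised read limit
	for {
		f, err := r.ReadFrame()
		if err == io.EOF {
			break
		}
		if err != nil {
			// A StreamError only poisons one stream; anything else ends the loop.
			if se, ok := err.(http2.StreamError); ok {
				fmt.Println("stream error:", se.StreamID, se.Code)
				continue
			}
			panic(err)
		}
		switch f := f.(type) {
		case *http2.SettingsFrame:
			fmt.Println("SETTINGS, ack:", f.IsAck())
		case *http2.DataFrame:
			fmt.Printf("DATA on stream %d: %q\n", f.StreamID, f.Data())
		default:
			fmt.Println("other:", f.Header())
		}
	}
}

As the ReadFrame doc comment above states, each returned Frame (and slices such as DataFrame.Data) is only valid until the next ReadFrame call, because the Framer reuses its read buffer.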
-func (fr *Framer) ReadFrame() (Frame, error) { - fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } - fh, err := readFrameHeader(fr.headerBuf[:], fr.r) - if err != nil { - return nil, err - } - if fh.Length > fr.maxReadSize { - return nil, ErrFrameTooLarge - } - payload := fr.getReadBuf(fh.Length) - if _, err := io.ReadFull(fr.r, payload); err != nil { - return nil, err - } - f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) - if err != nil { - if ce, ok := err.(connError); ok { - return nil, fr.connError(ce.Code, ce.Reason) - } - return nil, err - } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } - if fr.logReads { - fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) - } - if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { - return fr.readMetaFrame(f.(*HeadersFrame)) - } - return f, nil -} - -// connError returns ConnectionError(code) but first -// stashes away a public reason to the caller can optionally relay it -// to the peer before hanging up on them. This might help others debug -// their implementations. -func (fr *Framer) connError(code ErrCode, reason string) error { - fr.errDetail = errors.New(reason) - return ConnectionError(code) -} - -// checkFrameOrder reports an error if f is an invalid frame to return -// next from ReadFrame. Mostly it checks whether HEADERS and -// CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f - if fr.AllowIllegalReads { - return nil - } - - fh := f.Header() - if fr.lastHeaderStream != 0 { - if fh.Type != FrameContinuation { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", - fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) - } - if fh.StreamID != fr.lastHeaderStream { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", - fh.StreamID, fr.lastHeaderStream)) - } - } else if fh.Type == FrameContinuation { - return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) - } - - switch fh.Type { - case FrameHeaders, FrameContinuation: - if fh.Flags.Has(FlagHeadersEndHeaders) { - fr.lastHeaderStream = 0 - } else { - fr.lastHeaderStream = fh.StreamID - } - } - - return nil -} - -// A DataFrame conveys arbitrary, variable-length sequences of octets -// associated with a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.1 -type DataFrame struct { - FrameHeader - data []byte -} - -func (f *DataFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagDataEndStream) -} - -// Data returns the frame's data octets, not including any padding -// size byte or padding suffix bytes. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *DataFrame) Data() []byte { - f.checkValid() - return f.data -} - -func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - // DATA frames MUST be associated with a stream. If a - // DATA frame is received whose stream identifier - // field is 0x0, the recipient MUST respond with a - // connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. 
- return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} - } - f := fc.getDataFrame() - f.FrameHeader = fh - - var padSize byte - if fh.Flags.Has(FlagDataPadded) { - var err error - payload, padSize, err = readByte(payload) - if err != nil { - return nil, err - } - } - if int(padSize) > len(payload) { - // If the length of the padding is greater than the - // length of the frame payload, the recipient MUST - // treat this as a connection error. - // Filed: https://github.com/http2/http2-spec/issues/610 - return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} - } - f.data = payload[:len(payload)-int(padSize)] - return f, nil -} - -var ( - errStreamID = errors.New("invalid stream ID") - errDepStreamID = errors.New("invalid dependent stream ID") - errPadLength = errors.New("pad length too large") - errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") -) - -func validStreamIDOrZero(streamID uint32) bool { - return streamID&(1<<31) == 0 -} - -func validStreamID(streamID uint32) bool { - return streamID != 0 && streamID&(1<<31) == 0 -} - -// WriteData writes a DATA frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - return f.WriteDataPadded(streamID, endStream, data, nil) -} - -// WriteDataPadded writes a DATA frame with optional padding. -// -// If pad is nil, the padding bit is not sent. -// The length of pad must not exceed 255 bytes. -// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if len(pad) > 0 { - if len(pad) > 255 { - return errPadLength - } - if !f.AllowIllegalWrites { - for _, b := range pad { - if b != 0 { - // "Padding octets MUST be set to zero when sending." - return errPadBytes - } - } - } - } - var flags Flags - if endStream { - flags |= FlagDataEndStream - } - if pad != nil { - flags |= FlagDataPadded - } - f.startWrite(FrameData, flags, streamID) - if pad != nil { - f.wbuf = append(f.wbuf, byte(len(pad))) - } - f.wbuf = append(f.wbuf, data...) - f.wbuf = append(f.wbuf, pad...) - return f.endWrite() -} - -// A SettingsFrame conveys configuration parameters that affect how -// endpoints communicate, such as preferences and constraints on peer -// behavior. -// -// See http://http2.github.io/http2-spec/#SETTINGS -type SettingsFrame struct { - FrameHeader - p []byte -} - -func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { - // When this (ACK 0x1) bit is set, the payload of the - // SETTINGS frame MUST be empty. Receipt of a - // SETTINGS frame with the ACK flag set and a length - // field value other than 0 MUST be treated as a - // connection error (Section 5.4.1) of type - // FRAME_SIZE_ERROR. - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - // SETTINGS frames always apply to a connection, - // never a single stream. 
The stream identifier for a - // SETTINGS frame MUST be zero (0x0). If an endpoint - // receives a SETTINGS frame whose stream identifier - // field is anything other than 0x0, the endpoint MUST - // respond with a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p)%6 != 0 { - // Expecting even number of 6 byte settings. - return nil, ConnectionError(ErrCodeFrameSize) - } - f := &SettingsFrame{FrameHeader: fh, p: p} - if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { - // Values above the maximum flow control window size of 2^31 - 1 MUST - // be treated as a connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - return nil, ConnectionError(ErrCodeFlowControl) - } - return f, nil -} - -func (f *SettingsFrame) IsAck() bool { - return f.FrameHeader.Flags.Has(FlagSettingsAck) -} - -func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) { - f.checkValid() - for i := 0; i < f.NumSettings(); i++ { - if s := f.Setting(i); s.ID == id { - return s.Val, true - } - } - return 0, false -} - -// Setting returns the setting from the frame at the given 0-based index. -// The index must be >= 0 and less than f.NumSettings(). -func (f *SettingsFrame) Setting(i int) Setting { - buf := f.p - return Setting{ - ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])), - Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]), - } -} - -func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 } - -// HasDuplicates reports whether f contains any duplicate setting IDs. -func (f *SettingsFrame) HasDuplicates() bool { - num := f.NumSettings() - if num == 0 { - return false - } - // If it's small enough (the common case), just do the n^2 - // thing and avoid a map allocation. - if num < 10 { - for i := 0; i < num; i++ { - idi := f.Setting(i).ID - for j := i + 1; j < num; j++ { - idj := f.Setting(j).ID - if idi == idj { - return true - } - } - } - return false - } - seen := map[SettingID]bool{} - for i := 0; i < num; i++ { - id := f.Setting(i).ID - if seen[id] { - return true - } - seen[id] = true - } - return false -} - -// ForeachSetting runs fn for each setting. -// It stops and returns the first error. -func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { - f.checkValid() - for i := 0; i < f.NumSettings(); i++ { - if err := fn(f.Setting(i)); err != nil { - return err - } - } - return nil -} - -// WriteSettings writes a SETTINGS frame with zero or more settings -// specified and the ACK bit not set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettings(settings ...Setting) error { - f.startWrite(FrameSettings, 0, 0) - for _, s := range settings { - f.writeUint16(uint16(s.ID)) - f.writeUint32(s.Val) - } - return f.endWrite() -} - -// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettingsAck() error { - f.startWrite(FrameSettings, FlagSettingsAck, 0) - return f.endWrite() -} - -// A PingFrame is a mechanism for measuring a minimal round trip time -// from the sender, as well as determining whether an idle connection -// is still functional. 
-// See http://http2.github.io/http2-spec/#rfc.section.6.7 -type PingFrame struct { - FrameHeader - Data [8]byte -} - -func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } - -func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { - if len(payload) != 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - f := &PingFrame{FrameHeader: fh} - copy(f.Data[:], payload) - return f, nil -} - -func (f *Framer) WritePing(ack bool, data [8]byte) error { - var flags Flags - if ack { - flags = FlagPingAck - } - f.startWrite(FramePing, flags, 0) - f.writeBytes(data[:]) - return f.endWrite() -} - -// A GoAwayFrame informs the remote peer to stop creating streams on this connection. -// See http://http2.github.io/http2-spec/#rfc.section.6.8 -type GoAwayFrame struct { - FrameHeader - LastStreamID uint32 - ErrCode ErrCode - debugData []byte -} - -// DebugData returns any debug data in the GOAWAY frame. Its contents -// are not defined. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *GoAwayFrame) DebugData() []byte { - f.checkValid() - return f.debugData -} - -func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p) < 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - return &GoAwayFrame{ - FrameHeader: fh, - LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), - ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), - debugData: p[8:], - }, nil -} - -func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { - f.startWrite(FrameGoAway, 0, 0) - f.writeUint32(maxStreamID & (1<<31 - 1)) - f.writeUint32(uint32(code)) - f.writeBytes(debugData) - return f.endWrite() -} - -// An UnknownFrame is the frame type returned when the frame type is unknown -// or no specific frame type parser exists. -type UnknownFrame struct { - FrameHeader - p []byte -} - -// Payload returns the frame's payload (after the header). It is not -// valid to call this method after a subsequent call to -// Framer.ReadFrame, nor is it valid to retain the returned slice. -// The memory is owned by the Framer and is invalidated when the next -// frame is read. -func (f *UnknownFrame) Payload() []byte { - f.checkValid() - return f.p -} - -func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - return &UnknownFrame{fh, p}, nil -} - -// A WindowUpdateFrame is used to implement flow control. -// See http://http2.github.io/http2-spec/#rfc.section.6.9 -type WindowUpdateFrame struct { - FrameHeader - Increment uint32 // never read with high bit set -} - -func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit - if inc == 0 { - // A receiver MUST treat the receipt of a - // WINDOW_UPDATE frame with an flow control window - // increment of 0 as a stream error (Section 5.4.2) of - // type PROTOCOL_ERROR; errors on the connection flow - // control window MUST be treated as a connection - // error (Section 5.4.1). 
- if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - return &WindowUpdateFrame{ - FrameHeader: fh, - Increment: inc, - }, nil -} - -// WriteWindowUpdate writes a WINDOW_UPDATE frame. -// The increment value must be between 1 and 2,147,483,647, inclusive. -// If the Stream ID is zero, the window update applies to the -// connection as a whole. -func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { - // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." - if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { - return errors.New("illegal window increment value") - } - f.startWrite(FrameWindowUpdate, 0, streamID) - f.writeUint32(incr) - return f.endWrite() -} - -// A HeadersFrame is used to open a stream and additionally carries a -// header block fragment. -type HeadersFrame struct { - FrameHeader - - // Priority is set if FlagHeadersPriority is set in the FrameHeader. - Priority PriorityParam - - headerFragBuf []byte // not owned -} - -func (f *HeadersFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *HeadersFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) -} - -func (f *HeadersFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndStream) -} - -func (f *HeadersFrame) HasPriority() bool { - return f.FrameHeader.Flags.Has(FlagHeadersPriority) -} - -func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { - hf := &HeadersFrame{ - FrameHeader: fh, - } - if fh.StreamID == 0 { - // HEADERS frames MUST be associated with a stream. If a HEADERS frame - // is received whose stream identifier field is 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} - } - var padLength uint8 - if fh.Flags.Has(FlagHeadersPadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - if fh.Flags.Has(FlagHeadersPriority) { - var v uint32 - p, v, err = readUint32(p) - if err != nil { - return nil, err - } - hf.Priority.StreamDep = v & 0x7fffffff - hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set - p, hf.Priority.Weight, err = readByte(p) - if err != nil { - return nil, err - } - } - if len(p)-int(padLength) <= 0 { - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - hf.headerFragBuf = p[:len(p)-int(padLength)] - return hf, nil -} - -// HeadersFrameParam are the parameters for writing a HEADERS frame. -type HeadersFrameParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndStream indicates that the header block is the last that - // the endpoint will send for the identified stream. Setting - // this flag causes the stream to enter one of "half closed" - // states. - EndStream bool - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 - - // Priority, if non-zero, includes stream priority information - // in the HEADER frame. - Priority PriorityParam -} - -// WriteHeaders writes a single HEADERS frame. 
-// -// This is a low-level header writing method. Encoding headers and -// splitting them into any necessary CONTINUATION frames is handled -// elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteHeaders(p HeadersFrameParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagHeadersPadded - } - if p.EndStream { - flags |= FlagHeadersEndStream - } - if p.EndHeaders { - flags |= FlagHeadersEndHeaders - } - if !p.Priority.IsZero() { - flags |= FlagHeadersPriority - } - f.startWrite(FrameHeaders, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !p.Priority.IsZero() { - v := p.Priority.StreamDep - if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { - return errDepStreamID - } - if p.Priority.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Priority.Weight) - } - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// A PriorityFrame specifies the sender-advised priority of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.3 -type PriorityFrame struct { - FrameHeader - PriorityParam -} - -// PriorityParam are the stream prioritzation parameters. -type PriorityParam struct { - // StreamDep is a 31-bit stream identifier for the - // stream that this stream depends on. Zero means no - // dependency. - StreamDep uint32 - - // Exclusive is whether the dependency is exclusive. - Exclusive bool - - // Weight is the stream's zero-indexed weight. It should be - // set together with StreamDep, or neither should be set. Per - // the spec, "Add one to the value to obtain a weight between - // 1 and 256." - Weight uint8 -} - -func (p PriorityParam) IsZero() bool { - return p == PriorityParam{} -} - -func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} - } - if len(payload) != 5 { - return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} - } - v := binary.BigEndian.Uint32(payload[:4]) - streamID := v & 0x7fffffff // mask off high bit - return &PriorityFrame{ - FrameHeader: fh, - PriorityParam: PriorityParam{ - Weight: payload[4], - StreamDep: streamID, - Exclusive: streamID != v, // was high bit set? - }, - }, nil -} - -// WritePriority writes a PRIORITY frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if !validStreamIDOrZero(p.StreamDep) { - return errDepStreamID - } - f.startWrite(FramePriority, 0, streamID) - v := p.StreamDep - if p.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Weight) - return f.endWrite() -} - -// A RSTStreamFrame allows for abnormal termination of a stream. 
-// See http://http2.github.io/http2-spec/#rfc.section.6.4 -type RSTStreamFrame struct { - FrameHeader - ErrCode ErrCode -} - -func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil -} - -// WriteRSTStream writes a RST_STREAM frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - f.startWrite(FrameRSTStream, 0, streamID) - f.writeUint32(uint32(code)) - return f.endWrite() -} - -// A ContinuationFrame is used to continue a sequence of header block fragments. -// See http://http2.github.io/http2-spec/#rfc.section.6.10 -type ContinuationFrame struct { - FrameHeader - headerFragBuf []byte -} - -func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} - } - return &ContinuationFrame{fh, p}, nil -} - -func (f *ContinuationFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *ContinuationFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) -} - -// WriteContinuation writes a CONTINUATION frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if endHeaders { - flags |= FlagContinuationEndHeaders - } - f.startWrite(FrameContinuation, flags, streamID) - f.wbuf = append(f.wbuf, headerBlockFragment...) - return f.endWrite() -} - -// A PushPromiseFrame is used to initiate a server stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.6 -type PushPromiseFrame struct { - FrameHeader - PromiseID uint32 - headerFragBuf []byte // not owned -} - -func (f *PushPromiseFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *PushPromiseFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) -} - -func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { - pp := &PushPromiseFrame{ - FrameHeader: fh, - } - if pp.StreamID == 0 { - // PUSH_PROMISE frames MUST be associated with an existing, - // peer-initiated stream. The stream identifier of a - // PUSH_PROMISE frame indicates the stream it is associated - // with. If the stream identifier field specifies the value - // 0x0, a recipient MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - // The PUSH_PROMISE frame includes optional padding. 
- // Padding fields and flags are identical to those defined for DATA frames - var padLength uint8 - if fh.Flags.Has(FlagPushPromisePadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - - p, pp.PromiseID, err = readUint32(p) - if err != nil { - return - } - pp.PromiseID = pp.PromiseID & (1<<31 - 1) - - if int(padLength) > len(p) { - // like the DATA frame, error out if padding is longer than the body. - return nil, ConnectionError(ErrCodeProtocol) - } - pp.headerFragBuf = p[:len(p)-int(padLength)] - return pp, nil -} - -// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. -type PushPromiseParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - - // PromiseID is the required Stream ID which this - // Push Promises - PromiseID uint32 - - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 -} - -// WritePushPromise writes a single PushPromise Frame. -// -// As with Header Frames, This is the low level call for writing -// individual frames. Continuation frames are handled elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePushPromise(p PushPromiseParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagPushPromisePadded - } - if p.EndHeaders { - flags |= FlagPushPromiseEndHeaders - } - f.startWrite(FramePushPromise, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { - return errStreamID - } - f.writeUint32(p.PromiseID) - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// WriteRawFrame writes a raw frame. This can be used to write -// extension frames unknown to this package. -func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { - f.startWrite(t, flags, streamID) - f.writeBytes(payload) - return f.endWrite() -} - -func readByte(p []byte) (remain []byte, b byte, err error) { - if len(p) == 0 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[1:], p[0], nil -} - -func readUint32(p []byte) (remain []byte, v uint32, err error) { - if len(p) < 4 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[4:], binary.BigEndian.Uint32(p[:4]), nil -} - -type streamEnder interface { - StreamEnded() bool -} - -type headersEnder interface { - HeadersEnded() bool -} - -type headersOrContinuation interface { - headersEnder - HeaderBlockFragment() []byte -} - -// A MetaHeadersFrame is the representation of one HEADERS frame and -// zero or more contiguous CONTINUATION frames and the decoding of -// their HPACK-encoded contents. -// -// This type of frame does not appear on the wire and is only returned -// by the Framer when Framer.ReadMetaHeaders is set. -type MetaHeadersFrame struct { - *HeadersFrame - - // Fields are the fields contained in the HEADERS and - // CONTINUATION frames. The underlying slice is owned by the - // Framer and must not be retained after the next call to - // ReadFrame. 
- // - // Fields are guaranteed to be in the correct http2 order and - // not have unknown pseudo header fields or invalid header - // field names or values. Required pseudo header fields may be - // missing, however. Use the MetaHeadersFrame.Pseudo accessor - // method access pseudo headers. - Fields []hpack.HeaderField - - // Truncated is whether the max header list size limit was hit - // and Fields is incomplete. The hpack decoder state is still - // valid, however. - Truncated bool -} - -// PseudoValue returns the given pseudo header field's value. -// The provided pseudo field should not contain the leading colon. -func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { - for _, hf := range mh.Fields { - if !hf.IsPseudo() { - return "" - } - if hf.Name[1:] == pseudo { - return hf.Value - } - } - return "" -} - -// RegularFields returns the regular (non-pseudo) header fields of mh. -// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[i:] - } - } - return nil -} - -// PseudoFields returns the pseudo header fields of mh. -// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[:i] - } - } - return mh.Fields -} - -func (mh *MetaHeadersFrame) checkPseudos() error { - var isRequest, isResponse bool - pf := mh.PseudoFields() - for i, hf := range pf { - switch hf.Name { - case ":method", ":path", ":scheme", ":authority": - isRequest = true - case ":status": - isResponse = true - default: - return pseudoHeaderError(hf.Name) - } - // Check for duplicates. - // This would be a bad algorithm, but N is 4. - // And this doesn't allocate. - for _, hf2 := range pf[:i] { - if hf.Name == hf2.Name { - return duplicatePseudoHeaderError(hf.Name) - } - } - } - if isRequest && isResponse { - return errMixPseudoHeaderTypes - } - return nil -} - -func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 -} - -// readMetaFrame returns 0 or more CONTINUATION frames from fr and -// merge them into the provided hf and returns a MetaHeadersFrame -// with the decoded hpack values. 
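A hedged sketch of exercising this path from the outside: once the Framer's ReadMetaHeaders field holds an hpack.Decoder, ReadFrame returns *MetaHeadersFrame with the header block already decoded. The hpack encoder and decoder constructors are assumed from golang.org/x/net/http2/hpack, and the header values are arbitrary:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode a tiny request header block with HPACK; pseudo headers first.
	var hbuf bytes.Buffer
	enc := hpack.NewEncoder(&hbuf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
	enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
	enc.WriteField(hpack.HeaderField{Name: ":authority", Value: "example.com"})
	enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "sketch"})

	var wire bytes.Buffer
	w := http2.NewFramer(&wire, nil)
	if err := w.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1,
		BlockFragment: hbuf.Bytes(),
		EndStream:     true,
		EndHeaders:    true,
	}); err != nil {
		panic(err)
	}

	r := http2.NewFramer(nil, &wire)
	r.ReadMetaHeaders = hpack.NewDecoder(4096, nil) // enables the meta-frame path
	f, err := r.ReadFrame()
	if err != nil {
		panic(err)
	}
	mh := f.(*http2.MetaHeadersFrame)
	fmt.Println(":path =", mh.PseudoValue("path")) // pseudo name is given without the leading colon
	for _, hf := range mh.RegularFields() {
		fmt.Printf("%s: %s\n", hf.Name, hf.Value)
	}
}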
-func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { - if fr.AllowIllegalReads { - return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") - } - mh := &MetaHeadersFrame{ - HeadersFrame: hf, - } - var remainSize = fr.maxHeaderListSize() - var sawRegular bool - - var invalid error // pseudo header field errors - hdec := fr.ReadMetaHeaders - hdec.SetEmitEnabled(true) - hdec.SetMaxStringLength(fr.maxHeaderStringLen()) - hdec.SetEmitFunc(func(hf hpack.HeaderField) { - if VerboseLogs && fr.logReads { - fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) - } - if !httpguts.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) - } - isPseudo := strings.HasPrefix(hf.Name, ":") - if isPseudo { - if sawRegular { - invalid = errPseudoAfterRegular - } - } else { - sawRegular = true - if !validWireHeaderFieldName(hf.Name) { - invalid = headerFieldNameError(hf.Name) - } - } - - if invalid != nil { - hdec.SetEmitEnabled(false) - return - } - - size := hf.Size() - if size > remainSize { - hdec.SetEmitEnabled(false) - mh.Truncated = true - return - } - remainSize -= size - - mh.Fields = append(mh.Fields, hf) - }) - // Lose reference to MetaHeadersFrame: - defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) - - var hc headersOrContinuation = hf - for { - frag := hc.HeaderBlockFragment() - if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - - if hc.HeadersEnded() { - break - } - if f, err := fr.ReadFrame(); err != nil { - return nil, err - } else { - hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder - } - } - - mh.HeadersFrame.headerFragBuf = nil - mh.HeadersFrame.invalidate() - - if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - if invalid != nil { - fr.errDetail = invalid - if VerboseLogs { - log.Printf("http2: invalid header: %v", invalid) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} - } - if err := mh.checkPseudos(); err != nil { - fr.errDetail = err - if VerboseLogs { - log.Printf("http2: invalid pseudo headers: %v", err) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} - } - return mh, nil -} - -func summarizeFrame(f Frame) string { - var buf bytes.Buffer - f.Header().writeDebug(&buf) - switch f := f.(type) { - case *SettingsFrame: - n := 0 - f.ForeachSetting(func(s Setting) error { - n++ - if n == 1 { - buf.WriteString(", settings:") - } - fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) - return nil - }) - if n > 0 { - buf.Truncate(buf.Len() - 1) // remove trailing comma - } - case *DataFrame: - data := f.Data() - const max = 256 - if len(data) > max { - data = data[:max] - } - fmt.Fprintf(&buf, " data=%q", data) - if len(f.Data()) > max { - fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) - } - case *WindowUpdateFrame: - if f.StreamID == 0 { - buf.WriteString(" (conn)") - } - fmt.Fprintf(&buf, " incr=%v", f.Increment) - case *PingFrame: - fmt.Fprintf(&buf, " ping=%q", f.Data[:]) - case *GoAwayFrame: - fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", - f.LastStreamID, f.ErrCode, f.debugData) - case *RSTStreamFrame: - fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) - } - return buf.String() -} diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 3a131016b..000000000 --- a/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go deleted file mode 100644 index 9933c9f8c..000000000 --- a/vendor/golang.org/x/net/http2/gotrack.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Defensive debug-only utility to track that functions run on the -// goroutine that they're supposed to. - -package http2 - -import ( - "bytes" - "errors" - "fmt" - "os" - "runtime" - "strconv" - "sync" -) - -var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" - -type goroutineLock uint64 - -func newGoroutineLock() goroutineLock { - if !DebugGoroutines { - return 0 - } - return goroutineLock(curGoroutineID()) -} - -func (g goroutineLock) check() { - if !DebugGoroutines { - return - } - if curGoroutineID() != uint64(g) { - panic("running on the wrong goroutine") - } -} - -func (g goroutineLock) checkNotOn() { - if !DebugGoroutines { - return - } - if curGoroutineID() == uint64(g) { - panic("running on the wrong goroutine") - } -} - -var goroutineSpace = []byte("goroutine ") - -func curGoroutineID() uint64 { - bp := littleBuf.Get().(*[]byte) - defer littleBuf.Put(bp) - b := *bp - b = b[:runtime.Stack(b, false)] - // Parse the 4707 out of "goroutine 4707 [" - b = bytes.TrimPrefix(b, goroutineSpace) - i := bytes.IndexByte(b, ' ') - if i < 0 { - panic(fmt.Sprintf("No space found in %q", b)) - } - b = b[:i] - n, err := parseUintBytes(b, 10, 64) - if err != nil { - panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) - } - return n -} - -var littleBuf = sync.Pool{ - New: func() interface{} { - buf := make([]byte, 64) - return &buf - }, -} - -// parseUintBytes is like strconv.ParseUint, but using a []byte. -func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { - var cutoff, maxVal uint64 - - if bitSize == 0 { - bitSize = int(strconv.IntSize) - } - - s0 := s - switch { - case len(s) < 1: - err = strconv.ErrSyntax - goto Error - - case 2 <= base && base <= 36: - // valid base; nothing to do - - case base == 0: - // Look for octal, hex prefix. 
- switch { - case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): - base = 16 - s = s[2:] - if len(s) < 1 { - err = strconv.ErrSyntax - goto Error - } - case s[0] == '0': - base = 8 - default: - base = 10 - } - - default: - err = errors.New("invalid base " + strconv.Itoa(base)) - goto Error - } - - n = 0 - cutoff = cutoff64(base) - maxVal = 1<= base { - n = 0 - err = strconv.ErrSyntax - goto Error - } - - if n >= cutoff { - // n*base overflows - n = 1<<64 - 1 - err = strconv.ErrRange - goto Error - } - n *= uint64(base) - - n1 := n + uint64(v) - if n1 < n || n1 > maxVal { - // n+v overflows - n = 1<<64 - 1 - err = strconv.ErrRange - goto Error - } - n = n1 - } - - return n, nil - -Error: - return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} -} - -// Return the first number n such that n*base >= 1<<64. -func cutoff64(base int) uint64 { - if base < 2 { - return 0 - } - return (1<<64-1)/uint64(base) + 1 -} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go deleted file mode 100644 index c3ff3fa1c..000000000 --- a/vendor/golang.org/x/net/http2/headermap.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "net/http" - "strings" - "sync" -) - -var ( - commonBuildOnce sync.Once - commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case - commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case -) - -func buildCommonHeaderMapsOnce() { - commonBuildOnce.Do(buildCommonHeaderMaps) -} - -func buildCommonHeaderMaps() { - common := []string{ - "accept", - "accept-charset", - "accept-encoding", - "accept-language", - "accept-ranges", - "age", - "access-control-allow-origin", - "allow", - "authorization", - "cache-control", - "content-disposition", - "content-encoding", - "content-language", - "content-length", - "content-location", - "content-range", - "content-type", - "cookie", - "date", - "etag", - "expect", - "expires", - "from", - "host", - "if-match", - "if-modified-since", - "if-none-match", - "if-unmodified-since", - "last-modified", - "link", - "location", - "max-forwards", - "proxy-authenticate", - "proxy-authorization", - "range", - "referer", - "refresh", - "retry-after", - "server", - "set-cookie", - "strict-transport-security", - "trailer", - "transfer-encoding", - "user-agent", - "vary", - "via", - "www-authenticate", - } - commonLowerHeader = make(map[string]string, len(common)) - commonCanonHeader = make(map[string]string, len(common)) - for _, v := range common { - chk := http.CanonicalHeaderKey(v) - commonLowerHeader[chk] = v - commonCanonHeader[v] = chk - } -} - -func lowerHeader(v string) string { - buildCommonHeaderMapsOnce() - if s, ok := commonLowerHeader[v]; ok { - return s - } - return strings.ToLower(v) -} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go deleted file mode 100644 index 1565cf270..000000000 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package hpack - -import ( - "io" -) - -const ( - uint32Max = ^uint32(0) - initialHeaderTableSize = 4096 -) - -type Encoder struct { - dynTab dynamicTable - // minSize is the minimum table size set by - // SetMaxDynamicTableSize after the previous Header Table Size - // Update. - minSize uint32 - // maxSizeLimit is the maximum table size this encoder - // supports. This will protect the encoder from too large - // size. - maxSizeLimit uint32 - // tableSizeUpdate indicates whether "Header Table Size - // Update" is required. - tableSizeUpdate bool - w io.Writer - buf []byte -} - -// NewEncoder returns a new Encoder which performs HPACK encoding. An -// encoded data is written to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{ - minSize: uint32Max, - maxSizeLimit: initialHeaderTableSize, - tableSizeUpdate: false, - w: w, - } - e.dynTab.table.init() - e.dynTab.setMaxSize(initialHeaderTableSize) - return e -} - -// WriteField encodes f into a single Write to e's underlying Writer. -// This function may also produce bytes for "Header Table Size Update" -// if necessary. If produced, it is done before encoding f. -func (e *Encoder) WriteField(f HeaderField) error { - e.buf = e.buf[:0] - - if e.tableSizeUpdate { - e.tableSizeUpdate = false - if e.minSize < e.dynTab.maxSize { - e.buf = appendTableSize(e.buf, e.minSize) - } - e.minSize = uint32Max - e.buf = appendTableSize(e.buf, e.dynTab.maxSize) - } - - idx, nameValueMatch := e.searchTable(f) - if nameValueMatch { - e.buf = appendIndexed(e.buf, idx) - } else { - indexing := e.shouldIndex(f) - if indexing { - e.dynTab.add(f) - } - - if idx == 0 { - e.buf = appendNewName(e.buf, f, indexing) - } else { - e.buf = appendIndexedName(e.buf, f, idx, indexing) - } - } - n, err := e.w.Write(e.buf) - if err == nil && n != len(e.buf) { - err = io.ErrShortWrite - } - return err -} - -// searchTable searches f in both stable and dynamic header tables. -// The static header table is searched first. Only when there is no -// exact match for both name and value, the dynamic header table is -// then searched. If there is no match, i is 0. If both name and value -// match, i is the matched index and nameValueMatch becomes true. If -// only name matches, i points to that index and nameValueMatch -// becomes false. -func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - i, nameValueMatch = staticTable.search(f) - if nameValueMatch { - return i, true - } - - j, nameValueMatch := e.dynTab.table.search(f) - if nameValueMatch || (i == 0 && j != 0) { - return j + uint64(staticTable.len()), nameValueMatch - } - - return i, false -} - -// SetMaxDynamicTableSize changes the dynamic header table size to v. -// The actual size is bounded by the value passed to -// SetMaxDynamicTableSizeLimit. -func (e *Encoder) SetMaxDynamicTableSize(v uint32) { - if v > e.maxSizeLimit { - v = e.maxSizeLimit - } - if v < e.minSize { - e.minSize = v - } - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) -} - -// SetMaxDynamicTableSizeLimit changes the maximum value that can be -// specified in SetMaxDynamicTableSize to v. By default, it is set to -// 4096, which is the same size of the default dynamic header table -// size described in HPACK specification. If the current maximum -// dynamic header table size is strictly greater than v, "Header Table -// Size Update" will be done in the next WriteField call and the -// maximum dynamic header table size is truncated to v. 
-func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { - e.maxSizeLimit = v - if e.dynTab.maxSize > v { - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) - } -} - -// shouldIndex reports whether f should be indexed. -func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.Size() <= e.dynTab.maxSize -} - -// appendIndexed appends index i, as encoded in "Indexed Header Field" -// representation, to dst and returns the extended buffer. -func appendIndexed(dst []byte, i uint64) []byte { - first := len(dst) - dst = appendVarInt(dst, 7, i) - dst[first] |= 0x80 - return dst -} - -// appendNewName appends f, as encoded in one of "Literal Header field -// - New Name" representation variants, to dst and returns the -// extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Inremental Indexing" -// representation is used. -func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { - dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) - dst = appendHpackString(dst, f.Name) - return appendHpackString(dst, f.Value) -} - -// appendIndexedName appends f and index i referring indexed name -// entry, as encoded in one of "Literal Header field - Indexed Name" -// representation variants, to dst and returns the extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Incremental Indexing" -// representation is used. -func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { - first := len(dst) - var n byte - if indexing { - n = 6 - } else { - n = 4 - } - dst = appendVarInt(dst, n, i) - dst[first] |= encodeTypeByte(indexing, f.Sensitive) - return appendHpackString(dst, f.Value) -} - -// appendTableSize appends v, as encoded in "Header Table Size Update" -// representation, to dst and returns the extended buffer. -func appendTableSize(dst []byte, v uint32) []byte { - first := len(dst) - dst = appendVarInt(dst, 5, uint64(v)) - dst[first] |= 0x20 - return dst -} - -// appendVarInt appends i, as encoded in variable integer form using n -// bit prefix, to dst and returns the extended buffer. -// -// See -// http://http2.github.io/http2-spec/compression.html#integer.representation -func appendVarInt(dst []byte, n byte, i uint64) []byte { - k := uint64((1 << n) - 1) - if i < k { - return append(dst, byte(i)) - } - dst = append(dst, byte(k)) - i -= k - for ; i >= 128; i >>= 7 { - dst = append(dst, byte(0x80|(i&0x7f))) - } - return append(dst, byte(i)) -} - -// appendHpackString appends s, as encoded in "String Literal" -// representation, to dst and returns the extended buffer. -// -// s will be encoded in Huffman codes only when it produces strictly -// shorter byte string. -func appendHpackString(dst []byte, s string) []byte { - huffmanLength := HuffmanEncodeLength(s) - if huffmanLength < uint64(len(s)) { - first := len(dst) - dst = appendVarInt(dst, 7, huffmanLength) - dst = AppendHuffmanString(dst, s) - dst[first] |= 0x80 - } else { - dst = appendVarInt(dst, 7, uint64(len(s))) - dst = append(dst, s...) - } - return dst -} - -// encodeTypeByte returns type byte. If sensitive is true, type byte -// for "Never Indexed" representation is returned. If sensitive is -// false and indexing is true, type byte for "Incremental Indexing" -// representation is returned. Otherwise, type byte for "Without -// Indexing" is returned. 
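For reference, the encoder and decoder in this removed hpack copy are normally driven through their exported API. A rough round-trip sketch, assuming the golang.org/x/net/http2/hpack import is still available to the module, with one field marked Sensitive so it takes the never-indexed representation:

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "golang.org/x/net/http2/hpack"
    )

    func main() {
        var buf bytes.Buffer
        enc := hpack.NewEncoder(&buf)

        // Encode two fields; the Sensitive one uses the never-indexed form.
        fields := []hpack.HeaderField{
            {Name: ":method", Value: "GET"},
            {Name: "authorization", Value: "secret", Sensitive: true},
        }
        for _, f := range fields {
            if err := enc.WriteField(f); err != nil {
                log.Fatal(err)
            }
        }

        // Decode the block back via the emit callback.
        var out []hpack.HeaderField
        dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) { out = append(out, f) })
        if _, err := dec.Write(buf.Bytes()); err != nil {
            log.Fatal(err)
        }
        if err := dec.Close(); err != nil {
            log.Fatal(err)
        }
        for _, f := range out {
            fmt.Println(f) // the sensitive field round-trips with Sensitive set
        }
    }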
-func encodeTypeByte(indexing, sensitive bool) byte { - if sensitive { - return 0x10 - } - if indexing { - return 0x40 - } - return 0 -} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go deleted file mode 100644 index 85f18a2b0..000000000 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package hpack implements HPACK, a compression format for -// efficiently representing HTTP header fields in the context of HTTP/2. -// -// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 -package hpack - -import ( - "bytes" - "errors" - "fmt" -) - -// A DecodingError is something the spec defines as a decoding error. -type DecodingError struct { - Err error -} - -func (de DecodingError) Error() string { - return fmt.Sprintf("decoding error: %v", de.Err) -} - -// An InvalidIndexError is returned when an encoder references a table -// entry before the static table or after the end of the dynamic table. -type InvalidIndexError int - -func (e InvalidIndexError) Error() string { - return fmt.Sprintf("invalid indexed representation index %d", int(e)) -} - -// A HeaderField is a name-value pair. Both the name and value are -// treated as opaque sequences of octets. -type HeaderField struct { - Name, Value string - - // Sensitive means that this header field should never be - // indexed. - Sensitive bool -} - -// IsPseudo reports whether the header field is an http2 pseudo header. -// That is, it reports whether it starts with a colon. -// It is not otherwise guaranteed to be a valid pseudo header field, -// though. -func (hf HeaderField) IsPseudo() bool { - return len(hf.Name) != 0 && hf.Name[0] == ':' -} - -func (hf HeaderField) String() string { - var suffix string - if hf.Sensitive { - suffix = " (sensitive)" - } - return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) -} - -// Size returns the size of an entry per RFC 7541 section 4.1. -func (hf HeaderField) Size() uint32 { - // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 - // "The size of the dynamic table is the sum of the size of - // its entries. The size of an entry is the sum of its name's - // length in octets (as defined in Section 5.2), its value's - // length in octets (see Section 5.2), plus 32. The size of - // an entry is calculated using the length of the name and - // value without any Huffman encoding applied." - - // This can overflow if somebody makes a large HeaderField - // Name and/or Value by hand, but we don't care, because that - // won't happen on the wire because the encoding doesn't allow - // it. - return uint32(len(hf.Name) + len(hf.Value) + 32) -} - -// A Decoder is the decoding context for incremental processing of -// header blocks. -type Decoder struct { - dynTab dynamicTable - emit func(f HeaderField) - - emitEnabled bool // whether calls to emit are enabled - maxStrLen int // 0 means unlimited - - // buf is the unparsed buffer. It's only written to - // saveBuf if it was truncated in the middle of a header - // block. Because it's usually not owned, we can only - // process it under Write. - buf []byte // not owned; only valid during Write - - // saveBuf is previous data passed to Write which we weren't able - // to fully parse before. Unlike buf, we own this data. 
- saveBuf bytes.Buffer - - firstField bool // processing the first field of the header block -} - -// NewDecoder returns a new decoder with the provided maximum dynamic -// table size. The emitFunc will be called for each valid field -// parsed, in the same goroutine as calls to Write, before Write returns. -func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { - d := &Decoder{ - emit: emitFunc, - emitEnabled: true, - firstField: true, - } - d.dynTab.table.init() - d.dynTab.allowedMaxSize = maxDynamicTableSize - d.dynTab.setMaxSize(maxDynamicTableSize) - return d -} - -// ErrStringLength is returned by Decoder.Write when the max string length -// (as configured by Decoder.SetMaxStringLength) would be violated. -var ErrStringLength = errors.New("hpack: string too long") - -// SetMaxStringLength sets the maximum size of a HeaderField name or -// value string. If a string exceeds this length (even after any -// decompression), Write will return ErrStringLength. -// A value of 0 means unlimited and is the default from NewDecoder. -func (d *Decoder) SetMaxStringLength(n int) { - d.maxStrLen = n -} - -// SetEmitFunc changes the callback used when new header fields -// are decoded. -// It must be non-nil. It does not affect EmitEnabled. -func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { - d.emit = emitFunc -} - -// SetEmitEnabled controls whether the emitFunc provided to NewDecoder -// should be called. The default is true. -// -// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE -// while still decoding and keeping in-sync with decoder state, but -// without doing unnecessary decompression or generating unnecessary -// garbage for header fields past the limit. -func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } - -// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder -// are currently enabled. The default is true. -func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } - -// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their -// underlying buffers for garbage reasons. - -func (d *Decoder) SetMaxDynamicTableSize(v uint32) { - d.dynTab.setMaxSize(v) -} - -// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded -// stream (via dynamic table size updates) may set the maximum size -// to. -func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { - d.dynTab.allowedMaxSize = v -} - -type dynamicTable struct { - // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - table headerFieldTable - size uint32 // in bytes - maxSize uint32 // current maxSize - allowedMaxSize uint32 // maxSize may go up to this, inclusive -} - -func (dt *dynamicTable) setMaxSize(v uint32) { - dt.maxSize = v - dt.evict() -} - -func (dt *dynamicTable) add(f HeaderField) { - dt.table.addEntry(f) - dt.size += f.Size() - dt.evict() -} - -// If we're too big, evict old stuff. -func (dt *dynamicTable) evict() { - var n int - for dt.size > dt.maxSize && n < dt.table.len() { - dt.size -= dt.table.ents[n].Size() - n++ - } - dt.table.evictOldest(n) -} - -func (d *Decoder) maxTableIndex() int { - // This should never overflow. RFC 7540 Section 6.5.2 limits the size of - // the dynamic table to 2^32 bytes, where each entry will occupy more than - // one byte. Further, the staticTable has a fixed, small length. - return d.dynTab.table.len() + staticTable.len() -} - -func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - // See Section 2.3.3. 
- if i == 0 { - return - } - if i <= uint64(staticTable.len()) { - return staticTable.ents[i-1], true - } - if i > uint64(d.maxTableIndex()) { - return - } - // In the dynamic table, newer entries have lower indices. - // However, dt.ents[0] is the oldest entry. Hence, dt.ents is - // the reversed dynamic table. - dt := d.dynTab.table - return dt.ents[dt.len()-(int(i)-staticTable.len())], true -} - -// Decode decodes an entire block. -// -// TODO: remove this method and make it incremental later? This is -// easier for debugging now. -func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { - var hf []HeaderField - saveFunc := d.emit - defer func() { d.emit = saveFunc }() - d.emit = func(f HeaderField) { hf = append(hf, f) } - if _, err := d.Write(p); err != nil { - return nil, err - } - if err := d.Close(); err != nil { - return nil, err - } - return hf, nil -} - -// Close declares that the decoding is complete and resets the Decoder -// to be reused again for a new header block. If there is any remaining -// data in the decoder's buffer, Close returns an error. -func (d *Decoder) Close() error { - if d.saveBuf.Len() > 0 { - d.saveBuf.Reset() - return DecodingError{errors.New("truncated headers")} - } - d.firstField = true - return nil -} - -func (d *Decoder) Write(p []byte) (n int, err error) { - if len(p) == 0 { - // Prevent state machine CPU attacks (making us redo - // work up to the point of finding out we don't have - // enough data) - return - } - // Only copy the data if we have to. Optimistically assume - // that p will contain a complete header block. - if d.saveBuf.Len() == 0 { - d.buf = p - } else { - d.saveBuf.Write(p) - d.buf = d.saveBuf.Bytes() - d.saveBuf.Reset() - } - - for len(d.buf) > 0 { - err = d.parseHeaderFieldRepr() - if err == errNeedMore { - // Extra paranoia, making sure saveBuf won't - // get too large. All the varint and string - // reading code earlier should already catch - // overlong things and return ErrStringLength, - // but keep this as a last resort. - const varIntOverhead = 8 // conservative - if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { - return 0, ErrStringLength - } - d.saveBuf.Write(d.buf) - return len(p), nil - } - d.firstField = false - if err != nil { - break - } - } - return len(p), err -} - -// errNeedMore is an internal sentinel error value that means the -// buffer is truncated and we need to read more data before we can -// continue parsing. -var errNeedMore = errors.New("need more data") - -type indexType int - -const ( - indexedTrue indexType = iota - indexedFalse - indexedNever -) - -func (v indexType) indexed() bool { return v == indexedTrue } -func (v indexType) sensitive() bool { return v == indexedNever } - -// returns errNeedMore if there isn't enough data available. -// any other error is fatal. -// consumes d.buf iff it returns nil. -// precondition: must be called with len(d.buf) > 0 -func (d *Decoder) parseHeaderFieldRepr() error { - b := d.buf[0] - switch { - case b&128 != 0: - // Indexed representation. - // High bit set? 
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 - return d.parseFieldIndexed() - case b&192 == 64: - // 6.2.1 Literal Header Field with Incremental Indexing - // 0b10xxxxxx: top two bits are 10 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 - return d.parseFieldLiteral(6, indexedTrue) - case b&240 == 0: - // 6.2.2 Literal Header Field without Indexing - // 0b0000xxxx: top four bits are 0000 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 - return d.parseFieldLiteral(4, indexedFalse) - case b&240 == 16: - // 6.2.3 Literal Header Field never Indexed - // 0b0001xxxx: top four bits are 0001 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 - return d.parseFieldLiteral(4, indexedNever) - case b&224 == 32: - // 6.3 Dynamic Table Size Update - // Top three bits are '001'. - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 - return d.parseDynamicTableSizeUpdate() - } - - return DecodingError{errors.New("invalid encoding")} -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldIndexed() error { - buf := d.buf - idx, buf, err := readVarInt(7, buf) - if err != nil { - return err - } - hf, ok := d.at(idx) - if !ok { - return DecodingError{InvalidIndexError(idx)} - } - d.buf = buf - return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { - buf := d.buf - nameIdx, buf, err := readVarInt(n, buf) - if err != nil { - return err - } - - var hf HeaderField - wantStr := d.emitEnabled || it.indexed() - if nameIdx > 0 { - ihf, ok := d.at(nameIdx) - if !ok { - return DecodingError{InvalidIndexError(nameIdx)} - } - hf.Name = ihf.Name - } else { - hf.Name, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - } - hf.Value, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - d.buf = buf - if it.indexed() { - d.dynTab.add(hf) - } - hf.Sensitive = it.sensitive() - return d.callEmit(hf) -} - -func (d *Decoder) callEmit(hf HeaderField) error { - if d.maxStrLen != 0 { - if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { - return ErrStringLength - } - } - if d.emitEnabled { - d.emit(hf) - } - return nil -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseDynamicTableSizeUpdate() error { - // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the - // beginning of the first header block following the change to the dynamic table size. - if !d.firstField && d.dynTab.size > 0 { - return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")} - } - - buf := d.buf - size, buf, err := readVarInt(5, buf) - if err != nil { - return err - } - if size > uint64(d.dynTab.allowedMaxSize) { - return DecodingError{errors.New("dynamic table size update too large")} - } - d.dynTab.setMaxSize(uint32(size)) - d.buf = buf - return nil -} - -var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} - -// readVarInt reads an unsigned variable length integer off the -// beginning of p. n is the parameter as described in -// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. -// -// n must always be between 1 and 8. -// -// The returned remain buffer is either a smaller suffix of p, or err != nil. -// The error is errNeedMore if p doesn't contain a complete integer. 
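The integer form referred to here (RFC 7541 section 5.1 prefix integers) is compact but easy to get wrong from memory. The following is an illustrative re-implementation, not the package's internal appendVarInt/readVarInt; the function names are invented for the sketch:

    package main

    import "fmt"

    // encodePrefixInt encodes i using an n-bit prefix (1 <= n <= 8), as in
    // RFC 7541 section 5.1. The prefix bits occupy the first byte.
    func encodePrefixInt(n uint, i uint64) []byte {
        k := uint64(1)<<n - 1
        if i < k {
            return []byte{byte(i)}
        }
        out := []byte{byte(k)}
        i -= k
        for i >= 128 {
            out = append(out, byte(0x80|i&0x7f))
            i >>= 7
        }
        return append(out, byte(i))
    }

    // decodePrefixInt is the inverse; it ignores any bits above the prefix.
    func decodePrefixInt(n uint, p []byte) uint64 {
        k := uint64(1)<<n - 1
        i := uint64(p[0]) & k
        if i < k {
            return i
        }
        var m uint
        for _, b := range p[1:] {
            i += uint64(b&0x7f) << m
            if b&0x80 == 0 {
                break
            }
            m += 7
        }
        return i
    }

    func main() {
        // The classic RFC 7541 example: 1337 with a 5-bit prefix
        // encodes to 1f 9a 0a.
        enc := encodePrefixInt(5, 1337)
        fmt.Printf("% x -> %d\n", enc, decodePrefixInt(5, enc))
    }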
-func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { - if n < 1 || n > 8 { - panic("bad n") - } - if len(p) == 0 { - return 0, p, errNeedMore - } - i = uint64(p[0]) - if n < 8 { - i &= (1 << uint64(n)) - 1 - } - if i < (1< 0 { - b := p[0] - p = p[1:] - i += uint64(b&127) << m - if b&128 == 0 { - return i, p, nil - } - m += 7 - if m >= 63 { // TODO: proper overflow check. making this up. - return 0, origP, errVarintOverflow - } - } - return 0, origP, errNeedMore -} - -// readString decodes an hpack string from p. -// -// wantStr is whether s will be used. If false, decompression and -// []byte->string garbage are skipped if s will be ignored -// anyway. This does mean that huffman decoding errors for non-indexed -// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server -// is returning an error anyway, and because they're not indexed, the error -// won't affect the decoding state. -func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { - if len(p) == 0 { - return "", p, errNeedMore - } - isHuff := p[0]&128 != 0 - strLen, p, err := readVarInt(7, p) - if err != nil { - return "", p, err - } - if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { - return "", nil, ErrStringLength - } - if uint64(len(p)) < strLen { - return "", p, errNeedMore - } - if !isHuff { - if wantStr { - s = string(p[:strLen]) - } - return s, p[strLen:], nil - } - - if wantStr { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() // don't trust others - defer bufPool.Put(buf) - if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { - buf.Reset() - return "", nil, err - } - s = buf.String() - buf.Reset() // be nice to GC - } - return s, p[strLen:], nil -} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go deleted file mode 100644 index b412a96c5..000000000 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "bytes" - "errors" - "io" - "sync" -) - -var bufPool = sync.Pool{ - New: func() interface{} { return new(bytes.Buffer) }, -} - -// HuffmanDecode decodes the string in v and writes the expanded -// result to w, returning the number of bytes written to w and the -// Write call's return value. At most one Write call is made. -func HuffmanDecode(w io.Writer, v []byte) (int, error) { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - if err := huffmanDecode(buf, 0, v); err != nil { - return 0, err - } - return w.Write(buf.Bytes()) -} - -// HuffmanDecodeToString decodes the string in v. -func HuffmanDecodeToString(v []byte) (string, error) { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - if err := huffmanDecode(buf, 0, v); err != nil { - return "", err - } - return buf.String(), nil -} - -// ErrInvalidHuffman is returned for errors found decoding -// Huffman-encoded strings. -var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") - -// huffmanDecode decodes v to buf. -// If maxLen is greater than 0, attempts to write more to buf than -// maxLen bytes will return ErrStringLength. -func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { - rootHuffmanNode := getRootHuffmanNode() - n := rootHuffmanNode - // cur is the bit buffer that has not been fed into n. 
- // cbits is the number of low order bits in cur that are valid. - // sbits is the number of bits of the symbol prefix being decoded. - cur, cbits, sbits := uint(0), uint8(0), uint8(0) - for _, b := range v { - cur = cur<<8 | uint(b) - cbits += 8 - sbits += 8 - for cbits >= 8 { - idx := byte(cur >> (cbits - 8)) - n = n.children[idx] - if n == nil { - return ErrInvalidHuffman - } - if n.children == nil { - if maxLen != 0 && buf.Len() == maxLen { - return ErrStringLength - } - buf.WriteByte(n.sym) - cbits -= n.codeLen - n = rootHuffmanNode - sbits = cbits - } else { - cbits -= 8 - } - } - } - for cbits > 0 { - n = n.children[byte(cur<<(8-cbits))] - if n == nil { - return ErrInvalidHuffman - } - if n.children != nil || n.codeLen > cbits { - break - } - if maxLen != 0 && buf.Len() == maxLen { - return ErrStringLength - } - buf.WriteByte(n.sym) - cbits -= n.codeLen - n = rootHuffmanNode - sbits = cbits - } - if sbits > 7 { - // Either there was an incomplete symbol, or overlong padding. - // Both are decoding errors per RFC 7541 section 5.2. - return ErrInvalidHuffman - } - if mask := uint(1< 8 { - codeLen -= 8 - i := uint8(code >> codeLen) - if cur.children[i] == nil { - cur.children[i] = newInternalNode() - } - cur = cur.children[i] - } - shift := 8 - codeLen - start, end := int(uint8(code<> (nbits - rembits)) - dst[len(dst)-1] |= t - } - - return dst -} - -// HuffmanEncodeLength returns the number of bytes required to encode -// s in Huffman codes. The result is round up to byte boundary. -func HuffmanEncodeLength(s string) uint64 { - n := uint64(0) - for i := 0; i < len(s); i++ { - n += uint64(huffmanCodeLen[s[i]]) - } - return (n + 7) / 8 -} - -// appendByteToHuffmanCode appends Huffman code for c to dst and -// returns the extended buffer and the remaining bits in the last -// element. The appending is not byte aligned and the remaining bits -// in the last element of dst is given in rembits. -func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { - code := huffmanCodes[c] - nbits := huffmanCodeLen[c] - - for { - if rembits > nbits { - t := uint8(code << (rembits - nbits)) - dst[len(dst)-1] |= t - rembits -= nbits - break - } - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t - - nbits -= rembits - rembits = 8 - - if nbits == 0 { - break - } - - dst = append(dst, 0) - } - - return dst, rembits -} diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go deleted file mode 100644 index a66cfbea6..000000000 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ /dev/null @@ -1,479 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "fmt" -) - -// headerFieldTable implements a list of HeaderFields. -// This is used to implement the static and dynamic tables. -type headerFieldTable struct { - // For static tables, entries are never evicted. - // - // For dynamic tables, entries are evicted from ents[0] and added to the end. - // Each entry has a unique id that starts at one and increments for each - // entry that is added. This unique id is stable across evictions, meaning - // it can be used as a pointer to a specific entry. As in hpack, unique ids - // are 1-based. The unique id for ents[k] is k + evictCount + 1. - // - // Zero is not a valid unique id. - // - // evictCount should not overflow in any remotely practical situation. 
In - // practice, we will have one dynamic table per HTTP/2 connection. If we - // assume a very powerful server that handles 1M QPS per connection and each - // request adds (then evicts) 100 entries from the table, it would still take - // 2M years for evictCount to overflow. - ents []HeaderField - evictCount uint64 - - // byName maps a HeaderField name to the unique id of the newest entry with - // the same name. See above for a definition of "unique id". - byName map[string]uint64 - - // byNameValue maps a HeaderField name/value pair to the unique id of the newest - // entry with the same name and value. See above for a definition of "unique id". - byNameValue map[pairNameValue]uint64 -} - -type pairNameValue struct { - name, value string -} - -func (t *headerFieldTable) init() { - t.byName = make(map[string]uint64) - t.byNameValue = make(map[pairNameValue]uint64) -} - -// len reports the number of entries in the table. -func (t *headerFieldTable) len() int { - return len(t.ents) -} - -// addEntry adds a new entry. -func (t *headerFieldTable) addEntry(f HeaderField) { - id := uint64(t.len()) + t.evictCount + 1 - t.byName[f.Name] = id - t.byNameValue[pairNameValue{f.Name, f.Value}] = id - t.ents = append(t.ents, f) -} - -// evictOldest evicts the n oldest entries in the table. -func (t *headerFieldTable) evictOldest(n int) { - if n > t.len() { - panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) - } - for k := 0; k < n; k++ { - f := t.ents[k] - id := t.evictCount + uint64(k) + 1 - if t.byName[f.Name] == id { - delete(t.byName, f.Name) - } - if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { - delete(t.byNameValue, p) - } - } - copy(t.ents, t.ents[n:]) - for k := t.len() - n; k < t.len(); k++ { - t.ents[k] = HeaderField{} // so strings can be garbage collected - } - t.ents = t.ents[:t.len()-n] - if t.evictCount+uint64(n) < t.evictCount { - panic("evictCount overflow") - } - t.evictCount += uint64(n) -} - -// search finds f in the table. If there is no match, i is 0. -// If both name and value match, i is the matched index and nameValueMatch -// becomes true. If only name matches, i points to that index and -// nameValueMatch becomes false. -// -// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says -// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, -// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic -// table, the return value i actually refers to the entry t.ents[t.len()-i]. -// -// All tables are assumed to be a dynamic tables except for the global -// staticTable pointer. -// -// See Section 2.3.3. -func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - if !f.Sensitive { - if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { - return t.idToIndex(id), true - } - } - if id := t.byName[f.Name]; id != 0 { - return t.idToIndex(id), false - } - return 0, false -} - -// idToIndex converts a unique id to an HPACK index. -// See Section 2.3.3. 
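The 1-based indexing described here is easiest to confirm from the wire: a byte with the high bit set is an Indexed Header Field, and static table index 2 is ":method: GET". A short check using the decoder's exported DecodeFull, again assuming the standard hpack import path:

    package main

    import (
        "fmt"

        "golang.org/x/net/http2/hpack"
    )

    func main() {
        // 0x82 = 1000_0010: indexed representation, static table index 2,
        // which RFC 7541 Appendix A defines as ":method: GET".
        dec := hpack.NewDecoder(4096, nil)
        fields, err := dec.DecodeFull([]byte{0x82})
        if err != nil {
            panic(err)
        }
        fmt.Println(fields[0].Name, fields[0].Value) // :method GET
    }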
-func (t *headerFieldTable) idToIndex(id uint64) uint64 { - if id <= t.evictCount { - panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) - } - k := id - t.evictCount - 1 // convert id to an index t.ents[k] - if t != staticTable { - return uint64(t.len()) - k // dynamic table - } - return k + 1 -} - -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = newStaticTable() -var staticTableEntries = [...]HeaderField{ - {Name: ":authority"}, - {Name: ":method", Value: "GET"}, - {Name: ":method", Value: "POST"}, - {Name: ":path", Value: "/"}, - {Name: ":path", Value: "/index.html"}, - {Name: ":scheme", Value: "http"}, - {Name: ":scheme", Value: "https"}, - {Name: ":status", Value: "200"}, - {Name: ":status", Value: "204"}, - {Name: ":status", Value: "206"}, - {Name: ":status", Value: "304"}, - {Name: ":status", Value: "400"}, - {Name: ":status", Value: "404"}, - {Name: ":status", Value: "500"}, - {Name: "accept-charset"}, - {Name: "accept-encoding", Value: "gzip, deflate"}, - {Name: "accept-language"}, - {Name: "accept-ranges"}, - {Name: "accept"}, - {Name: "access-control-allow-origin"}, - {Name: "age"}, - {Name: "allow"}, - {Name: "authorization"}, - {Name: "cache-control"}, - {Name: "content-disposition"}, - {Name: "content-encoding"}, - {Name: "content-language"}, - {Name: "content-length"}, - {Name: "content-location"}, - {Name: "content-range"}, - {Name: "content-type"}, - {Name: "cookie"}, - {Name: "date"}, - {Name: "etag"}, - {Name: "expect"}, - {Name: "expires"}, - {Name: "from"}, - {Name: "host"}, - {Name: "if-match"}, - {Name: "if-modified-since"}, - {Name: "if-none-match"}, - {Name: "if-range"}, - {Name: "if-unmodified-since"}, - {Name: "last-modified"}, - {Name: "link"}, - {Name: "location"}, - {Name: "max-forwards"}, - {Name: "proxy-authenticate"}, - {Name: "proxy-authorization"}, - {Name: "range"}, - {Name: "referer"}, - {Name: "refresh"}, - {Name: "retry-after"}, - {Name: "server"}, - {Name: "set-cookie"}, - {Name: "strict-transport-security"}, - {Name: "transfer-encoding"}, - {Name: "user-agent"}, - {Name: "vary"}, - {Name: "via"}, - {Name: "www-authenticate"}, -} - -func newStaticTable() *headerFieldTable { - t := &headerFieldTable{} - t.init() - for _, e := range staticTableEntries[:] { - t.addEntry(e) - } - return t -} - -var huffmanCodes = [256]uint32{ - 0x1ff8, - 0x7fffd8, - 0xfffffe2, - 0xfffffe3, - 0xfffffe4, - 0xfffffe5, - 0xfffffe6, - 0xfffffe7, - 0xfffffe8, - 0xffffea, - 0x3ffffffc, - 0xfffffe9, - 0xfffffea, - 0x3ffffffd, - 0xfffffeb, - 0xfffffec, - 0xfffffed, - 0xfffffee, - 0xfffffef, - 0xffffff0, - 0xffffff1, - 0xffffff2, - 0x3ffffffe, - 0xffffff3, - 0xffffff4, - 0xffffff5, - 0xffffff6, - 0xffffff7, - 0xffffff8, - 0xffffff9, - 0xffffffa, - 0xffffffb, - 0x14, - 0x3f8, - 0x3f9, - 0xffa, - 0x1ff9, - 0x15, - 0xf8, - 0x7fa, - 0x3fa, - 0x3fb, - 0xf9, - 0x7fb, - 0xfa, - 0x16, - 0x17, - 0x18, - 0x0, - 0x1, - 0x2, - 0x19, - 0x1a, - 0x1b, - 0x1c, - 0x1d, - 0x1e, - 0x1f, - 0x5c, - 0xfb, - 0x7ffc, - 0x20, - 0xffb, - 0x3fc, - 0x1ffa, - 0x21, - 0x5d, - 0x5e, - 0x5f, - 0x60, - 0x61, - 0x62, - 0x63, - 0x64, - 0x65, - 0x66, - 0x67, - 0x68, - 0x69, - 0x6a, - 0x6b, - 0x6c, - 0x6d, - 0x6e, - 0x6f, - 0x70, - 0x71, - 0x72, - 0xfc, - 0x73, - 0xfd, - 0x1ffb, - 0x7fff0, - 0x1ffc, - 0x3ffc, - 0x22, - 0x7ffd, - 0x3, - 0x23, - 0x4, - 0x24, - 0x5, - 0x25, - 0x26, - 0x27, - 0x6, - 0x74, - 0x75, - 0x28, - 0x29, - 0x2a, - 0x7, - 0x2b, - 0x76, - 0x2c, - 0x8, - 0x9, - 0x2d, - 0x77, - 0x78, - 0x79, - 0x7a, - 0x7b, - 0x7ffe, - 0x7fc, - 
0x3ffd, - 0x1ffd, - 0xffffffc, - 0xfffe6, - 0x3fffd2, - 0xfffe7, - 0xfffe8, - 0x3fffd3, - 0x3fffd4, - 0x3fffd5, - 0x7fffd9, - 0x3fffd6, - 0x7fffda, - 0x7fffdb, - 0x7fffdc, - 0x7fffdd, - 0x7fffde, - 0xffffeb, - 0x7fffdf, - 0xffffec, - 0xffffed, - 0x3fffd7, - 0x7fffe0, - 0xffffee, - 0x7fffe1, - 0x7fffe2, - 0x7fffe3, - 0x7fffe4, - 0x1fffdc, - 0x3fffd8, - 0x7fffe5, - 0x3fffd9, - 0x7fffe6, - 0x7fffe7, - 0xffffef, - 0x3fffda, - 0x1fffdd, - 0xfffe9, - 0x3fffdb, - 0x3fffdc, - 0x7fffe8, - 0x7fffe9, - 0x1fffde, - 0x7fffea, - 0x3fffdd, - 0x3fffde, - 0xfffff0, - 0x1fffdf, - 0x3fffdf, - 0x7fffeb, - 0x7fffec, - 0x1fffe0, - 0x1fffe1, - 0x3fffe0, - 0x1fffe2, - 0x7fffed, - 0x3fffe1, - 0x7fffee, - 0x7fffef, - 0xfffea, - 0x3fffe2, - 0x3fffe3, - 0x3fffe4, - 0x7ffff0, - 0x3fffe5, - 0x3fffe6, - 0x7ffff1, - 0x3ffffe0, - 0x3ffffe1, - 0xfffeb, - 0x7fff1, - 0x3fffe7, - 0x7ffff2, - 0x3fffe8, - 0x1ffffec, - 0x3ffffe2, - 0x3ffffe3, - 0x3ffffe4, - 0x7ffffde, - 0x7ffffdf, - 0x3ffffe5, - 0xfffff1, - 0x1ffffed, - 0x7fff2, - 0x1fffe3, - 0x3ffffe6, - 0x7ffffe0, - 0x7ffffe1, - 0x3ffffe7, - 0x7ffffe2, - 0xfffff2, - 0x1fffe4, - 0x1fffe5, - 0x3ffffe8, - 0x3ffffe9, - 0xffffffd, - 0x7ffffe3, - 0x7ffffe4, - 0x7ffffe5, - 0xfffec, - 0xfffff3, - 0xfffed, - 0x1fffe6, - 0x3fffe9, - 0x1fffe7, - 0x1fffe8, - 0x7ffff3, - 0x3fffea, - 0x3fffeb, - 0x1ffffee, - 0x1ffffef, - 0xfffff4, - 0xfffff5, - 0x3ffffea, - 0x7ffff4, - 0x3ffffeb, - 0x7ffffe6, - 0x3ffffec, - 0x3ffffed, - 0x7ffffe7, - 0x7ffffe8, - 0x7ffffe9, - 0x7ffffea, - 0x7ffffeb, - 0xffffffe, - 0x7ffffec, - 0x7ffffed, - 0x7ffffee, - 0x7ffffef, - 0x7fffff0, - 0x3ffffee, -} - -var huffmanCodeLen = [256]uint8{ - 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, - 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, - 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, - 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, - 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, - 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, - 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, - 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, - 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, - 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, - 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, - 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, - 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, - 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, -} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go deleted file mode 100644 index bdaba1d46..000000000 --- a/vendor/golang.org/x/net/http2/http2.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package http2 implements the HTTP/2 protocol. -// -// This package is low-level and intended to be used directly by very -// few people. Most users will use it indirectly through the automatic -// use by the net/http package (from Go 1.6 and later). -// For use in earlier Go versions see ConfigureServer. (Transport support -// requires Go 1.6 or later) -// -// See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
-// -package http2 // import "golang.org/x/net/http2" - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" - - "golang.org/x/net/http/httpguts" -) - -var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool -) - -func init() { - e := os.Getenv("GODEBUG") - if strings.Contains(e, "http2debug=1") { - VerboseLogs = true - } - if strings.Contains(e, "http2debug=2") { - VerboseLogs = true - logFrameWrites = true - logFrameReads = true - } -} - -const ( - // ClientPreface is the string that must be sent by new - // connections from clients. - ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" - - // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 - initialMaxFrameSize = 16384 - - // NextProtoTLS is the NPN/ALPN protocol negotiated during - // HTTP/2's TLS setup. - NextProtoTLS = "h2" - - // http://http2.github.io/http2-spec/#SettingValues - initialHeaderTableSize = 4096 - - initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size - - defaultMaxReadFrameSize = 1 << 20 -) - -var ( - clientPreface = []byte(ClientPreface) -) - -type streamState int - -// HTTP/2 stream states. -// -// See http://tools.ietf.org/html/rfc7540#section-5.1. -// -// For simplicity, the server code merges "reserved (local)" into -// "half-closed (remote)". This is one less state transition to track. -// The only downside is that we send PUSH_PROMISEs slightly less -// liberally than allowable. More discussion here: -// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html -// -// "reserved (remote)" is omitted since the client code does not -// support server push. -const ( - stateIdle streamState = iota - stateOpen - stateHalfClosedLocal - stateHalfClosedRemote - stateClosed -) - -var stateName = [...]string{ - stateIdle: "Idle", - stateOpen: "Open", - stateHalfClosedLocal: "HalfClosedLocal", - stateHalfClosedRemote: "HalfClosedRemote", - stateClosed: "Closed", -} - -func (st streamState) String() string { - return stateName[st] -} - -// Setting is a setting parameter: which setting it is, and its value. -type Setting struct { - // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues - ID SettingID - - // Val is the value. - Val uint32 -} - -func (s Setting) String() string { - return fmt.Sprintf("[%v = %d]", s.ID, s.Val) -} - -// Valid reports whether the setting is valid. 
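The SETTINGS bounds enforced by Valid are worth a concrete check. This sketch uses the exported http2.Setting type and its Valid method, with values chosen only to show both rejected and accepted settings:

    package main

    import (
        "fmt"

        "golang.org/x/net/http2"
    )

    func main() {
        settings := []http2.Setting{
            {ID: http2.SettingEnablePush, Val: 2},            // invalid: must be 0 or 1
            {ID: http2.SettingMaxFrameSize, Val: 1024},       // invalid: below 16384
            {ID: http2.SettingInitialWindowSize, Val: 65535}, // valid
        }
        for _, s := range settings {
            fmt.Printf("%v valid=%v\n", s, s.Valid() == nil)
        }
    }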
-func (s Setting) Valid() error { - // Limits and error codes from 6.5.2 Defined SETTINGS Parameters - switch s.ID { - case SettingEnablePush: - if s.Val != 1 && s.Val != 0 { - return ConnectionError(ErrCodeProtocol) - } - case SettingInitialWindowSize: - if s.Val > 1<<31-1 { - return ConnectionError(ErrCodeFlowControl) - } - case SettingMaxFrameSize: - if s.Val < 16384 || s.Val > 1<<24-1 { - return ConnectionError(ErrCodeProtocol) - } - } - return nil -} - -// A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings -type SettingID uint16 - -const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 -) - -var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", -} - -func (s SettingID) String() string { - if v, ok := settingName[s]; ok { - return v - } - return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) -} - -var ( - errInvalidHeaderFieldName = errors.New("http2: invalid header field name") - errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") -) - -// validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httpguts.ValidHeaderName for the base rules. -// -// Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " -func validWireHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !httpguts.IsTokenRune(r) { - return false - } - if 'A' <= r && r <= 'Z' { - return false - } - } - return true -} - -func httpCodeString(code int) string { - switch code { - case 200: - return "200" - case 404: - return "404" - } - return strconv.Itoa(code) -} - -// from pkg io -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - -// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). -type closeWaiter chan struct{} - -// Init makes a closeWaiter usable. -// It exists because so a closeWaiter value can be placed inside a -// larger struct and have the Mutex and Cond's memory in the same -// allocation. -func (cw *closeWaiter) Init() { - *cw = make(chan struct{}) -} - -// Close marks the closeWaiter as closed and unblocks any waiters. -func (cw closeWaiter) Close() { - close(cw) -} - -// Wait waits for the closeWaiter to become closed. -func (cw closeWaiter) Wait() { - <-cw -} - -// bufferedWriter is a buffered writer that writes to w. -// Its buffered writer is lazily allocated as needed, to minimize -// idle memory usage with many connections. 
-type bufferedWriter struct { - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered -} - -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} -} - -// bufWriterPoolBufferSize is the size of bufio.Writer's -// buffers created using bufWriterPool. -// -// TODO: pick a less arbitrary value? this is a bit under -// (3 x typical 1500 byte MTU) at least. Other than that, -// not much thought went into it. -const bufWriterPoolBufferSize = 4 << 10 - -var bufWriterPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) - }, -} - -func (w *bufferedWriter) Available() int { - if w.bw == nil { - return bufWriterPoolBufferSize - } - return w.bw.Available() -} - -func (w *bufferedWriter) Write(p []byte) (n int, err error) { - if w.bw == nil { - bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) - w.bw = bw - } - return w.bw.Write(p) -} - -func (w *bufferedWriter) Flush() error { - bw := w.bw - if bw == nil { - return nil - } - err := bw.Flush() - bw.Reset(nil) - bufWriterPool.Put(bw) - w.bw = nil - return err -} - -func mustUint31(v int32) uint32 { - if v < 0 || v > 2147483647 { - panic("out of range") - } - return uint32(v) -} - -// bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC 7230, section 3.3. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -type httpError struct { - msg string - timeout bool -} - -func (e *httpError) Error() string { return e.msg } -func (e *httpError) Timeout() bool { return e.timeout } -func (e *httpError) Temporary() bool { return true } - -var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} - -type connectionStater interface { - ConnectionState() tls.ConnectionState -} - -var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} - -type sorter struct { - v []string // owned by sorter -} - -func (s *sorter) Len() int { return len(s.v) } -func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } -func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } - -// Keys returns the sorted keys of h. -// -// The returned slice is only valid until s used again or returned to -// its pool. -func (s *sorter) Keys(h http.Header) []string { - keys := s.v[:0] - for k := range h { - keys = append(keys, k) - } - s.v = keys - sort.Sort(s) - return keys -} - -func (s *sorter) SortStrings(ss []string) { - // Our sorter works on s.v, which sorter owns, so - // stash it away while we sort the user's buffer. - save := s.v - s.v = ss - sort.Sort(s) - s.v = save -} - -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// *) a non-empty string starting with '/' -// *) the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. 
-func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index 161bca7ce..000000000 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go deleted file mode 100644 index a6140099c..000000000 --- a/vendor/golang.org/x/net/http2/pipe.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "io" - "sync" -) - -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like -// io.Pipe except there are no PipeReader/PipeWriter halves, and the -// underlying buffer is an interface. (io.Pipe is always unbuffered) -type pipe struct { - mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer // nil when done reading - err error // read error once empty. non-nil means closed. - breakErr error // immediate read error (caller doesn't see rest of b) - donec chan struct{} // closed on error - readFn func() // optional code to run in Read before error -} - -type pipeBuffer interface { - Len() int - io.Writer - io.Reader -} - -func (p *pipe) Len() int { - p.mu.Lock() - defer p.mu.Unlock() - if p.b == nil { - return 0 - } - return p.b.Len() -} - -// Read waits until data is available and copies bytes -// from the buffer into p. -func (p *pipe) Read(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - for { - if p.breakErr != nil { - return 0, p.breakErr - } - if p.b != nil && p.b.Len() > 0 { - return p.b.Read(d) - } - if p.err != nil { - if p.readFn != nil { - p.readFn() // e.g. copy trailers - p.readFn = nil // not sticky like p.err - } - p.b = nil - return 0, p.err - } - p.c.Wait() - } -} - -var errClosedPipeWrite = errors.New("write on closed buffer") - -// Write copies bytes from p into the buffer and wakes a reader. -// It is an error to write more data than the buffer can hold. -func (p *pipe) Write(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if p.err != nil { - return 0, errClosedPipeWrite - } - if p.breakErr != nil { - return len(d), nil // discard when there is no reader - } - return p.b.Write(d) -} - -// CloseWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err after all data has been -// read. -// -// The error must be non-nil. -func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } - -// BreakWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err immediately, without -// waiting for unread data. 
-func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } - -// closeWithErrorAndCode is like CloseWithError but also sets some code to run -// in the caller's goroutine before returning the error. -func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } - -func (p *pipe) closeWithError(dst *error, err error, fn func()) { - if err == nil { - panic("err must be non-nil") - } - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if *dst != nil { - // Already been done. - return - } - p.readFn = fn - if dst == &p.breakErr { - p.b = nil - } - *dst = err - p.closeDoneLocked() -} - -// requires p.mu be held. -func (p *pipe) closeDoneLocked() { - if p.donec == nil { - return - } - // Close if unclosed. This isn't racy since we always - // hold p.mu while closing. - select { - case <-p.donec: - default: - close(p.donec) - } -} - -// Err returns the error (if any) first set by BreakWithError or CloseWithError. -func (p *pipe) Err() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.breakErr != nil { - return p.breakErr - } - return p.err -} - -// Done returns a channel which is closed if and when this pipe is closed -// with CloseWithError. -func (p *pipe) Done() <-chan struct{} { - p.mu.Lock() - defer p.mu.Unlock() - if p.donec == nil { - p.donec = make(chan struct{}) - if p.err != nil || p.breakErr != nil { - // Already hit an error. - p.closeDoneLocked() - } - } - return p.donec -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go deleted file mode 100644 index 5e01ce9ab..000000000 --- a/vendor/golang.org/x/net/http2/server.go +++ /dev/null @@ -1,2961 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO: turn off the serve goroutine when idle, so -// an idle conn only has the readFrames goroutine active. (which could -// also be optimized probably to pin less memory in crypto/tls). This -// would involve tracking when the serve goroutine is active (atomic -// int32 read/CAS probably?) and starting it up when frames arrive, -// and shutting it down when all handlers exit. the occasional PING -// packets could use time.AfterFunc to call sc.wakeStartServeLoop() -// (which is a no-op if already running) and then queue the PING write -// as normal. The serve loop would then exit in most cases (if no -// Handlers running) and not be woken up again until the PING packet -// returns. - -// TODO (maybe): add a mechanism for Handlers to going into -// half-closed-local mode (rw.(io.Closer) test?) but not exit their -// handler, and continue to be able to read from the -// Request.Body. This would be a somewhat semantic change from HTTP/1 -// (or at least what we expose in net/http), so I'd probably want to -// add it there too. For now, this package says that returning from -// the Handler ServeHTTP function means you're both done reading and -// done writing, without a way to stop just one or the other. 
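The Done/closeDoneLocked pair above uses the close-once "done channel" idiom; a small self-contained sketch of just that idiom (doneSignal is a made-up name) looks like this: closing a chan struct{} broadcasts to every waiter, and a select-with-default guard under the lock makes the close idempotent.

package main

import (
	"fmt"
	"sync"
	"time"
)

type doneSignal struct {
	mu    sync.Mutex
	donec chan struct{}
}

func (d *doneSignal) Done() <-chan struct{} {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.donec == nil {
		d.donec = make(chan struct{}) // lazily allocated, like pipe.donec
	}
	return d.donec
}

func (d *doneSignal) close() {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.donec == nil {
		d.donec = make(chan struct{})
	}
	select {
	case <-d.donec: // already closed
	default:
		close(d.donec)
	}
}

func main() {
	var d doneSignal
	go func() {
		time.Sleep(10 * time.Millisecond)
		d.close()
		d.close() // safe: the guard makes the second close a no-op
	}()
	<-d.Done()
	fmt.Println("signalled")
}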
- -package http2 - -import ( - "bufio" - "bytes" - "context" - "crypto/tls" - "errors" - "fmt" - "io" - "log" - "math" - "net" - "net/http" - "net/textproto" - "net/url" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http/httpguts" - "golang.org/x/net/http2/hpack" -) - -const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? - maxQueuedControlFrames = 10000 -) - -var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") -) - -var responseWriterStatePool = sync.Pool{ - New: func() interface{} { - rws := &responseWriterState{} - rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) - return rws - }, -} - -// Test hooks. -var ( - testHookOnConn func() - testHookGetServerConn func(*serverConn) - testHookOnPanicMu *sync.Mutex // nil except in tests - testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) -) - -// Server is an HTTP/2 server. -type Server struct { - // MaxHandlers limits the number of http.Handler ServeHTTP goroutines - // which may run at a time over all connections. - // Negative or zero no limit. - // TODO: implement - MaxHandlers int - - // MaxConcurrentStreams optionally specifies the number of - // concurrent streams that each client may have open at a - // time. This is unrelated to the number of http.Handler goroutines - // which may be active globally, which is MaxHandlers. - // If zero, MaxConcurrentStreams defaults to at least 100, per - // the HTTP/2 spec's recommendations. - MaxConcurrentStreams uint32 - - // MaxReadFrameSize optionally specifies the largest frame - // this server is willing to read. A valid value is between - // 16k and 16M, inclusive. If zero or otherwise invalid, a - // default value is used. - MaxReadFrameSize uint32 - - // PermitProhibitedCipherSuites, if true, permits the use of - // cipher suites prohibited by the HTTP/2 spec. - PermitProhibitedCipherSuites bool - - // IdleTimeout specifies how long until idle clients should be - // closed with a GOAWAY frame. PING frames are not considered - // activity for the purposes of IdleTimeout. - IdleTimeout time.Duration - - // MaxUploadBufferPerConnection is the size of the initial flow - // control window for each connections. The HTTP/2 spec does not - // allow this to be smaller than 65535 or larger than 2^32-1. - // If the value is outside this range, a default value will be - // used instead. - MaxUploadBufferPerConnection int32 - - // MaxUploadBufferPerStream is the size of the initial flow control - // window for each stream. The HTTP/2 spec does not allow this to - // be larger than 2^32-1. If the value is zero or larger than the - // maximum, a default value will be used instead. - MaxUploadBufferPerStream int32 - - // NewWriteScheduler constructs a write scheduler for a connection. - // If nil, a default scheduler is chosen. - NewWriteScheduler func() WriteScheduler - - // Internal state. This is a pointer (rather than embedded directly) - // so that we don't embed a Mutex in this struct, which will make the - // struct non-copyable, which might break some callers. 
- state *serverInternalState -} - -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection > initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. - return maxQueuedControlFrames -} - -type serverInternalState struct { - mu sync.Mutex - activeConns map[*serverConn]struct{} -} - -func (s *serverInternalState) registerConn(sc *serverConn) { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - s.activeConns[sc] = struct{}{} - s.mu.Unlock() -} - -func (s *serverInternalState) unregisterConn(sc *serverConn) { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - delete(s.activeConns, sc) - s.mu.Unlock() -} - -func (s *serverInternalState) startGracefulShutdown() { - if s == nil { - return // if the Server was used without calling ConfigureServer - } - s.mu.Lock() - for sc := range s.activeConns { - sc.startGracefulShutdown() - } - s.mu.Unlock() -} - -// ConfigureServer adds HTTP/2 support to a net/http Server. -// -// The configuration conf may be nil. -// -// ConfigureServer must be called before s begins serving. -func ConfigureServer(s *http.Server, conf *Server) error { - if s == nil { - panic("nil *http.Server") - } - if conf == nil { - conf = new(Server) - } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} - if h1, h2 := s, conf; h2.IdleTimeout == 0 { - if h1.IdleTimeout != 0 { - h2.IdleTimeout = h1.IdleTimeout - } else { - h2.IdleTimeout = h1.ReadTimeout - } - } - s.RegisterOnShutdown(conf.state.startGracefulShutdown) - - if s.TLSConfig == nil { - s.TLSConfig = new(tls.Config) - } else if s.TLSConfig.CipherSuites != nil { - // If they already provided a CipherSuite list, return - // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. - haveRequired := false - sawBad := false - for i, cs := range s.TLSConfig.CipherSuites { - switch cs { - case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - // Alternative MTI cipher to not discourage ECDSA-only servers. - // See http://golang.org/cl/30721 for further information. - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: - haveRequired = true - } - if isBadCipher(cs) { - sawBad = true - } else if sawBad { - return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. 
With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) - } - } - if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") - } - } - - // Note: not setting MinVersion to tls.VersionTLS12, - // as we don't want to interfere with HTTP/1.1 traffic - // on the user's server. We enforce TLS 1.2 later once - // we accept a connection. Ideally this should be done - // during next-proto selection, but using TLS <1.2 with - // HTTP/2 is still the client's bug. - - s.TLSConfig.PreferServerCipherSuites = true - - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) - } - - if s.TLSNextProto == nil { - s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} - } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { - if testHookOnConn != nil { - testHookOnConn() - } - // The TLSNextProto interface predates contexts, so - // the net/http package passes down its per-connection - // base context via an exported but unadvertised - // method on the Handler. This is for internal - // net/http<=>http2 use only. - var ctx context.Context - type baseContexter interface { - BaseContext() context.Context - } - if bc, ok := h.(baseContexter); ok { - ctx = bc.BaseContext() - } - conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, - }) - } - s.TLSNextProto[NextProtoTLS] = protoHandler - return nil -} - -// ServeConnOpts are options for the Server.ServeConn method. -type ServeConnOpts struct { - // Context is the base context to use. - // If nil, context.Background is used. - Context context.Context - - // BaseConfig optionally sets the base configuration - // for values. If nil, defaults are used. - BaseConfig *http.Server - - // Handler specifies which handler to use for processing - // requests. If nil, BaseConfig.Handler is used. If BaseConfig - // or BaseConfig.Handler is nil, http.DefaultServeMux is used. - Handler http.Handler -} - -func (o *ServeConnOpts) context() context.Context { - if o.Context != nil { - return o.Context - } - return context.Background() -} - -func (o *ServeConnOpts) baseConfig() *http.Server { - if o != nil && o.BaseConfig != nil { - return o.BaseConfig - } - return new(http.Server) -} - -func (o *ServeConnOpts) handler() http.Handler { - if o != nil { - if o.Handler != nil { - return o.Handler - } - if o.BaseConfig != nil && o.BaseConfig.Handler != nil { - return o.BaseConfig.Handler - } - } - return http.DefaultServeMux -} - -// ServeConn serves HTTP/2 requests on the provided connection and -// blocks until the connection is no longer readable. -// -// ServeConn starts speaking HTTP/2 assuming that c has not had any -// reads or writes. It writes its initial settings frame and expects -// to be able to read the preface and settings frame from the -// client. If c has a ConnectionState method like a *tls.Conn, the -// ConnectionState is used to verify the TLS ciphersuite and to set -// the Request.TLS field in Handlers. -// -// ServeConn does not support h2c by itself. Any h2c support must be -// implemented in terms of providing a suitably-behaving net.Conn. -// -// The opts parameter is optional. If nil, default values are used. 
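For context, a hedged usage sketch of the exported entry point described above: ConfigureServer wires HTTP/2 onto a plain net/http server, while ServeConn is the lower-level per-connection path. The certificate file names below are placeholders; only http.Server, http2.Server, and http2.ConfigureServer are assumed as real APIs here.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr:        ":8443",
		IdleTimeout: 75 * time.Second, // inherited by http2 when its own IdleTimeout is zero
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "proto=%s\n", r.Proto) // "HTTP/2.0" once h2 is negotiated
		}),
	}

	// Registers the "h2" ALPN handler and validates any custom cipher-suite
	// list, as the ConfigureServer code above describes.
	if err := http2.ConfigureServer(srv, &http2.Server{
		MaxConcurrentStreams: 250,
	}); err != nil {
		log.Fatal(err)
	}

	// cert.pem / key.pem are illustrative placeholders for real certificates.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}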
-func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { - baseCtx, cancel := serverConnBaseContext(c, opts) - defer cancel() - - sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan FrameWriteRequest, 8), - serveMsgCh: make(chan interface{}, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), - initialStreamSendWindowSize: initialWindowSize, - maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } - - s.state.registerConn(sc) - defer s.state.unregisterConn(sc) - - // The net/http package sets the write deadline from the - // http.Server.WriteTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already set. - // Write deadlines are set per stream in serverConn.newStream. - // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { - sc.conn.SetWriteDeadline(time.Time{}) - } - - if s.NewWriteScheduler != nil { - sc.writeSched = s.NewWriteScheduler() - } else { - sc.writeSched = NewRandomWriteScheduler() - } - - // These start at the RFC-specified defaults. If there is a higher - // configured value for inflow, that will be updated when we send a - // WINDOW_UPDATE shortly after sending SETTINGS. - sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) - sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - - fr := NewFramer(sc.bw, c) - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) - sc.framer = fr - - if tc, ok := c.(connectionStater); ok { - sc.tlsState = new(tls.ConnectionState) - *sc.tlsState = tc.ConnectionState() - // 9.2 Use of TLS Features - // An implementation of HTTP/2 over TLS MUST use TLS - // 1.2 or higher with the restrictions on feature set - // and cipher suite described in this section. Due to - // implementation limitations, it might not be - // possible to fail TLS negotiation. An endpoint MUST - // immediately terminate an HTTP/2 connection that - // does not meet the TLS requirements described in - // this section with a connection error (Section - // 5.4.1) of type INADEQUATE_SECURITY. - if sc.tlsState.Version < tls.VersionTLS12 { - sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") - return - } - - if sc.tlsState.ServerName == "" { - // Client must use SNI, but we don't enforce that anymore, - // since it was causing problems when connecting to bare IP - // addresses during development. - // - // TODO: optionally enforce? Or enforce at the time we receive - // a new request, and verify the ServerName matches the :authority? - // But that precludes proxy situations, perhaps. - // - // So for now, do nothing here again. 
- } - - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { - // "Endpoints MAY choose to generate a connection error - // (Section 5.4.1) of type INADEQUATE_SECURITY if one of - // the prohibited cipher suites are negotiated." - // - // We choose that. In my opinion, the spec is weak - // here. It also says both parties must support at least - // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no - // excuses here. If we really must, we could allow an - // "AllowInsecureWeakCiphers" option on the server later. - // Let's see how it plays out first. - sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) - return - } - } - - if hook := testHookGetServerConn; hook != nil { - hook(sc) - } - sc.serve() -} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { - ctx, cancel = context.WithCancel(opts.context()) - ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) - if hs := opts.baseConfig(); hs != nil { - ctx = context.WithValue(ctx, http.ServerContextKey, hs) - } - return -} - -func (sc *serverConn) rejectConn(err ErrCode, debug string) { - sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) - // ignoring errors. hanging up anyway. - sc.framer.WriteGoAway(0, err, []byte(debug)) - sc.bw.Flush() - sc.conn.Close() -} - -type serverConn struct { - // Immutable: - srv *Server - hs *http.Server - conn net.Conn - bw *bufferedWriter // writing to conn - handler http.Handler - baseCtx context.Context - framer *Framer - doneServing chan struct{} // closed when serverConn.serve ends - readFrameCh chan readFrameResult // written by serverConn.readFrames - wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve - wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes - bodyReadCh chan bodyReadMsg // from handlers -> serve - serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control - tlsState *tls.ConnectionState // shared by all handlers, like net/http - remoteAddrStr string - writeSched WriteScheduler - - // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? 
- queuedControlFrames int // control frames in the writeSched queue - clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curClientStreams uint32 // number of open streams initiated by the client - curPushedStreams uint32 // number of open streams initiated by server push - maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests - maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes - streams map[uint32]*stream - initialStreamSendWindowSize int32 - maxFrameSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started writing a frame (on serve goroutine or separate) - writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - inGoAway bool // we've started to or sent GOAWAY - inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused - - // Owned by the writeFrameAsync goroutine: - headerWriteBuf bytes.Buffer - hpackEncoder *hpack.Encoder - - // Used by startGracefulShutdown. - shutdownOnce sync.Once -} - -func (sc *serverConn) maxHeaderListSize() uint32 { - n := sc.hs.MaxHeaderBytes - if n <= 0 { - n = http.DefaultMaxHeaderBytes - } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) -} - -func (sc *serverConn) curOpenStreams() uint32 { - sc.serveG.check() - return sc.curClientStreams + sc.curPushedStreams -} - -// stream represents a stream. This is the minimal metadata needed by -// the serve goroutine. Most of the actual stream state is owned by -// the http.Handler's goroutine in the responseWriter. Because the -// responseWriter's responseWriterState is recycled at the end of a -// handler, this struct intentionally has no pointer to the -// *responseWriter{,State} itself, as the Handler ending nils out the -// responseWriter's state field. 
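A tiny worked example of the maxHeaderListSize arithmetic above, assuming the same constants: the net/http MaxHeaderBytes limit is padded by the 32-byte per-field overhead that HTTP/2's SETTINGS_MAX_HEADER_LIST_SIZE accounting adds, for a conservative ten header fields.

package main

import (
	"fmt"
	"net/http"
)

func maxHeaderListSize(maxHeaderBytes int) uint32 {
	if maxHeaderBytes <= 0 {
		maxHeaderBytes = http.DefaultMaxHeaderBytes // 1 MiB
	}
	const perFieldOverhead = 32 // fixed per-field overhead in HTTP/2's accounting
	const typicalHeaders = 10   // conservative estimate, as in the code above
	return uint32(maxHeaderBytes + typicalHeaders*perFieldOverhead)
}

func main() {
	fmt.Println(maxHeaderListSize(0))    // 1048896 = 1<<20 + 10*32
	fmt.Println(maxHeaderListSize(8192)) // 8512
}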
-type stream struct { - // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state - ctx context.Context - cancelCtx func() - - // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 - state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - writeDeadline *time.Timer // nil if unused - - trailer http.Header // accumulated trailers - reqTrailer http.Header // handler's Request.Trailer -} - -func (sc *serverConn) Framer() *Framer { return sc.framer } -func (sc *serverConn) CloseConn() error { return sc.conn.Close() } -func (sc *serverConn) Flush() error { return sc.bw.Flush() } -func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { - return sc.hpackEncoder, &sc.headerWriteBuf -} - -func (sc *serverConn) state(streamID uint32) (streamState, *stream) { - sc.serveG.check() - // http://tools.ietf.org/html/rfc7540#section-5.1 - if st, ok := sc.streams[streamID]; ok { - return st.state, st - } - // "The first use of a new stream identifier implicitly closes all - // streams in the "idle" state that might have been initiated by - // that peer with a lower-valued stream identifier. For example, if - // a client sends a HEADERS frame on stream 7 without ever sending a - // frame on stream 5, then stream 5 transitions to the "closed" - // state when the first frame for stream 7 is sent or received." - if streamID%2 == 1 { - if streamID <= sc.maxClientStreamID { - return stateClosed, nil - } - } else { - if streamID <= sc.maxPushPromiseID { - return stateClosed, nil - } - } - return stateIdle, nil -} - -// setConnState calls the net/http ConnState hook for this connection, if configured. -// Note that the net/http package does StateNew and StateClosed for us. -// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. -func (sc *serverConn) setConnState(state http.ConnState) { - if sc.hs.ConnState != nil { - sc.hs.ConnState(sc.conn, state) - } -} - -func (sc *serverConn) vlogf(format string, args ...interface{}) { - if VerboseLogs { - sc.logf(format, args...) - } -} - -func (sc *serverConn) logf(format string, args ...interface{}) { - if lg := sc.hs.ErrorLog; lg != nil { - lg.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// errno returns v's underlying uintptr, else 0. -// -// TODO: remove this helper function once http2 can use build -// tags. See comment in isClosedConnError. -func errno(v error) uintptr { - if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { - return uintptr(rv.Uint()) - } - return 0 -} - -// isClosedConnError reports whether err is an error from use of a closed -// network connection. -func isClosedConnError(err error) bool { - if err == nil { - return false - } - - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. 
- str := err.Error() - if strings.Contains(str, "use of closed network connection") { - return true - } - - // TODO(bradfitz): x/tools/cmd/bundle doesn't really support - // build tags, so I can't make an http2_windows.go file with - // Windows-specific stuff. Fix that and move this, once we - // have a way to bundle this into std's net/http somehow. - if runtime.GOOS == "windows" { - if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { - if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { - const WSAECONNABORTED = 10053 - const WSAECONNRESET = 10054 - if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { - return true - } - } - } - } - return false -} - -func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { - if err == nil { - return - } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { - // Boring, expected errors. - sc.vlogf(format, args...) - } else { - sc.logf(format, args...) - } -} - -func (sc *serverConn) canonicalHeader(v string) string { - sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] - if ok { - return cv - } - cv, ok = sc.canonHeader[v] - if ok { - return cv - } - if sc.canonHeader == nil { - sc.canonHeader = make(map[string]string) - } - cv = http.CanonicalHeaderKey(v) - sc.canonHeader[v] = cv - return cv -} - -type readFrameResult struct { - f Frame // valid until readMore is called - err error - - // readMore should be called once the consumer no longer needs or - // retains f. After readMore, f is invalid and more frames can be - // read. - readMore func() -} - -// readFrames is the loop that reads incoming frames. -// It takes care to only read one frame at a time, blocking until the -// consumer is done with the frame. -// It's run on its own goroutine. -func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done - for { - f, err := sc.framer.ReadFrame() - select { - case sc.readFrameCh <- readFrameResult{f, err, gateDone}: - case <-sc.doneServing: - return - } - select { - case <-gate: - case <-sc.doneServing: - return - } - if terminalReadFrameError(err) { - return - } - } -} - -// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. -type frameWriteResult struct { - wr FrameWriteRequest // what was written (or attempted) - err error // result of the writeFrame call -} - -// writeFrameAsync runs in its own goroutine and writes a single frame -// and then reports when it's done. -// At most one goroutine can be running writeFrameAsync at a time per -// serverConn. -func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { - err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} -} - -func (sc *serverConn) closeAllStreamsOnConnClose() { - sc.serveG.check() - for _, st := range sc.streams { - sc.closeStream(st, errClientDisconnected) - } -} - -func (sc *serverConn) stopShutdownTimer() { - sc.serveG.check() - if t := sc.shutdownTimer; t != nil { - t.Stop() - } -} - -func (sc *serverConn) notePanic() { - // Note: this is for serverConn.serve panicking, not http.Handler code. 
- if testHookOnPanicMu != nil { - testHookOnPanicMu.Lock() - defer testHookOnPanicMu.Unlock() - } - if testHookOnPanic != nil { - if e := recover(); e != nil { - if testHookOnPanic(sc, e) { - panic(e) - } - } - } -} - -func (sc *serverConn) serve() { - sc.serveG.check() - defer sc.notePanic() - defer sc.conn.Close() - defer sc.closeAllStreamsOnConnClose() - defer sc.stopShutdownTimer() - defer close(sc.doneServing) // unblocks handlers trying to send - - if VerboseLogs { - sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) - } - - sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, - }, - }) - sc.unackedSettings++ - - // Each connection starts with intialWindowSize inflow tokens. - // If a higher value is configured, we add more tokens. - if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { - sc.sendWindowUpdate(nil, int(diff)) - } - - if err := sc.readPreface(); err != nil { - sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) - return - } - // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. - // Active means we read some data and anticipate a request. We'll - // do another Active when we get a HEADERS frame. - sc.setConnState(http.StateActive) - sc.setConnState(http.StateIdle) - - if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) - defer sc.idleTimer.Stop() - } - - go sc.readFrames() // closed by defer sc.conn.Close above - - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) - defer settingsTimer.Stop() - - loopNum := 0 - for { - loopNum++ - select { - case wr := <-sc.wantWriteFrameCh: - if se, ok := wr.write.(StreamError); ok { - sc.resetStream(se) - break - } - sc.writeFrame(wr) - case res := <-sc.wroteFrameCh: - sc.wroteFrame(res) - case res := <-sc.readFrameCh: - if !sc.processFrameFromReader(res) { - return - } - res.readMore() - if settingsTimer != nil { - settingsTimer.Stop() - settingsTimer = nil - } - case m := <-sc.bodyReadCh: - sc.noteBodyRead(m.st, m.n) - case msg := <-sc.serveMsgCh: - switch v := msg.(type) { - case func(int): - v(loopNum) // for testing - case *serverMessage: - switch v { - case settingsTimerMsg: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case idleTimerMsg: - sc.vlogf("connection is idle") - sc.goAway(ErrCodeNo) - case shutdownTimerMsg: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case gracefulShutdownMsg: - sc.startGracefulShutdownInternal() - default: - panic("unknown timer") - } - case *startPushRequest: - sc.startPush(v) - default: - panic(fmt.Sprintf("unexpected type %T", v)) - } - } - - // If the peer is causing us to generate a lot of control frames, - // but not reading them from us, assume they are trying to make us - // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { - sc.vlogf("http2: too many control frames in send queue, closing connection") - return - } - - // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY - // with no error code (graceful shutdown), don't start the timer until - // all open streams have been completed. 
- sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame - gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 - if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { - sc.shutDownIn(goAwayTimeout) - } - } -} - -func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { - select { - case <-sc.doneServing: - case <-sharedCh: - close(privateCh) - } -} - -type serverMessage int - -// Message values sent to serveMsgCh. -var ( - settingsTimerMsg = new(serverMessage) - idleTimerMsg = new(serverMessage) - shutdownTimerMsg = new(serverMessage) - gracefulShutdownMsg = new(serverMessage) -) - -func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } -func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } -func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } - -func (sc *serverConn) sendServeMsg(msg interface{}) { - sc.serveG.checkNotOn() // NOT - select { - case sc.serveMsgCh <- msg: - case <-sc.doneServing: - } -} - -var errPrefaceTimeout = errors.New("timeout waiting for client preface") - -// readPreface reads the ClientPreface greeting from the peer or -// returns errPrefaceTimeout on timeout, or an error if the greeting -// is invalid. -func (sc *serverConn) readPreface() error { - errc := make(chan error, 1) - go func() { - // Read the client preface - buf := make([]byte, len(ClientPreface)) - if _, err := io.ReadFull(sc.conn, buf); err != nil { - errc <- err - } else if !bytes.Equal(buf, clientPreface) { - errc <- fmt.Errorf("bogus greeting %q", buf) - } else { - errc <- nil - } - }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? - defer timer.Stop() - select { - case <-timer.C: - return errPrefaceTimeout - case err := <-errc: - if err == nil { - if VerboseLogs { - sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) - } - } - return err - } -} - -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -var writeDataPool = sync.Pool{ - New: func() interface{} { return new(writeData) }, -} - -// writeDataFromHandler writes DATA response frames from a handler on -// the given stream. -func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) - writeArg := writeDataPool.Get().(*writeData) - *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: writeArg, - stream: stream, - done: ch, - }) - if err != nil { - return err - } - var frameWriteDone bool // the frame write is done (successfully or not) - select { - case err = <-ch: - frameWriteDone = true - case <-sc.doneServing: - return errClientDisconnected - case <-stream.cw: - // If both ch and stream.cw were ready (as might - // happen on the final Write after an http.Handler - // ends), prefer the write result. Otherwise this - // might just be us successfully closing the stream. - // The writeFrameAsync and serve goroutines guarantee - // that the ch send will happen before the stream.cw - // close. - select { - case err = <-ch: - frameWriteDone = true - default: - return errStreamClosed - } - } - errChanPool.Put(ch) - if frameWriteDone { - writeDataPool.Put(writeArg) - } - return err -} - -// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts -// if the connection has gone away. 
-// -// This must not be run from the serve goroutine itself, else it might -// deadlock writing to sc.wantWriteFrameCh (which is only mildly -// buffered and is read by serve itself). If you're on the serve -// goroutine, call writeFrame instead. -func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { - sc.serveG.checkNotOn() // NOT - select { - case sc.wantWriteFrameCh <- wr: - return nil - case <-sc.doneServing: - // Serve loop is gone. - // Client has closed their connection to the server. - return errClientDisconnected - } -} - -// writeFrame schedules a frame to write and sends it if there's nothing -// already being written. -// -// There is no pushback here (the serve goroutine never blocks). It's -// the http.Handlers that block, waiting for their previous frames to -// make it onto the wire -// -// If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wr FrameWriteRequest) { - sc.serveG.check() - - // If true, wr will not be written and wr.done will not be signaled. - var ignoreWrite bool - - // We are not allowed to write frames on closed streams. RFC 7540 Section - // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on - // a closed stream." Our server never sends PRIORITY, so that exception - // does not apply. - // - // The serverConn might close an open stream while the stream's handler - // is still running. For example, the server might close a stream when it - // receives bad data from the client. If this happens, the handler might - // attempt to write a frame after the stream has been closed (since the - // handler hasn't yet been notified of the close). In this case, we simply - // ignore the frame. The handler will notice that the stream is closed when - // it waits for the frame to be written. - // - // As an exception to this rule, we allow sending RST_STREAM after close. - // This allows us to immediately reject new streams without tracking any - // state for those streams (except for the queued RST_STREAM frame). This - // may result in duplicate RST_STREAMs in some cases, but the client should - // ignore those. - if wr.StreamID() != 0 { - _, isReset := wr.write.(StreamError) - if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { - ignoreWrite = true - } - } - - // Don't send a 100-continue response if we've already sent headers. - // See golang.org/issue/14030. - switch wr.write.(type) { - case *writeResHeaders: - wr.stream.wroteHeaders = true - case write100ContinueHeadersFrame: - if wr.stream.wroteHeaders { - // We do not need to notify wr.done because this frame is - // never written with wr.done != nil. - if wr.done != nil { - panic("wr.done != nil for write100ContinueHeadersFrame") - } - ignoreWrite = true - } - } - - if !ignoreWrite { - if wr.isControl() { - sc.queuedControlFrames++ - // For extra safety, detect wraparounds, which should not happen, - // and pull the plug. - if sc.queuedControlFrames < 0 { - sc.conn.Close() - } - } - sc.writeSched.Push(wr) - } - sc.scheduleFrameWrite() -} - -// startFrameWrite starts a goroutine to write wr (in a separate -// goroutine since that might block on the network), and updates the -// serve goroutine's state about the world, updated from info in wr. 
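A minimal sketch of the handler-to-serve-loop handoff that writeFrameFromHandler above implements: the channel send races a doneServing-style channel so a handler goroutine can never block forever once the serve loop has exited. frameReq and sendToServeLoop are illustrative names, not the vendored ones.

package main

import (
	"errors"
	"fmt"
)

type frameReq struct{ streamID uint32 }

var errClientDisconnected = errors.New("client disconnected")

// sendToServeLoop hands wr to the serve loop, or reports that the loop is gone.
func sendToServeLoop(want chan<- frameReq, done <-chan struct{}, wr frameReq) error {
	select {
	case want <- wr:
		return nil
	case <-done:
		// Serve loop has exited; the client connection has been torn down.
		return errClientDisconnected
	}
}

func main() {
	want := make(chan frameReq) // unbuffered here; mildly buffered in the real code
	done := make(chan struct{})

	close(done) // simulate a serve loop that has already exited
	fmt.Println(sendToServeLoop(want, done, frameReq{streamID: 1}))
}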
-func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { - sc.serveG.check() - if sc.writingFrame { - panic("internal error: can only be writing one frame at a time") - } - - st := wr.stream - if st != nil { - switch st.state { - case stateHalfClosedLocal: - switch wr.write.(type) { - case StreamError, handlerPanicRST, writeWindowUpdate: - // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE - // in this state. (We never send PRIORITY from the server, so that is not checked.) - default: - panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) - } - case stateClosed: - panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) - } - } - if wpp, ok := wr.write.(*writePushPromise); ok { - var err error - wpp.promisedID, err = wpp.allocatePromisedID() - if err != nil { - sc.writingFrameAsync = false - wr.replyToWriter(err) - return - } - } - - sc.writingFrame = true - sc.needsFrameFlush = true - if wr.write.staysWithinBuffer(sc.bw.Available()) { - sc.writingFrameAsync = false - err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) - } else { - sc.writingFrameAsync = true - go sc.writeFrameAsync(wr) - } -} - -// errHandlerPanicked is the error given to any callers blocked in a read from -// Request.Body when the main goroutine panics. Since most handlers read in the -// main ServeHTTP goroutine, this will show up rarely. -var errHandlerPanicked = errors.New("http2: handler panicked") - -// wroteFrame is called on the serve goroutine with the result of -// whatever happened on writeFrameAsync. -func (sc *serverConn) wroteFrame(res frameWriteResult) { - sc.serveG.check() - if !sc.writingFrame { - panic("internal error: expected to be already writing a frame") - } - sc.writingFrame = false - sc.writingFrameAsync = false - - wr := res.wr - - if writeEndsStream(wr.write) { - st := wr.stream - if st == nil { - panic("internal error: expecting non-nil stream") - } - switch st.state { - case stateOpen: - // Here we would go to stateHalfClosedLocal in - // theory, but since our handler is done and - // the net/http package provides no mechanism - // for closing a ResponseWriter while still - // reading data (see possible TODO at top of - // this file), we go into closed state here - // anyway, after telling the peer we're - // hanging up on them. We'll transition to - // stateClosed after the RST_STREAM frame is - // written. - st.state = stateHalfClosedLocal - // Section 8.1: a server MAY request that the client abort - // transmission of a request without error by sending a - // RST_STREAM with an error code of NO_ERROR after sending - // a complete response. - sc.resetStream(streamError(st.id, ErrCodeNo)) - case stateHalfClosedRemote: - sc.closeStream(st, errHandlerComplete) - } - } else { - switch v := wr.write.(type) { - case StreamError: - // st may be unknown if the RST_STREAM was generated to reject bad input. - if st, ok := sc.streams[v.StreamID]; ok { - sc.closeStream(st, v) - } - case handlerPanicRST: - sc.closeStream(wr.stream, errHandlerPanicked) - } - } - - // Reply (if requested) to unblock the ServeHTTP goroutine. - wr.replyToWriter(res.err) - - sc.scheduleFrameWrite() -} - -// scheduleFrameWrite tickles the frame writing scheduler. -// -// If a frame is already being written, nothing happens. This will be called again -// when the frame is done being written. 
-// -// If a frame isn't being written and we need to send one, the best frame -// to send is selected by writeSched. -// -// If a frame isn't being written and there's nothing else to send, we -// flush the write buffer. -func (sc *serverConn) scheduleFrameWrite() { - sc.serveG.check() - if sc.writingFrame || sc.inFrameScheduleLoop { - return - } - sc.inFrameScheduleLoop = true - for !sc.writingFrameAsync { - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(FrameWriteRequest{ - write: &writeGoAway{ - maxStreamID: sc.maxClientStreamID, - code: sc.goAwayCode, - }, - }) - continue - } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) - continue - } - if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { - if wr, ok := sc.writeSched.Pop(); ok { - if wr.isControl() { - sc.queuedControlFrames-- - } - sc.startFrameWrite(wr) - continue - } - } - if sc.needsFrameFlush { - sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - continue - } - break - } - sc.inFrameScheduleLoop = false -} - -// startGracefulShutdown gracefully shuts down a connection. This -// sends GOAWAY with ErrCodeNo to tell the client we're gracefully -// shutting down. The connection isn't closed until all current -// streams are done. -// -// startGracefulShutdown returns immediately; it does not wait until -// the connection has shut down. -func (sc *serverConn) startGracefulShutdown() { - sc.serveG.checkNotOn() // NOT - sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) -} - -// After sending GOAWAY, the connection will close after goAwayTimeout. -// If we close the connection immediately after sending GOAWAY, there may -// be unsent data in our kernel receive buffer, which will cause the kernel -// to send a TCP RST on close() instead of a FIN. This RST will abort the -// connection immediately, whether or not the client had received the GOAWAY. -// -// Ideally we should delay for at least 1 RTT + epsilon so the client has -// a chance to read the GOAWAY and stop sending messages. Measuring RTT -// is hard, so we approximate with 1 second. See golang.org/issue/18701. -// -// This is a var so it can be shorter in tests, where all requests uses the -// loopback interface making the expected RTT very small. -// -// TODO: configurable? -var goAwayTimeout = 1 * time.Second - -func (sc *serverConn) startGracefulShutdownInternal() { - sc.goAway(ErrCodeNo) -} - -func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - if sc.inGoAway { - return - } - sc.inGoAway = true - sc.needToSendGoAway = true - sc.goAwayCode = code - sc.scheduleFrameWrite() -} - -func (sc *serverConn) shutDownIn(d time.Duration) { - sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) -} - -func (sc *serverConn) resetStream(se StreamError) { - sc.serveG.check() - sc.writeFrame(FrameWriteRequest{write: se}) - if st, ok := sc.streams[se.StreamID]; ok { - st.resetQueued = true - } -} - -// processFrameFromReader processes the serve loop's read from readFrameCh from the -// frame-reading goroutine. -// processFrameFromReader returns whether the connection should be kept open. 
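A hedged sketch of how the graceful-shutdown path above is normally driven from the net/http side: calling http.Server.Shutdown runs the hook that ConfigureServer registered via RegisterOnShutdown, which makes each HTTP/2 connection send GOAWAY with ErrCodeNo and wait for its open streams to drain. The certificate paths below are placeholders.

package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	if err := http2.ConfigureServer(srv, nil); err != nil {
		log.Fatal(err)
	}

	go func() {
		// cert.pem / key.pem are placeholders for real certificate files.
		if err := srv.ListenAndServeTLS("cert.pem", "key.pem"); err != http.ErrServerClosed {
			log.Println(err)
		}
	}()

	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	<-stop

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Shutdown triggers the registered OnShutdown hooks (the GOAWAY path) and
	// then waits, up to the context deadline, for connections to finish.
	if err := srv.Shutdown(ctx); err != nil {
		log.Println("forced close:", err)
	}
}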
-func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { - sc.serveG.check() - err := res.err - if err != nil { - if err == ErrFrameTooLarge { - sc.goAway(ErrCodeFrameSize) - return true // goAway will close the loop - } - clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) - if clientGone { - // TODO: could we also get into this state if - // the peer does a half close - // (e.g. CloseWrite) because they're done - // sending frames but they're still wanting - // our open replies? Investigate. - // TODO: add CloseWrite to crypto/tls.Conn first - // so we have a way to test this? I suppose - // just for testing we could have a non-TLS mode. - return false - } - } else { - f := res.f - if VerboseLogs { - sc.vlogf("http2: server read frame %v", summarizeFrame(f)) - } - err = sc.processFrame(f) - if err == nil { - return true - } - } - - switch ev := err.(type) { - case StreamError: - sc.resetStream(ev) - return true - case goAwayFlowError: - sc.goAway(ErrCodeFlowControl) - return true - case ConnectionError: - sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) - sc.goAway(ErrCode(ev)) - return true // goAway will handle shutdown - default: - if res.err != nil { - sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) - } else { - sc.logf("http2: server closing client connection: %v", err) - } - return false - } -} - -func (sc *serverConn) processFrame(f Frame) error { - sc.serveG.check() - - // First frame received must be SETTINGS. - if !sc.sawFirstSettings { - if _, ok := f.(*SettingsFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } - sc.sawFirstSettings = true - } - - switch f := f.(type) { - case *SettingsFrame: - return sc.processSettings(f) - case *MetaHeadersFrame: - return sc.processHeaders(f) - case *WindowUpdateFrame: - return sc.processWindowUpdate(f) - case *PingFrame: - return sc.processPing(f) - case *DataFrame: - return sc.processData(f) - case *RSTStreamFrame: - return sc.processResetStream(f) - case *PriorityFrame: - return sc.processPriority(f) - case *GoAwayFrame: - return sc.processGoAway(f) - case *PushPromiseFrame: - // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE - // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - default: - sc.vlogf("http2: server ignoring frame: %v", f.Header()) - return nil - } -} - -func (sc *serverConn) processPing(f *PingFrame) error { - sc.serveG.check() - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." - return nil - } - if f.StreamID != 0 { - // "PING frames are not associated with any individual - // stream. If a PING frame is received with a stream - // identifier field value other than 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR." 
- return ConnectionError(ErrCodeProtocol) - } - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) - return nil -} - -func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { - sc.serveG.check() - switch { - case f.StreamID != 0: // stream-level flow control - state, st := sc.state(f.StreamID) - if state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil { - // "WINDOW_UPDATE can be sent by a peer that has sent a - // frame bearing the END_STREAM flag. This means that a - // receiver could receive a WINDOW_UPDATE frame on a "half - // closed (remote)" or "closed" stream. A receiver MUST - // NOT treat this as an error, see Section 5.1." - return nil - } - if !st.flow.add(int32(f.Increment)) { - return streamError(f.StreamID, ErrCodeFlowControl) - } - default: // connection-level flow control - if !sc.flow.add(int32(f.Increment)) { - return goAwayFlowError{} - } - } - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { - sc.serveG.check() - - state, st := sc.state(f.StreamID) - if state == stateIdle { - // 6.4 "RST_STREAM frames MUST NOT be sent for a - // stream in the "idle" state. If a RST_STREAM frame - // identifying an idle stream is received, the - // recipient MUST treat this as a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - } - if st != nil { - st.cancelCtx() - sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) - } - return nil -} - -func (sc *serverConn) closeStream(st *stream, err error) { - sc.serveG.check() - if st.state == stateIdle || st.state == stateClosed { - panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) - } - st.state = stateClosed - if st.writeDeadline != nil { - st.writeDeadline.Stop() - } - if st.isPushed() { - sc.curPushedStreams-- - } else { - sc.curClientStreams-- - } - delete(sc.streams, st.id) - if len(sc.streams) == 0 { - sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { - sc.idleTimer.Reset(sc.srv.IdleTimeout) - } - if h1ServerKeepAlivesDisabled(sc.hs) { - sc.startGracefulShutdownInternal() - } - } - if p := st.body; p != nil { - // Return any buffered unread bytes worth of conn-level flow control. - // See golang.org/issue/16481 - sc.sendWindowUpdate(nil, p.Len()) - - p.CloseWithError(err) - } - st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.CloseStream(st.id) -} - -func (sc *serverConn) processSettings(f *SettingsFrame) error { - sc.serveG.check() - if f.IsAck() { - sc.unackedSettings-- - if sc.unackedSettings < 0 { - // Why is the peer ACKing settings we never sent? - // The spec doesn't mention this case, but - // hang up on them anyway. - return ConnectionError(ErrCodeProtocol) - } - return nil - } - if f.NumSettings() > 100 || f.HasDuplicates() { - // This isn't actually in the spec, but hang up on - // suspiciously large settings frames or those with - // duplicate entries. - return ConnectionError(ErrCodeProtocol) - } - if err := f.ForeachSetting(sc.processSetting); err != nil { - return err - } - // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be - // acknowledged individually, even if multiple are received before the ACK. 
- sc.needToSendSettingsAck = true - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processSetting(s Setting) error { - sc.serveG.check() - if err := s.Valid(); err != nil { - return err - } - if VerboseLogs { - sc.vlogf("http2: server processing setting %v", s) - } - switch s.ID { - case SettingHeaderTableSize: - sc.headerTableSize = s.Val - sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) - case SettingEnablePush: - sc.pushEnabled = s.Val != 0 - case SettingMaxConcurrentStreams: - sc.clientMaxStreams = s.Val - case SettingInitialWindowSize: - return sc.processSettingInitialWindowSize(s.Val) - case SettingMaxFrameSize: - sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 - case SettingMaxHeaderListSize: - sc.peerMaxHeaderListSize = s.Val - default: - // Unknown setting: "An endpoint that receives a SETTINGS - // frame with any unknown or unsupported identifier MUST - // ignore that setting." - if VerboseLogs { - sc.vlogf("http2: server ignoring unknown setting %v", s) - } - } - return nil -} - -func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { - sc.serveG.check() - // Note: val already validated to be within range by - // processSetting's Valid call. - - // "A SETTINGS frame can alter the initial flow control window - // size for all current streams. When the value of - // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST - // adjust the size of all stream flow control windows that it - // maintains by the difference between the new value and the - // old value." - old := sc.initialStreamSendWindowSize - sc.initialStreamSendWindowSize = int32(val) - growth := int32(val) - old // may be negative - for _, st := range sc.streams { - if !st.flow.add(growth) { - // 6.9.2 Initial Flow Control Window Size - // "An endpoint MUST treat a change to - // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow - // control window to exceed the maximum size as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR." - return ConnectionError(ErrCodeFlowControl) - } - } - return nil -} - -func (sc *serverConn) processData(f *DataFrame) error { - sc.serveG.check() - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - data := f.Data() - - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." - id := f.Header().StreamID - state, st := sc.state(id) - if id == 0 || state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { - // This includes sending a RST_STREAM if the stream is - // in stateHalfClosedLocal (which currently means that - // the http.Handler returned, so it's done reading & - // done writing). Try to stop the client from sending - // more DATA. - - // But still enforce their connection-level flow control, - // and return any flow control bytes since we're not going - // to consume them. - if sc.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. 
- sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil, int(f.Length)) // conn-level - - if st != nil && st.resetQueued { - // Already have a stream error in flight. Don't send another. - return nil - } - return streamError(id, ErrCodeStreamClosed) - } - if st.body == nil { - panic("internal error: should have a body in this state") - } - - // Sender sending more than they'd declared? - if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the - // value of a content-length header field does not equal the sum of the - // DATA frame payload lengths that form the body. - return streamError(id, ErrCodeProtocol) - } - if f.Length > 0 { - // Check whether the client has flow control quota. - if st.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - st.inflow.take(int32(f.Length)) - - if len(data) > 0 { - wrote, err := st.body.Write(data) - if err != nil { - return streamError(id, ErrCodeStreamClosed) - } - if wrote != len(data) { - panic("internal error: bad Writer") - } - st.bodyBytes += int64(len(data)) - } - - // Return any padded flow control now, since we won't - // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } - } - if f.StreamEnded() { - st.endStream() - } - return nil -} - -func (sc *serverConn) processGoAway(f *GoAwayFrame) error { - sc.serveG.check() - if f.ErrCode != ErrCodeNo { - sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } else { - sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } - sc.startGracefulShutdownInternal() - // http://tools.ietf.org/html/rfc7540#section-6.8 - // We should not create any new streams, which means we should disable push. - sc.pushEnabled = false - return nil -} - -// isPushed reports whether the stream is server-initiated. -func (st *stream) isPushed() bool { - return st.id%2 == 0 -} - -// endStream closes a Request.Body's pipe. It is called when a DATA -// frame says a request body is over (or after trailers). -func (st *stream) endStream() { - sc := st.sc - sc.serveG.check() - - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) - st.body.CloseWithError(io.EOF) - } - st.state = stateHalfClosedRemote -} - -// copyTrailersToHandlerRequest is run in the Handler's goroutine in -// its Request.Body.Read just before it gets io.EOF. -func (st *stream) copyTrailersToHandlerRequest() { - for k, vv := range st.trailer { - if _, ok := st.reqTrailer[k]; ok { - // Only copy it over it was pre-declared. - st.reqTrailer[k] = vv - } - } -} - -// onWriteTimeout is run on its own goroutine (from time.AfterFunc) -// when the stream's WriteTimeout has fired. -func (st *stream) onWriteTimeout() { - st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) -} - -func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { - sc.serveG.check() - id := f.StreamID - if sc.inGoAway { - // Ignore. 
- return nil - } - // http://tools.ietf.org/html/rfc7540#section-5.1.1 - // Streams initiated by a client MUST use odd-numbered stream - // identifiers. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) - } - // A HEADERS frame can be used to create a new stream or - // send a trailer for an open one. If we already have a stream - // open, let it process its own HEADERS frame (trailers at this - // point, if it's valid). - if st := sc.streams[f.StreamID]; st != nil { - if st.resetQueued { - // We're sending RST_STREAM to close the stream, so don't bother - // processing this frame. - return nil - } - // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than - // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in - // this state, it MUST respond with a stream error (Section 5.4.2) of - // type STREAM_CLOSED. - if st.state == stateHalfClosedRemote { - return streamError(id, ErrCodeStreamClosed) - } - return st.processTrailerHeaders(f) - } - - // [...] The identifier of a newly established stream MUST be - // numerically greater than all streams that the initiating - // endpoint has opened or reserved. [...] An endpoint that - // receives an unexpected stream identifier MUST respond with - // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - if id <= sc.maxClientStreamID { - return ConnectionError(ErrCodeProtocol) - } - sc.maxClientStreamID = id - - if sc.idleTimer != nil { - sc.idleTimer.Stop() - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.2 - // [...] Endpoints MUST NOT exceed the limit set by their peer. An - // endpoint that receives a HEADERS frame that causes their - // advertised concurrent stream limit to be exceeded MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR - // or REFUSED_STREAM. - if sc.curClientStreams+1 > sc.advMaxStreams { - if sc.unackedSettings == 0 { - // They should know better. - return streamError(id, ErrCodeProtocol) - } - // Assume it's a network race, where they just haven't - // received our last SETTINGS update. But actually - // this can't happen yet, because we don't yet provide - // a way for users to adjust server parameters at - // runtime. - return streamError(id, ErrCodeRefusedStream) - } - - initialState := stateOpen - if f.StreamEnded() { - initialState = stateHalfClosedRemote - } - st := sc.newStream(id, 0, initialState) - - if f.HasPriority() { - if err := checkPriority(f.StreamID, f.Priority); err != nil { - return err - } - sc.writeSched.AdjustStream(st.id, f.Priority) - } - - rw, req, err := sc.newWriterAndRequest(st, f) - if err != nil { - return err - } - st.reqTrailer = req.Trailer - if st.reqTrailer != nil { - st.trailer = make(http.Header) - } - st.body = req.Body.(*requestBody).pipe // may be nil - st.declBodyBytes = req.ContentLength - - handler := sc.handler.ServeHTTP - if f.Truncated { - // Their header list was too long. Send a 431 error. - handler = handleHeaderListTooLong - } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { - handler = new400Handler(err) - } - - // The net/http package sets the read deadline from the - // http.Server.ReadTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here after the request headers are read, - // similar to how the http1 server works. 
Here it's - // technically more like the http1 Server's ReadHeaderTimeout - // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { - sc.conn.SetReadDeadline(time.Time{}) - } - - go sc.runHandler(rw, req, handler) - return nil -} - -func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return streamError(st.id, ErrCodeProtocol) - } - - if len(f.PseudoFields()) > 0 { - return streamError(st.id, ErrCodeProtocol) - } - if st.trailer != nil { - for _, hf := range f.RegularFields() { - key := sc.canonicalHeader(hf.Name) - if !httpguts.ValidTrailerHeader(key) { - // TODO: send more details to the peer somehow. But http2 has - // no way to send debug data at a stream level. Discuss with - // HTTP folk. - return streamError(st.id, ErrCodeProtocol) - } - st.trailer[key] = append(st.trailer[key], hf.Value) - } - } - st.endStream() - return nil -} - -func checkPriority(streamID uint32, p PriorityParam) error { - if streamID == p.StreamDep { - // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." - // Section 5.3.3 says that a stream can depend on one of its dependencies, - // so it's only self-dependencies that are forbidden. - return streamError(streamID, ErrCodeProtocol) - } - return nil -} - -func (sc *serverConn) processPriority(f *PriorityFrame) error { - if sc.inGoAway { - return nil - } - if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { - return err - } - sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) - return nil -} - -func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { - sc.serveG.check() - if id == 0 { - panic("internal error: cannot create stream with id 0") - } - - ctx, cancelCtx := context.WithCancel(sc.baseCtx) - st := &stream{ - sc: sc, - id: id, - state: state, - ctx: ctx, - cancelCtx: cancelCtx, - } - st.cw.Init() - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) - } - - sc.streams[id] = st - sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) - if st.isPushed() { - sc.curPushedStreams++ - } else { - sc.curClientStreams++ - } - if sc.curOpenStreams() == 1 { - sc.setConnState(http.StateActive) - } - - return st -} - -func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - } - - isConnect := rp.method == "CONNECT" - if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { - // See 8.1.2.6 Malformed Requests and Responses: - // - // Malformed requests or responses that are detected - // MUST be treated as a stream error (Section 5.4.2) - // of type PROTOCOL_ERROR." 
- // - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid - // value for the :method, :scheme, and :path - // pseudo-header fields" - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - rp.header = make(http.Header) - for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) - } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") - } - - rw, req, err := sc.newWriterAndRequestNoBody(st, rp) - if err != nil { - return nil, nil, err - } - if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - req.Body.(*requestBody).pipe = &pipe{ - b: &dataBuffer{expected: req.ContentLength}, - } - } - return rw, req, nil -} - -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { - tlsState = sc.tlsState - } - - needsContinue := rp.header.Get("Expect") == "100-continue" - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, streamError(st.id, ErrCodeProtocol) - } - requestURI = rp.path - } - - body := &requestBody{ - conn: sc, - stream: st, - needsContinue: needsContinue, - } - req := &http.Request{ - Method: rp.method, - URL: url_, - RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, - Proto: "HTTP/2.0", - ProtoMajor: 2, - ProtoMinor: 0, - TLS: tlsState, - Host: rp.authority, - Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - - rws := responseWriterStatePool.Get().(*responseWriterState) - bwSave := rws.bw - *rws = responseWriterState{} // zero all the fields - rws.conn = sc - rws.bw = bwSave - rws.bw.Reset(chunkWriter{rws}) - rws.stream = st - rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil -} - -// Run on its own goroutine. 
-func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - didPanic := true - defer func() { - rw.rws.stream.cancelCtx() - if didPanic { - e := recover() - sc.writeFrameFromHandler(FrameWriteRequest{ - write: handlerPanicRST{rw.rws.stream.id}, - stream: rw.rws.stream, - }) - // Same as net/http: - if e != nil && e != http.ErrAbortHandler { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) - } - return - } - rw.handlerDone() - }() - handler(rw, req) - didPanic = false -} - -func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { - // 10.5.1 Limits on Header Block Size: - // .. "A server that receives a larger header block than it is - // willing to handle can send an HTTP 431 (Request Header Fields Too - // Large) status code" - const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ - w.WriteHeader(statusRequestHeaderFieldsTooLarge) - io.WriteString(w, "

<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") -} - -// called from handler goroutines. -// h may be nil. -func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { - sc.serveG.checkNotOn() // NOT on - var errc chan error - if headerData.h != nil { - // If there's a header map (which we don't own), so we have to block on - // waiting for this frame to be written, so an http.Flush mid-handler - // writes out the correct value of keys, before a handler later potentially - // mutates it. - errc = errChanPool.Get().(chan error) - } - if err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: headerData, - stream: st, - done: errc, - }); err != nil { - return err - } - if errc != nil { - select { - case err := <-errc: - errChanPool.Put(errc) - return err - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - } - } - return nil -} - -// called from handler goroutines. -func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(FrameWriteRequest{ - write: write100ContinueHeadersFrame{st.id}, - stream: st, - }) -} - -// A bodyReadMsg tells the server loop that the http.Handler read n -// bytes of the DATA from the client on the given stream. -type bodyReadMsg struct { - st *stream - n int -} - -// called from handler goroutines. -// Notes that the handler for the given stream ID read n bytes of its body -// and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { - sc.serveG.checkNotOn() // NOT on - if n > 0 { - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: - } - } -} - -func (sc *serverConn) noteBodyRead(st *stream, n int) { - sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level - if st.state != stateHalfClosedRemote && st.state != stateClosed { - // Don't send this WINDOW_UPDATE if the stream is closed - // remotely. - sc.sendWindowUpdate(st, n) - } -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. - const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { - sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } - var streamID uint32 - if st != nil { - streamID = st.id - } - sc.writeFrame(FrameWriteRequest{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, - stream: st, - }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } -} - -// requestBody is the Handler's Request.Body type. -// Read and Close may be called concurrently. 
-type requestBody struct { - stream *stream - conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue -} - -func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true - return nil -} - -func (b *requestBody) Read(p []byte) (n int, err error) { - if b.needsContinue { - b.needsContinue = false - b.conn.write100ContinueHeaders(b.stream) - } - if b.pipe == nil || b.sawEOF { - return 0, io.EOF - } - n, err = b.pipe.Read(p) - if err == io.EOF { - b.sawEOF = true - } - if b.conn == nil && inTests { - return - } - b.conn.noteBodyReadFromHandler(b.stream, n, err) - return -} - -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The -// responseWriterState pointer inside is zeroed at the end of a -// request (in handlerDone) and calls on the responseWriter thereafter -// simply crash (caller's mistake), but the much larger responseWriterState -// and buffers are reused between multiple requests. -type responseWriter struct { - rws *responseWriterState -} - -// Optional http.ResponseWriter interfaces implemented. -var ( - _ http.CloseNotifier = (*responseWriter)(nil) - _ http.Flusher = (*responseWriter)(nil) - _ stringWriter = (*responseWriter)(nil) -) - -type responseWriterState struct { - // immutable within a request: - stream *stream - req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't - conn *serverConn - - // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc - bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} - - // mutated by http.Handler goroutine: - handlerHeader http.Header // nil until called - snapHeader http.Header // snapshot of handlerHeader at WriteHeader time - trailers []string // set in writeChunk - status int // status code passed to WriteHeader - wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. - sentHeader bool // have we sent the header frame? - handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState - - sentContentLen int64 // non-zero if handler set a Content-Length header - wroteBytes int64 - - closeNotifierMu sync.Mutex // guards closeNotifierCh - closeNotifierCh chan bool // nil until first used -} - -type chunkWriter struct{ rws *responseWriterState } - -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } - -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 } - -func (rws *responseWriterState) hasNonemptyTrailers() bool { - for _, trailer := range rws.trailers { - if _, ok := rws.handlerHeader[trailer]; ok { - return true - } - } - return false -} - -// declareTrailer is called for each Trailer header when the -// response header is written. It notes that a header will need to be -// written in the trailers at the end of the response. -func (rws *responseWriterState) declareTrailer(k string) { - k = http.CanonicalHeaderKey(k) - if !httpguts.ValidTrailerHeader(k) { - // Forbidden by RFC 7230, section 4.1.2. 
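// A small standalone sketch (separate from the vendored code) of the same check
// declareTrailer relies on here: httpguts.ValidTrailerHeader rejects the header
// names RFC 7230 §4.1.2 forbids in trailers. The sample header names are
// hypothetical.
package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	for _, k := range []string{"Grpc-Status", "Content-Length", "Transfer-Encoding", "Trailer"} {
		// Only the first of these is a legal trailer name; the rest are forbidden.
		fmt.Printf("%-17s valid trailer: %v\n", k, httpguts.ValidTrailerHeader(k))
	}
}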
- rws.conn.logf("ignoring invalid trailer %q", k) - return - } - if !strSliceContains(rws.trailers, k) { - rws.trailers = append(rws.trailers, k) - } -} - -// writeChunk writes chunks from the bufio.Writer. But because -// bufio.Writer may bypass its chunking, sometimes p may be -// arbitrarily large. -// -// writeChunk is also responsible (on the first chunk) for sending the -// HEADER response. -func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { - if !rws.wroteHeader { - rws.writeHeader(200) - } - - isHeadResp := rws.req.Method == "HEAD" - if !rws.sentHeader { - rws.sentHeader = true - var ctype, clen string - if clen = rws.snapHeader.Get("Content-Length"); clen != "" { - rws.snapHeader.Del("Content-Length") - clen64, err := strconv.ParseInt(clen, 10, 64) - if err == nil && clen64 >= 0 { - rws.sentContentLen = clen64 - } else { - clen = "" - } - } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { - clen = strconv.Itoa(len(p)) - } - _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { - ctype = http.DetectContentType(p) - } - var date string - if _, ok := rws.snapHeader["Date"]; !ok { - // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) - } - - for _, v := range rws.snapHeader["Trailer"] { - foreachHeaderElement(v, rws.declareTrailer) - } - - // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), - // but respect "Connection" == "close" to mean sending a GOAWAY and tearing - // down the TCP connection when idle, like we do for HTTP/1. - // TODO: remove more Connection-specific header fields here, in addition - // to "Connection". - if _, ok := rws.snapHeader["Connection"]; ok { - v := rws.snapHeader.Get("Connection") - delete(rws.snapHeader, "Connection") - if v == "close" { - rws.conn.startGracefulShutdown() - } - } - - endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - httpResCode: rws.status, - h: rws.snapHeader, - endStream: endStream, - contentType: ctype, - contentLength: clen, - date: date, - }) - if err != nil { - rws.dirty = true - return 0, err - } - if endStream { - return 0, nil - } - } - if isHeadResp { - return len(p), nil - } - if len(p) == 0 && !rws.handlerDone { - return 0, nil - } - - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - - // only send trailers if they have actually been defined by the - // server handler. - hasNonemptyTrailers := rws.hasNonemptyTrailers() - endStream := rws.handlerDone && !hasNonemptyTrailers - if len(p) > 0 || endStream { - // only send a 0 byte DATA frame if we're ending the stream. - if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true - return 0, err - } - } - - if rws.handlerDone && hasNonemptyTrailers { - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - h: rws.handlerHeader, - trailers: rws.trailers, - endStream: true, - }) - if err != nil { - rws.dirty = true - } - return len(p), err - } - return len(p), nil -} - -// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys -// that, if present, signals that the map entry is actually for -// the response trailers, and not the response headers. 
The prefix -// is stripped after the ServeHTTP call finishes and the values are -// sent in the trailers. -// -// This mechanism is intended only for trailers that are not known -// prior to the headers being written. If the set of trailers is fixed -// or known before the header is written, the normal Go trailers mechanism -// is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers -const TrailerPrefix = "Trailer:" - -// promoteUndeclaredTrailers permits http.Handlers to set trailers -// after the header has already been flushed. Because the Go -// ResponseWriter interface has no way to set Trailers (only the -// Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 7230 -// says you SHOULD (but not must) predeclare any trailers in the -// header, the official ResponseWriter rules said trailers in Go must -// be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. When it's time to write the -// Trailers, we pick out the fields of Headers that were declared as -// trailers. That worked for a while, until we found the first major -// user of Trailers in the wild: gRPC (using them only over http2), -// and gRPC libraries permit setting trailers mid-stream without -// predeclarnig them. So: change of plans. We still permit the old -// way, but we also permit this hack: if a Header() key begins with -// "Trailer:", the suffix of that key is a Trailer. Because ':' is an -// invalid token byte anyway, there is no ambiguity. (And it's already -// filtered out) It's mildly hacky, but not terrible. -// -// This method runs after the Handler is done and promotes any Header -// fields to be trailers. -func (rws *responseWriterState) promoteUndeclaredTrailers() { - for k, vv := range rws.handlerHeader { - if !strings.HasPrefix(k, TrailerPrefix) { - continue - } - trailerKey := strings.TrimPrefix(k, TrailerPrefix) - rws.declareTrailer(trailerKey) - rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv - } - - if len(rws.trailers) > 1 { - sorter := sorterPool.Get().(*sorter) - sorter.SortStrings(rws.trailers) - sorterPool.Put(sorter) - } -} - -func (w *responseWriter) Flush() { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } - } else { - // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it - // ourselves to force the HTTP response header and/or - // final DATA frame (with END_STREAM) to be sent. - rws.writeChunk(nil) - } -} - -func (w *responseWriter) CloseNotify() <-chan bool { - rws := w.rws - if rws == nil { - panic("CloseNotify called after Handler finished") - } - rws.closeNotifierMu.Lock() - ch := rws.closeNotifierCh - if ch == nil { - ch = make(chan bool, 1) - rws.closeNotifierCh = ch - cw := rws.stream.cw - go func() { - cw.Wait() // wait for close - ch <- true - }() - } - rws.closeNotifierMu.Unlock() - return ch -} - -func (w *responseWriter) Header() http.Header { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.handlerHeader == nil { - rws.handlerHeader = make(http.Header) - } - return rws.handlerHeader -} - -// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. 
-func checkWriteHeaderCode(code int) { - // Issue 22880: require valid WriteHeader status codes. - // For now we only enforce that it's three digits. - // In the future we might block things over 599 (600 and above aren't defined - // at http://httpwg.org/specs/rfc7231.html#status.codes) - // and we might block under 200 (once we have more mature 1xx support). - // But for now any three digits. - // - // We used to send "HTTP/1.1 000 0" on the wire in responses but there's - // no equivalent bogus thing we can realistically send in HTTP/2, - // so we'll consistently panic instead and help people find their bugs - // early. (We can't return an error from WriteHeader even if we wanted to.) - if code < 100 || code > 999 { - panic(fmt.Sprintf("invalid WriteHeader code %v", code)) - } -} - -func (w *responseWriter) WriteHeader(code int) { - rws := w.rws - if rws == nil { - panic("WriteHeader called after Handler finished") - } - rws.writeHeader(code) -} - -func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - checkWriteHeaderCode(code) - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) - } - } -} - -func cloneHeader(h http.Header) http.Header { - h2 := make(http.Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} - -// The Life Of A Write is like this: -// -// * Handler calls w.Write or w.WriteString -> -// * -> rws.bw (*bufio.Writer) -> -// * (Handler might call Flush) -// * -> chunkWriter{rws} -// * -> responseWriterState.writeChunk(p []byte) -// * -> responseWriterState.writeChunk (most of the magic; see comment there) -func (w *responseWriter) Write(p []byte) (n int, err error) { - return w.write(len(p), p, "") -} - -func (w *responseWriter) WriteString(s string) (n int, err error) { - return w.write(len(s), nil, s) -} - -// either dataB or dataS is non-zero. -func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { - rws := w.rws - if rws == nil { - panic("Write called after Handler finished") - } - if !rws.wroteHeader { - w.WriteHeader(200) - } - if !bodyAllowedForStatus(rws.status) { - return 0, http.ErrBodyNotAllowed - } - rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set - if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { - // TODO: send a RST_STREAM - return 0, errors.New("http2: handler wrote more than declared Content-Length") - } - - if dataB != nil { - return rws.bw.Write(dataB) - } else { - return rws.bw.WriteString(dataS) - } -} - -func (w *responseWriter) handlerDone() { - rws := w.rws - dirty := rws.dirty - rws.handlerDone = true - w.Flush() - w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } -} - -// Push errors. 
-var ( - ErrRecursivePush = errors.New("http2: recursive push not allowed") - ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") -) - -var _ http.Pusher = (*responseWriter)(nil) - -func (w *responseWriter) Push(target string, opts *http.PushOptions) error { - st := w.rws.stream - sc := st.sc - sc.serveG.checkNotOn() - - // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." - // http://tools.ietf.org/html/rfc7540#section-6.6 - if st.isPushed() { - return ErrRecursivePush - } - - if opts == nil { - opts = new(http.PushOptions) - } - - // Default options. - if opts.Method == "" { - opts.Method = "GET" - } - if opts.Header == nil { - opts.Header = http.Header{} - } - wantScheme := "http" - if w.rws.req.TLS != nil { - wantScheme = "https" - } - - // Validate the request. - u, err := url.Parse(target) - if err != nil { - return err - } - if u.Scheme == "" { - if !strings.HasPrefix(target, "/") { - return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) - } - u.Scheme = wantScheme - u.Host = w.rws.req.Host - } else { - if u.Scheme != wantScheme { - return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) - } - if u.Host == "" { - return errors.New("URL must have a host") - } - } - for k := range opts.Header { - if strings.HasPrefix(k, ":") { - return fmt.Errorf("promised request headers cannot include pseudo header %q", k) - } - // These headers are meaningful only if the request has a body, - // but PUSH_PROMISE requests cannot have a body. - // http://tools.ietf.org/html/rfc7540#section-8.2 - // Also disallow Host, since the promised URL must be absolute. - switch strings.ToLower(k) { - case "content-length", "content-encoding", "trailer", "te", "expect", "host": - return fmt.Errorf("promised request headers cannot include %q", k) - } - } - if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { - return err - } - - // The RFC effectively limits promised requests to GET and HEAD: - // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" - // http://tools.ietf.org/html/rfc7540#section-8.2 - if opts.Method != "GET" && opts.Method != "HEAD" { - return fmt.Errorf("method %q must be GET or HEAD", opts.Method) - } - - msg := &startPushRequest{ - parent: st, - method: opts.Method, - url: u, - header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case sc.serveMsgCh <- msg: - } - - select { - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - case err := <-msg.done: - errChanPool.Put(msg.done) - return err - } -} - -type startPushRequest struct { - parent *stream - method string - url *url.URL - header http.Header - done chan error -} - -func (sc *serverConn) startPush(msg *startPushRequest) { - sc.serveG.check() - - // http://tools.ietf.org/html/rfc7540#section-6.6. - // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that - // is in either the "open" or "half-closed (remote)" state. - if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { - // responseWriter.Push checks that the stream is peer-initiaed. - msg.done <- errStreamClosed - return - } - - // http://tools.ietf.org/html/rfc7540#section-6.6. 
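// A hedged usage sketch (separate from the vendored code): the Push method above
// is reached through the standard http.Pusher interface from an ordinary handler.
// The asset path, certificate file names, and response body are placeholders.
package main

import (
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	if p, ok := w.(http.Pusher); ok {
		// Best effort: Push returns http.ErrNotSupported when the peer has
		// disabled SETTINGS_ENABLE_PUSH, matching the pushEnabled check below.
		if err := p.Push("/static/app.css", nil); err != nil {
			log.Printf("push failed: %v", err)
		}
	}
	w.Write([]byte("hello"))
}

func main() {
	http.HandleFunc("/", handler)
	// HTTP/2 (and therefore server push) requires TLS with the standard server.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}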
- if !sc.pushEnabled { - msg.done <- http.ErrNotSupported - return - } - - // PUSH_PROMISE frames must be sent in increasing order by stream ID, so - // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE - // is written. Once the ID is allocated, we start the request handler. - allocatePromisedID := func() (uint32, error) { - sc.serveG.check() - - // Check this again, just in case. Technically, we might have received - // an updated SETTINGS by the time we got around to writing this frame. - if !sc.pushEnabled { - return 0, http.ErrNotSupported - } - // http://tools.ietf.org/html/rfc7540#section-6.5.2. - if sc.curPushedStreams+1 > sc.clientMaxStreams { - return 0, ErrPushLimitReached - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.1. - // Streams initiated by the server MUST use even-numbered identifiers. - // A server that is unable to establish a new stream identifier can send a GOAWAY - // frame so that the client is forced to open a new connection for new streams. - if sc.maxPushPromiseID+2 >= 1<<31 { - sc.startGracefulShutdownInternal() - return 0, ErrPushLimitReached - } - sc.maxPushPromiseID += 2 - promisedID := sc.maxPushPromiseID - - // http://tools.ietf.org/html/rfc7540#section-8.2. - // Strictly speaking, the new stream should start in "reserved (local)", then - // transition to "half closed (remote)" after sending the initial HEADERS, but - // we start in "half closed (remote)" for simplicity. - // See further comments at the definition of stateHalfClosedRemote. - promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE - }) - if err != nil { - // Should not happen, since we've already validated msg.url. - panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) - } - - go sc.runHandler(rw, req, sc.handler.ServeHTTP) - return promisedID, nil - } - - sc.writeFrame(FrameWriteRequest{ - write: &writePushPromise{ - streamID: msg.parent.id, - method: msg.method, - url: msg.url, - h: msg.header, - allocatePromisedID: allocatePromisedID, - }, - stream: msg.parent, - done: msg.done, - }) -} - -// foreachHeaderElement splits v according to the "#rule" construction -// in RFC 7230 section 7 and calls fn for each non-empty element. -func foreachHeaderElement(v string, fn func(string)) { - v = textproto.TrimString(v) - if v == "" { - return - } - if !strings.Contains(v, ",") { - fn(v) - return - } - for _, f := range strings.Split(v, ",") { - if f = textproto.TrimString(f); f != "" { - fn(f) - } - } -} - -// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 -var connHeaders = []string{ - "Connection", - "Keep-Alive", - "Proxy-Connection", - "Transfer-Encoding", - "Upgrade", -} - -// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, -// per RFC 7540 Section 8.1.2.2. -// The returned error is reported to users. 
-func checkValidHTTP2RequestHeaders(h http.Header) error { - for _, k := range connHeaders { - if _, ok := h[k]; ok { - return fmt.Errorf("request header %q is not valid in HTTP/2", k) - } - } - te := h["Te"] - if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { - return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) - } - return nil -} - -func new400Handler(err error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - http.Error(w, err.Error(), http.StatusBadRequest) - } -} - -// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives -// disabled. See comments on h1ServerShutdownChan above for why -// the code is written this way. -func h1ServerKeepAlivesDisabled(hs *http.Server) bool { - var x interface{} = hs - type I interface { - doKeepAlives() bool - } - if hs, ok := x.(I); ok { - return !hs.doKeepAlives() - } - return false -} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go deleted file mode 100644 index aeac7d8a5..000000000 --- a/vendor/golang.org/x/net/http2/transport.go +++ /dev/null @@ -1,2610 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code. - -package http2 - -import ( - "bufio" - "bytes" - "compress/gzip" - "context" - "crypto/rand" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "math" - mathrand "math/rand" - "net" - "net/http" - "net/http/httptrace" - "net/textproto" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/http/httpguts" - "golang.org/x/net/http2/hpack" - "golang.org/x/net/idna" -) - -const ( - // transportDefaultConnFlow is how many connection-level flow control - // tokens we give the server at start-up, past the default 64k. - transportDefaultConnFlow = 1 << 30 - - // transportDefaultStreamFlow is how many stream-level flow - // control tokens we announce to the peer, and how many bytes - // we buffer per stream. - transportDefaultStreamFlow = 4 << 20 - - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - - defaultUserAgent = "Go-http-client/2.0" -) - -// Transport is an HTTP/2 Transport. -// -// A Transport internally caches connections to servers. It is safe -// for concurrent use by multiple goroutines. -type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. - // - // If DialTLS is nil, tls.Dial is used. - // - // If the returned net.Conn has a ConnectionState method like tls.Conn, - // it will be used to set http.Response.TLS. - DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // ConnPool optionally specifies an alternate connection pool to use. - // If nil, the default is used. - ConnPool ClientConnPool - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. 
However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. - DisableCompression bool - - // AllowHTTP, if true, permits HTTP/2 requests using the insecure, - // plain-text "http" scheme. Note that this does not enable h2c support. - AllowHTTP bool - - // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to - // send in the initial settings frame. It is how many bytes - // of response headers are allowed. Unlike the http2 spec, zero here - // means to use a default limit (currently 10MB). If you actually - // want to advertise an ulimited value to the peer, Transport - // interprets the highest possible value here (0xffffffff or 1<<32-1) - // to mean no limit. - MaxHeaderListSize uint32 - - // StrictMaxConcurrentStreams controls whether the server's - // SETTINGS_MAX_CONCURRENT_STREAMS should be respected - // globally. If false, new TCP connections are created to the - // server as needed to keep each under the per-connection - // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the - // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as - // a global limit and callers of RoundTrip block when needed, - // waiting for their turn. - StrictMaxConcurrentStreams bool - - // t1, if non-nil, is the standard library Transport using - // this transport. Its settings are used (but not its - // RoundTrip method, etc). - t1 *http.Transport - - connPoolOnce sync.Once - connPoolOrDef ClientConnPool // non-nil version of ConnPool -} - -func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { - return 10 << 20 - } - if t.MaxHeaderListSize == 0xffffffff { - return 0 - } - return t.MaxHeaderListSize -} - -func (t *Transport) disableCompression() bool { - return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) -} - -// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. -// It returns an error if t1 has already been HTTP/2-enabled. -func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) - return err -} - -func configureTransport(t1 *http.Transport) (*Transport, error) { - connPool := new(clientConnPool) - t2 := &Transport{ - ConnPool: noDialClientConnPool{connPool}, - t1: t1, - } - connPool.t = t2 - if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { - return nil, err - } - if t1.TLSClientConfig == nil { - t1.TLSClientConfig = new(tls.Config) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { - t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { - t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") - } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) - if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { - go c.Close() - return erringRoundTripper{err} - } else if !used { - // Turns out we don't need this c. - // For example, two goroutines made requests to the same host - // at the same time, both kicking off TCP dials. 
(since protocol - // was unknown) - go c.Close() - } - return t2 - } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, - } - } else { - m["h2"] = upgradeFn - } - return t2, nil -} - -func (t *Transport) connPool() ClientConnPool { - t.connPoolOnce.Do(t.initConnPool) - return t.connPoolOrDef -} - -func (t *Transport) initConnPool() { - if t.ConnPool != nil { - t.connPoolOrDef = t.ConnPool - } else { - t.connPoolOrDef = &clientConnPool{t: t} - } -} - -// ClientConn is the state of a single HTTP/2 client connection to an -// HTTP/2 server. -type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic - singleUse bool // whether being used for a single http.Request - - // readLoop goroutine fields: - readerDone chan struct{} // closed on error - readerErr error // set before readerDone is closed - - idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer - - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closing bool - closed bool - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - bw *bufio.Writer - br *bufio.Reader - fr *Framer - lastActive time.Time - // Settings from peer: (also guarded by mu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - initialWindowSize uint32 - - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte - - wmu sync.Mutex // held while writing; acquire AFTER mu if holding both - werr error // first write error that has occurred -} - -// clientStream is the state for a single HTTP/2 stream. One of these -// is created for each Transport.RoundTrip call. 
-type clientStream struct { - cc *ClientConn - req *http.Request - trace *httptrace.ClientTrace // or nil - ID uint32 - resc chan resAndError - bufPipe pipe // buffered pipe with the flow-controlled response payload - startedWrite bool // started request body write; guarded by cc.mu - requestedGzip bool - on100 func() // optional code to run if get a 100 continue response - - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu - didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu - - peerReset chan struct{} // closed on peer reset - resetErr error // populated before peerReset is closed - - done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu - - // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - - trailer http.Header // accumulated trailers - resTrailer *http.Header // client's Response.Trailer -} - -// awaitRequestCancel waits for the user to cancel a request or for the done -// channel to be signaled. A non-nil error is returned only if the request was -// canceled. -func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { - ctx := req.Context() - if req.Cancel == nil && ctx.Done() == nil { - return nil - } - select { - case <-req.Cancel: - return errRequestCanceled - case <-ctx.Done(): - return ctx.Err() - case <-done: - return nil - } -} - -var got1xxFuncForTests func(int, textproto.MIMEHeader) error - -// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, -// if any. It returns nil if not set or if the Go version is too old. -func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { - if fn := got1xxFuncForTests; fn != nil { - return fn - } - return traceGot1xxResponseFunc(cs.trace) -} - -// awaitRequestCancel waits for the user to cancel a request, its context to -// expire, or for the request to be done (any way it might be removed from the -// cc.streams map: peer reset, successful completion, TCP connection breakage, -// etc). If the request is canceled, then cs will be canceled and closed. -func (cs *clientStream) awaitRequestCancel(req *http.Request) { - if err := awaitRequestCancel(req, cs.done); err != nil { - cs.cancelStream() - cs.bufPipe.CloseWithError(err) - } -} - -func (cs *clientStream) cancelStream() { - cc := cs.cc - cc.mu.Lock() - didReset := cs.didReset - cs.didReset = true - cc.mu.Unlock() - - if !didReset { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - cc.forgetStreamID(cs.ID) - } -} - -// checkResetOrDone reports any error sent in a RST_STREAM frame by the -// server, or errStreamClosed if the stream is complete. 
-func (cs *clientStream) checkResetOrDone() error { - select { - case <-cs.peerReset: - return cs.resetErr - case <-cs.done: - return errStreamClosed - default: - return nil - } -} - -func (cs *clientStream) getStartedWrite() bool { - cc := cs.cc - cc.mu.Lock() - defer cc.mu.Unlock() - return cs.startedWrite -} - -func (cs *clientStream) abortRequestBodyWrite(err error) { - if err == nil { - panic("nil error") - } - cc := cs.cc - cc.mu.Lock() - cs.stopReqBody = err - cc.cond.Broadcast() - cc.mu.Unlock() -} - -type stickyErrWriter struct { - w io.Writer - err *error -} - -func (sew stickyErrWriter) Write(p []byte) (n int, err error) { - if *sew.err != nil { - return 0, *sew.err - } - n, err = sew.w.Write(p) - *sew.err = err - return -} - -// noCachedConnError is the concrete type of ErrNoCachedConn, which -// needs to be detected by net/http regardless of whether it's its -// bundled version (in h2_bundle.go with a rewritten type name) or -// from a user's x/net/http2. As such, as it has a unique method name -// (IsHTTP2NoCachedConnError) that net/http sniffs for via func -// isNoCachedConnError. -type noCachedConnError struct{} - -func (noCachedConnError) IsHTTP2NoCachedConnError() {} -func (noCachedConnError) Error() string { return "http2: no cached connection was available" } - -// isNoCachedConnError reports whether err is of type noCachedConnError -// or its equivalent renamed type in net/http2's h2_bundle.go. Both types -// may coexist in the same running program. -func isNoCachedConnError(err error) bool { - _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) - return ok -} - -var ErrNoCachedConn error = noCachedConnError{} - -// RoundTripOpt are options for the Transport.RoundTripOpt method. -type RoundTripOpt struct { - // OnlyCachedConn controls whether RoundTripOpt may - // create a new TCP connection. If set true and - // no cached connection is available, RoundTripOpt - // will return ErrNoCachedConn. - OnlyCachedConn bool -} - -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.RoundTripOpt(req, RoundTripOpt{}) -} - -// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) -// and returns a host:port. The port 443 is added if needed. -func authorityAddr(scheme string, authority string) (addr string) { - host, port, err := net.SplitHostPort(authority) - if err != nil { // authority didn't have a port - port = "443" - if scheme == "http" { - port = "80" - } - host = authority - } - if a, err := idna.ToASCII(host); err == nil { - host = a - } - // IPv6 address literal, without a port: - if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { - return host + ":" + port - } - return net.JoinHostPort(host, port) -} - -// RoundTripOpt is like RoundTrip, but takes options. 
-func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { - return nil, errors.New("http2: unsupported scheme") - } - - addr := authorityAddr(req.URL.Scheme, req.URL.Host) - for retry := 0; ; retry++ { - cc, err := t.connPool().GetClientConn(req, addr) - if err != nil { - t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) - return nil, err - } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) - traceGotConn(req, cc, reused) - res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) - if err != nil && retry <= 6 { - if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { - // After the first retry, do exponential backoff with 10% jitter. - if retry == 0 { - continue - } - backoff := float64(uint(1) << (uint(retry) - 1)) - backoff += backoff * (0.1 * mathrand.Float64()) - select { - case <-time.After(time.Second * time.Duration(backoff)): - continue - case <-req.Context().Done(): - return nil, req.Context().Err() - } - } - } - if err != nil { - t.vlogf("RoundTrip failure: %v", err) - return nil, err - } - return res, nil - } -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle. -// It does not interrupt any connections currently in use. -func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { - cp.closeIdleConnections() - } -} - -var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") -) - -// shouldRetryRequest is called by RoundTrip when a request fails to get -// response headers. It is always called with a non-nil error. -// It returns either a request to retry (either the same request, or a -// modified clone), or an error if the request can't be replayed. -func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { - if !canRetryError(err) { - return nil, err - } - // If the Body is nil (or http.NoBody), it's safe to reuse - // this request and its Body. - if req.Body == nil || req.Body == http.NoBody { - return req, nil - } - - // If the request body can be reset back to its original - // state via the optional req.GetBody, do that. - if req.GetBody != nil { - // TODO: consider a req.Body.Close here? or audit that all caller paths do? - body, err := req.GetBody() - if err != nil { - return nil, err - } - newReq := *req - newReq.Body = body - return &newReq, nil - } - - // The Request.Body can't reset back to the beginning, but we - // don't seem to have started to read from it yet, so reuse - // the request directly. The "afterBodyWrite" means the - // bodyWrite process has started, which becomes true before - // the first Read. 
- if !afterBodyWrite { - return req, nil - } - - return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) -} - -func canRetryError(err error) bool { - if err == errClientConnUnusable || err == errClientConnGotGoAway { - return true - } - if se, ok := err.(StreamError); ok { - return se.Code == ErrCodeRefusedStream - } - return false -} - -func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) - if err != nil { - return nil, err - } - return t.newClientConn(tconn, singleUse) -} - -func (t *Transport) newTLSConfig(host string) *tls.Config { - cfg := new(tls.Config) - if t.TLSClientConfig != nil { - *cfg = *t.TLSClientConfig.Clone() - } - if !strSliceContains(cfg.NextProtos, NextProtoTLS) { - cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) - } - if cfg.ServerName == "" { - cfg.ServerName = host - } - return cfg -} - -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS - } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil -} - -// disableKeepAlives reports whether connections should be closed as -// soon as possible after handling the first request. -func (t *Transport) disableKeepAlives() bool { - return t.t1 != nil && t.t1.DisableKeepAlives -} - -func (t *Transport) expectContinueTimeout() time.Duration { - if t.t1 == nil { - return 0 - } - return t.t1.ExpectContinueTimeout -} - -func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, false) -} - -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { - cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) - } - if VerboseLogs { - t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) - } - - cc.cond = sync.NewCond(&cc.mu) - cc.flow.add(int32(initialWindowSize)) - - // TODO: adjust this writer size to account for frame size + - // MTU + crypto/tls record padding. 
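// A minimal sketch (separate from the vendored code) of the sticky-error pattern
// that stickyErrWriter above implements and that the bufio.Writer below is wired
// to: once a write fails, every later write reports that first error.
// stickyWriter and failAfter are hypothetical names.
package main

import (
	"errors"
	"fmt"
	"io"
)

type stickyWriter struct {
	w   io.Writer
	err *error // first write error, shared with the connection owner
}

func (sw stickyWriter) Write(p []byte) (int, error) {
	if *sw.err != nil {
		return 0, *sw.err // keep surfacing the first failure
	}
	n, err := sw.w.Write(p)
	*sw.err = err
	return n, err
}

// failAfter succeeds n times, then always fails, standing in for a dying conn.
type failAfter struct{ n int }

func (f *failAfter) Write(p []byte) (int, error) {
	if f.n <= 0 {
		return 0, errors.New("broken pipe")
	}
	f.n--
	return len(p), nil
}

func main() {
	var werr error
	bw := stickyWriter{w: &failAfter{n: 1}, err: &werr}
	for i := 0; i < 3; i++ {
		_, err := bw.Write([]byte("frame"))
		fmt.Println(i, err)
	}
	fmt.Println("sticky error:", werr)
}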
- cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) - cc.br = bufio.NewReader(c) - cc.fr = NewFramer(cc.bw, cc.br) - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? - cc.henc = hpack.NewEncoder(&cc.hbuf) - - if t.AllowHTTP { - cc.nextStreamID = 3 - } - - if cs, ok := c.(connectionStater); ok { - state := cs.ConnectionState() - cc.tlsState = &state - } - - initialSettings := []Setting{ - {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxHeaderListSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) - } - - cc.bw.Write(clientPreface) - cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) - cc.bw.Flush() - if cc.werr != nil { - return nil, cc.werr - } - - go cc.readLoop() - return cc, nil -} - -func (cc *ClientConn) setGoAway(f *GoAwayFrame) { - cc.mu.Lock() - defer cc.mu.Unlock() - - old := cc.goAway - cc.goAway = f - - // Merge the previous and current GoAway error frames. - if cc.goAwayDebug == "" { - cc.goAwayDebug = string(f.DebugData()) - } - if old != nil && old.ErrCode != ErrCodeNo { - cc.goAway.ErrCode = old.ErrCode - } - last := f.LastStreamID - for streamID, cs := range cc.streams { - if streamID > last { - select { - case cs.resc <- resAndError{err: errClientConnGotGoAway}: - default: - } - } - } -} - -// CanTakeNewRequest reports whether the connection can take a new request, -// meaning it has not been closed or received or sent a GOAWAY. -func (cc *ClientConn) CanTakeNewRequest() bool { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.canTakeNewRequestLocked() -} - -// clientConnIdleState describes the suitability of a client -// connection to initiate a new RoundTrip request. -type clientConnIdleState struct { - canTakeNewRequest bool - freshConn bool // whether it's unused by any previous request -} - -func (cc *ClientConn) idleState() clientConnIdleState { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.idleStateLocked() -} - -func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { - if cc.singleUse && cc.nextStreamID > 1 { - return - } - var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { - // We'll tell the caller we can take a new request to - // prevent the caller from dialing a new TCP - // connection, but then we'll block later before - // writing it. - maxConcurrentOkay = true - } else { - maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) - } - - st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && - int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 - st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest - return -} - -func (cc *ClientConn) canTakeNewRequestLocked() bool { - st := cc.idleStateLocked() - return st.canTakeNewRequest -} - -// onIdleTimeout is called from a time.AfterFunc goroutine. It will -// only be called when we're idle, but because we're coming from a new -// goroutine, there could be a new request coming in at the same time, -// so this simply calls the synchronized closeIfIdle to shut down this -// connection. The timer could just call closeIfIdle, but this is more -// clear. 
-func (cc *ClientConn) onIdleTimeout() { - cc.closeIfIdle() -} - -func (cc *ClientConn) closeIfIdle() { - cc.mu.Lock() - if len(cc.streams) > 0 { - cc.mu.Unlock() - return - } - cc.closed = true - nextID := cc.nextStreamID - // TODO: do clients send GOAWAY too? maybe? Just Close: - cc.mu.Unlock() - - if VerboseLogs { - cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) - } - cc.tconn.Close() -} - -var shutdownEnterWaitStateHook = func() {} - -// Shutdown gracefully close the client connection, waiting for running streams to complete. -func (cc *ClientConn) Shutdown(ctx context.Context) error { - if err := cc.sendGoAway(); err != nil { - return err - } - // Wait for all in-flight streams to complete or connection to close - done := make(chan error, 1) - cancelled := false // guarded by cc.mu - go func() { - cc.mu.Lock() - defer cc.mu.Unlock() - for { - if len(cc.streams) == 0 || cc.closed { - cc.closed = true - done <- cc.tconn.Close() - break - } - if cancelled { - break - } - cc.cond.Wait() - } - }() - shutdownEnterWaitStateHook() - select { - case err := <-done: - return err - case <-ctx.Done(): - cc.mu.Lock() - // Free the goroutine above - cancelled = true - cc.cond.Broadcast() - cc.mu.Unlock() - return ctx.Err() - } -} - -func (cc *ClientConn) sendGoAway() error { - cc.mu.Lock() - defer cc.mu.Unlock() - cc.wmu.Lock() - defer cc.wmu.Unlock() - if cc.closing { - // GOAWAY sent already - return nil - } - // Send a graceful shutdown frame to server - maxStreamID := cc.nextStreamID - if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { - return err - } - if err := cc.bw.Flush(); err != nil { - return err - } - // Prevent new requests - cc.closing = true - return nil -} - -// Close closes the client connection immediately. -// -// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. -func (cc *ClientConn) Close() error { - cc.mu.Lock() - defer cc.cond.Broadcast() - defer cc.mu.Unlock() - err := errors.New("http2: client connection force closed via ClientConn.Close") - for id, cs := range cc.streams { - select { - case cs.resc <- resAndError{err: err}: - default: - } - cs.bufPipe.CloseWithError(err) - delete(cc.streams, id) - } - cc.closed = true - return cc.tconn.Close() -} - -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. -func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - -// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not -// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 
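// Illustrative aside, not part of the deleted vendor file: how the exported
// Shutdown/Close pair defined above is typically combined. cc is assumed to be
// a *ClientConn obtained earlier (e.g. from (*Transport).NewClientConn).
func closeClientConn(cc *ClientConn) error {
	// Give in-flight streams a bounded grace period, then force-close.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cc.Shutdown(ctx); err != nil {
		// Graceful shutdown did not finish in time (or failed); interrupt
		// the remaining streams immediately.
		return cc.Close()
	}
	return nil
}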
-var errRequestCanceled = errors.New("net/http: request canceled") - -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - -func (cc *ClientConn) responseHeaderTimeout() time.Duration { - if cc.t.t1 != nil { - return cc.t.t1.ResponseHeaderTimeout - } - // No way to do this (yet?) with just an http2.Transport. Probably - // no need. Request.Cancel this is the new way. We only need to support - // this for compatibility with the old http.Transport fields when - // we're doing transparent http2. - return 0 -} - -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - -// actualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. -func actualContentLength(req *http.Request) int64 { - if req.Body == nil || req.Body == http.NoBody { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} - -func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - resp, _, err := cc.roundTrip(req) - return resp, err -} - -func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { - if err := checkConnHeaders(req); err != nil { - return nil, false, err - } - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return nil, false, err - } - hasTrailers := trailers != "" - - cc.mu.Lock() - if err := cc.awaitOpenSlotForRequest(req); err != nil { - cc.mu.Unlock() - return nil, false, err - } - - body := req.Body - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - var requestedGzip bool - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
See https://golang.org/issue/8923 - requestedGzip = true - } - - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) - if err != nil { - cc.mu.Unlock() - return nil, false, err - } - - cs := cc.newStream() - cs.req = req - cs.trace = httptrace.ContextClientTrace(req.Context()) - cs.requestedGzip = requestedGzip - bodyWriter := cc.t.getBodyWriterState(cs, body) - cs.on100 = bodyWriter.on100 - - cc.wmu.Lock() - endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) - cc.wmu.Unlock() - traceWroteHeaders(cs.trace) - cc.mu.Unlock() - - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - bodyWriter.cancel() - } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - traceWroteRequest(cs.trace, werr) - return nil, false, werr - } - - var respHeaderTimer <-chan time.Time - if hasBody { - bodyWriter.scheduleBodyWrite() - } else { - traceWroteRequest(cs.trace, nil) - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - - readLoopResCh := cs.resc - bodyWritten := false - ctx := req.Context() - - handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, false, nil - } - - for { - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - case <-respHeaderTimer: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), errTimeout - case <-ctx.Done(): - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), ctx.Err() - case <-req.Cancel: - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), errRequestCanceled - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. - return nil, cs.getStartedWrite(), cs.resetErr - case err := <-bodyWriter.resc: - // Prefer the read loop's response, if available. Issue 16102. 
- select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - default: - } - if err != nil { - cc.forgetStreamID(cs.ID) - return nil, cs.getStartedWrite(), err - } - bodyWritten = true - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - } -} - -// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. -// Must hold cc.mu. -func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { - var waitingForConn chan struct{} - var waitingForConnErr error // guarded by cc.mu - for { - cc.lastActive = time.Now() - if cc.closed || !cc.canTakeNewRequestLocked() { - if waitingForConn != nil { - close(waitingForConn) - } - return errClientConnUnusable - } - if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { - if waitingForConn != nil { - close(waitingForConn) - } - return nil - } - // Unfortunately, we cannot wait on a condition variable and channel at - // the same time, so instead, we spin up a goroutine to check if the - // request is canceled while we wait for a slot to open in the connection. - if waitingForConn == nil { - waitingForConn = make(chan struct{}) - go func() { - if err := awaitRequestCancel(req, waitingForConn); err != nil { - cc.mu.Lock() - waitingForConnErr = err - cc.cond.Broadcast() - cc.mu.Unlock() - } - }() - } - cc.pendingRequests++ - cc.cond.Wait() - cc.pendingRequests-- - if waitingForConnErr != nil { - return waitingForConnErr - } - } -} - -// requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { - first := true // first frame written (HEADERS is first, then CONTINUATION) - for len(hdrs) > 0 && cc.werr == nil { - chunk := hdrs - if len(chunk) > maxFrameSize { - chunk = chunk[:maxFrameSize] - } - hdrs = hdrs[len(chunk):] - endHeaders := len(hdrs) == 0 - if first { - cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: chunk, - EndStream: endStream, - EndHeaders: endHeaders, - }) - first = false - } else { - cc.fr.WriteContinuation(streamID, endHeaders, chunk) - } - } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. - cc.bw.Flush() - return cc.werr -} - -// internal error values; they don't escape to callers -var ( - // abort request body write; don't send cancel - errStopReqBodyWrite = errors.New("http2: aborting request body write") - - // abort request body write, but send stream reset of cancel. 
- errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") -) - -func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { - cc := cs.cc - sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) - - defer func() { - traceWroteRequest(cs.trace, err) - // TODO: write h12Compare test showing whether - // Request.Body is closed by the Transport, - // and in multiple cases: server replies <=299 and >299 - // while still writing request body - cerr := bodyCloser.Close() - if err == nil { - err = cerr - } - }() - - req := cs.req - hasTrailers := req.Trailer != nil - - var sawEOF bool - for !sawEOF { - n, err := body.Read(buf) - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) - return err - } - - remain := buf[:n] - for len(remain) > 0 && err == nil { - var allowed int32 - allowed, err = cs.awaitFlowControl(len(remain)) - switch { - case err == errStopReqBodyWrite: - return err - case err == errStopReqBodyWriteAndCancel: - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - return err - case err != nil: - return err - } - cc.wmu.Lock() - data := remain[:allowed] - remain = remain[allowed:] - sentEnd = sawEOF && len(remain) == 0 && !hasTrailers - err = cc.fr.WriteData(cs.ID, sentEnd, data) - if err == nil { - // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or - // opt-out? Use some heuristic on the body type? Nagel-like - // timers? Based on 'n'? Only last chunk of this for loop, - // unless flow control tokens are low? For now, always. - // If we change this, see comment below. - err = cc.bw.Flush() - } - cc.wmu.Unlock() - } - if err != nil { - return err - } - } - - if sentEnd { - // Already sent END_STREAM (which implies we have no - // trailers) and flushed, because currently all - // WriteData frames above get a flush. So we're done. - return nil - } - - var trls []byte - if hasTrailers { - cc.mu.Lock() - trls, err = cc.encodeTrailers(req) - cc.mu.Unlock() - if err != nil { - cc.writeStreamReset(cs.ID, ErrCodeInternal, err) - cc.forgetStreamID(cs.ID) - return err - } - } - - cc.mu.Lock() - maxFrameSize := int(cc.maxFrameSize) - cc.mu.Unlock() - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - // Two ways to send END_STREAM: either with trailers, or - // with an empty DATA frame. - if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } - if ferr := cc.bw.Flush(); ferr != nil && err == nil { - err = ferr - } - return err -} - -// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow -// control tokens from the server. -// It returns either the non-zero number of tokens taken or an error -// if the stream is dead. 
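// Illustrative aside, not part of the deleted vendor file: the clamping rule
// awaitFlowControl applies below, restated as a pure helper. The number of
// bytes taken is bounded by the stream's available flow-control tokens, the
// caller's request, and the peer's SETTINGS_MAX_FRAME_SIZE.
func clampFlowTake(available, maxBytes, maxFrameSize int32) int32 {
	take := available
	if take > maxBytes {
		take = maxBytes
	}
	if take > maxFrameSize {
		take = maxFrameSize
	}
	return take
}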
-func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { - cc := cs.cc - cc.mu.Lock() - defer cc.mu.Unlock() - for { - if cc.closed { - return 0, errClientConnClosed - } - if cs.stopReqBody != nil { - return 0, cs.stopReqBody - } - if err := cs.checkResetOrDone(); err != nil { - return 0, err - } - if a := cs.flow.available(); a > 0 { - take := a - if int(take) > maxBytes { - - take = int32(maxBytes) // can't truncate int; take is int32 - } - if take > int32(cc.maxFrameSize) { - take = int32(cc.maxFrameSize) - } - cs.flow.take(take) - return take, nil - } - cc.cond.Wait() - } -} - -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// requires cc.mu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) - } - } - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if req.Method != "CONNECT" { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || - strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || - strings.EqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if strings.EqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). 
- didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name = strings.ToLower(name) - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - -// requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { - cc.hbuf.Reset() - - hlSize := uint64(0) - for k, vv := range req.Trailer { - for _, v := range vv { - hf := hpack.HeaderField{Name: k, Value: v} - hlSize += uint64(hf.Size()) - } - } - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filtered at the - // start of RoundTrip - lowKey := strings.ToLower(k) - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - return cc.hbuf.Bytes(), nil -} - -func (cc *ClientConn) writeHeader(name, value string) { - if VerboseLogs { - log.Printf("http2: Transport encoding header %q = %q", name, value) - } - cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) -} - -type resAndError struct { - res *http.Response - err error -} - -// requires cc.mu be held. 
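// Illustrative aside, not part of the deleted vendor file: concrete inputs for
// shouldSendReqContentLength above, using actualContentLength's convention
// that 0 means a known-empty body and -1 means unknown length.
func exampleContentLengthHeader() {
	fmt.Println(shouldSendReqContentLength("GET", 0))   // false: empty body, non-mutating method
	fmt.Println(shouldSendReqContentLength("POST", 0))  // true: POST/PUT/PATCH advertise even an empty body
	fmt.Println(shouldSendReqContentLength("PUT", 512)) // true: any positive length is sent
	fmt.Println(shouldSendReqContentLength("GET", -1))  // false: unknown length is never sent
}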
-func (cc *ClientConn) newStream() *clientStream { - cs := &clientStream{ - cc: cc, - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), - peerReset: make(chan struct{}), - done: make(chan struct{}), - } - cs.flow.add(int32(cc.initialWindowSize)) - cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) - cc.nextStreamID += 2 - cc.streams[cs.ID] = cs - return cs -} - -func (cc *ClientConn) forgetStreamID(id uint32) { - cc.streamByID(id, true) -} - -func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { - cc.mu.Lock() - defer cc.mu.Unlock() - cs := cc.streams[id] - if andRemove && cs != nil && !cc.closed { - cc.lastActive = time.Now() - delete(cc.streams, id) - if len(cc.streams) == 0 && cc.idleTimer != nil { - cc.idleTimer.Reset(cc.idleTimeout) - } - close(cs.done) - // Wake up checkResetOrDone via clientStream.awaitFlowControl and - // wake up RoundTrip if there is a pending request. - cc.cond.Broadcast() - } - return cs -} - -// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. -type clientConnReadLoop struct { - cc *ClientConn - closeWhenIdle bool -} - -// readLoop runs in its own goroutine and reads and dispatches frames. -func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{cc: cc} - defer rl.cleanup() - cc.readerErr = rl.run() - if ce, ok := cc.readerErr.(ConnectionError); ok { - cc.wmu.Lock() - cc.fr.WriteGoAway(0, ErrCode(ce), nil) - cc.wmu.Unlock() - } -} - -// GoAwayError is returned by the Transport when the server closes the -// TCP connection after sending a GOAWAY frame. -type GoAwayError struct { - LastStreamID uint32 - ErrCode ErrCode - DebugData string -} - -func (e GoAwayError) Error() string { - return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", - e.LastStreamID, e.ErrCode, e.DebugData) -} - -func isEOFOrNetReadError(err error) bool { - if err == io.EOF { - return true - } - ne, ok := err.(*net.OpError) - return ok && ne.Op == "read" -} - -func (rl *clientConnReadLoop) cleanup() { - cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) - defer close(cc.readerDone) - - if cc.idleTimer != nil { - cc.idleTimer.Stop() - } - - // Close any response bodies if the server closes prematurely. - // TODO: also do this if we've written the headers but not - // gotten a response yet. 
- err := cc.readerErr - cc.mu.Lock() - if cc.goAway != nil && isEOFOrNetReadError(err) { - err = GoAwayError{ - LastStreamID: cc.goAway.LastStreamID, - ErrCode: cc.goAway.ErrCode, - DebugData: cc.goAwayDebug, - } - } else if err == io.EOF { - err = io.ErrUnexpectedEOF - } - for _, cs := range cc.streams { - cs.bufPipe.CloseWithError(err) // no-op if already closed - select { - case cs.resc <- resAndError{err: err}: - default: - } - close(cs.done) - } - cc.closed = true - cc.cond.Broadcast() - cc.mu.Unlock() -} - -func (rl *clientConnReadLoop) run() error { - cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a HEADERS reply - gotSettings := false - for { - f, err := cc.fr.ReadFrame() - if err != nil { - cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) - } - if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, false); cs != nil { - cs.cc.writeStreamReset(cs.ID, se.Code, err) - cs.cc.forgetStreamID(cs.ID) - if se.Cause == nil { - se.Cause = cc.fr.errDetail - } - rl.endStreamError(cs, se) - } - continue - } else if err != nil { - return err - } - if VerboseLogs { - cc.vlogf("http2: Transport received %s", summarizeFrame(f)) - } - if !gotSettings { - if _, ok := f.(*SettingsFrame); !ok { - cc.logf("protocol error: received %T before a SETTINGS frame", f) - return ConnectionError(ErrCodeProtocol) - } - gotSettings = true - } - maybeIdle := false // whether frame might transition us to idle - - switch f := f.(type) { - case *MetaHeadersFrame: - err = rl.processHeaders(f) - maybeIdle = true - gotReply = true - case *DataFrame: - err = rl.processData(f) - maybeIdle = true - case *GoAwayFrame: - err = rl.processGoAway(f) - maybeIdle = true - case *RSTStreamFrame: - err = rl.processResetStream(f) - maybeIdle = true - case *SettingsFrame: - err = rl.processSettings(f) - case *PushPromiseFrame: - err = rl.processPushPromise(f) - case *WindowUpdateFrame: - err = rl.processWindowUpdate(f) - case *PingFrame: - err = rl.processPing(f) - default: - cc.logf("Transport: unhandled response frame type %T", f) - } - if err != nil { - if VerboseLogs { - cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) - } - return err - } - if rl.closeWhenIdle && gotReply && maybeIdle { - cc.closeIfIdle() - } - } -} - -func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if cs == nil { - // We'd get here if we canceled a request while the - // server had its response still in flight. So if this - // was just something we canceled, ignore it. - return nil - } - if f.StreamEnded() { - // Issue 20521: If the stream has ended, streamByID() causes - // clientStream.done to be closed, which causes the request's bodyWriter - // to be closed with an errStreamClosed, which may be received by - // clientConn.RoundTrip before the result of processing these headers. - // Deferring stream closure allows the header processing to occur first. - // clientConn.RoundTrip may still receive the bodyWriter error first, but - // the fix for issue 16102 prioritises any response. - // - // Issue 22413: If there is no request body, we should close the - // stream before writing to cs.resc so that the stream is closed - // immediately once RoundTrip returns. 
- if cs.req.Body != nil { - defer cc.forgetStreamID(f.StreamID) - } else { - cc.forgetStreamID(f.StreamID) - } - } - if !cs.firstByte { - if cs.trace != nil { - // TODO(bradfitz): move first response byte earlier, - // when we first read the 9 byte header, not waiting - // until all the HEADERS+CONTINUATION frames have been - // merged. This works for now. - traceFirstResponseByte(cs.trace) - } - cs.firstByte = true - } - if !cs.pastHeaders { - cs.pastHeaders = true - } else { - return rl.processTrailers(cs, f) - } - - res, err := rl.handleResponse(cs, f) - if err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cc.forgetStreamID(cs.ID) - cs.resc <- resAndError{err: err} - return nil // return nil from process* funcs to keep conn alive - } - if res == nil { - // (nil, nil) special case. See handleResponse docs. - return nil - } - cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} - return nil -} - -// may return error types nil, or ConnectionError. Any other error value -// is a StreamError of type ErrCodeProtocol. The returned error in that case -// is the detail. -// -// As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 1xx responses). -func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { - if f.Truncated { - return nil, errResponseHeaderListSize - } - - status := f.PseudoValue("status") - if status == "" { - return nil, errors.New("malformed response from server: missing status pseudo header") - } - statusCode, err := strconv.Atoi(status) - if err != nil { - return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") - } - - header := make(http.Header) - res := &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: header, - StatusCode: statusCode, - Status: status + " " + http.StatusText(statusCode), - } - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - if key == "Trailer" { - t := res.Trailer - if t == nil { - t = make(http.Header) - res.Trailer = t - } - foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - header[key] = append(header[key], hf.Value) - } - } - - if statusCode >= 100 && statusCode <= 199 { - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } - if fn := cs.get1xxTraceFunc(); fn != nil { - if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { - return nil, err - } - } - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - } - cs.pastHeaders = false // do it all again - return nil, nil - } - - streamEnded := f.StreamEnded() - isHead := cs.req.Method == "HEAD" - if !streamEnded || isHead { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { - // TODO: care? 
unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } - - if streamEnded || isHead { - res.Body = noBody - return res, nil - } - - cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(cs.req) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - res.Uncompressed = true - } - return res, nil -} - -func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { - if cs.pastTrailers { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !f.StreamEnded() { - // We expect that any headers for trailers also - // has END_STREAM. - return ConnectionError(ErrCodeProtocol) - } - if len(f.PseudoFields()) > 0 { - // No pseudo header fields are defined for trailers. - // TODO: ConnectionError might be overly harsh? Check. - return ConnectionError(ErrCodeProtocol) - } - - trailer := make(http.Header) - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - trailer[key] = append(trailer[key], hf.Value) - } - cs.trailer = trailer - - rl.endStream(cs) - return nil -} - -// transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. -type transportResponseBody struct { - cs *clientStream -} - -func (b transportResponseBody) Read(p []byte) (n int, err error) { - cs := b.cs - cc := cs.cc - - if cs.readErr != nil { - return 0, cs.readErr - } - n, err = b.cs.bufPipe.Read(p) - if cs.bytesRemain != -1 { - if int64(n) > cs.bytesRemain { - n = int(cs.bytesRemain) - if err == nil { - err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) - } - cs.readErr = err - return int(cs.bytesRemain), err - } - cs.bytesRemain -= int64(n) - if err == io.EOF && cs.bytesRemain > 0 { - err = io.ErrUnexpectedEOF - cs.readErr = err - return n, err - } - } - if n == 0 { - // No flow control tokens to send back. - return - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } - if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. 
- v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } - } - if connAdd != 0 || streamAdd != 0 { - cc.wmu.Lock() - defer cc.wmu.Unlock() - if connAdd != 0 { - cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) - } - if streamAdd != 0 { - cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) - } - cc.bw.Flush() - } - return -} - -var errClosedResponseBody = errors.New("http2: response body closed") - -func (b transportResponseBody) Close() error { - cs := b.cs - cc := cs.cc - - serverSentStreamEnd := cs.bufPipe.Err() == io.EOF - unread := cs.bufPipe.Len() - - if unread > 0 || !serverSentStreamEnd { - cc.mu.Lock() - cc.wmu.Lock() - if !serverSentStreamEnd { - cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) - cs.didReset = true - } - // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - cc.fr.WriteWindowUpdate(0, uint32(unread)) - } - cc.bw.Flush() - cc.wmu.Unlock() - cc.mu.Unlock() - } - - cs.bufPipe.BreakWithError(errClosedResponseBody) - cc.forgetStreamID(cs.ID) - return nil -} - -func (rl *clientConnReadLoop) processData(f *DataFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - data := f.Data() - if cs == nil { - cc.mu.Lock() - neverSent := cc.nextStreamID - cc.mu.Unlock() - if f.StreamID >= neverSent { - // We never asked for this. - cc.logf("http2: Transport received unsolicited DATA frame; closing connection") - return ConnectionError(ErrCodeProtocol) - } - // We probably did ask for this, but canceled. Just ignore it. - // TODO: be stricter here? only silently ignore things which - // we canceled, but not things which were closed normally - // by the peer? Tough without accumulating too much state. - - // But at least return their flow control: - if f.Length > 0 { - cc.mu.Lock() - cc.inflow.add(int32(f.Length)) - cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() - } - return nil - } - if !cs.firstByte { - cc.logf("protocol error: received DATA before a HEADERS frame") - rl.endStreamError(cs, StreamError{ - StreamID: f.StreamID, - Code: ErrCodeProtocol, - }) - return nil - } - if f.Length > 0 { - if cs.req.Method == "HEAD" && len(data) > 0 { - cc.logf("protocol error: received DATA on a HEAD request") - rl.endStreamError(cs, StreamError{ - StreamID: f.StreamID, - Code: ErrCodeProtocol, - }) - return nil - } - // Check connection-level flow control. - cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { - cc.mu.Unlock() - return ConnectionError(ErrCodeFlowControl) - } - // Return any padded flow control now, since we won't - // refund it later on body reads. - var refund int - if pad := int(f.Length) - len(data); pad > 0 { - refund += pad - } - // Return len(data) now if the stream is already closed, - // since data will never be read. 
- didReset := cs.didReset - if didReset { - refund += len(data) - } - if refund > 0 { - cc.inflow.add(int32(refund)) - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(refund)) - if !didReset { - cs.inflow.add(int32(refund)) - cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) - } - cc.bw.Flush() - cc.wmu.Unlock() - } - cc.mu.Unlock() - - if len(data) > 0 && !didReset { - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } - } - } - - if f.StreamEnded() { - rl.endStream(cs) - } - return nil -} - -var errInvalidTrailers = errors.New("http2: invalid trailers") - -func (rl *clientConnReadLoop) endStream(cs *clientStream) { - // TODO: check that any declared content-length matches, like - // server.go's (*stream).endStream method. - rl.endStreamError(cs, nil) -} - -func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - if isConnectionCloseRequest(cs.req) { - rl.closeWhenIdle = true - } - cs.bufPipe.closeWithErrorAndCode(err, code) - - select { - case cs.resc <- resAndError{err: err}: - default: - } -} - -func (cs *clientStream) copyTrailers() { - for k, vv := range cs.trailer { - t := cs.resTrailer - if *t == nil { - *t = make(http.Header) - } - (*t)[k] = vv - } -} - -func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { - cc := rl.cc - cc.t.connPool().MarkDead(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. particularly the error code - cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) - return nil -} - -func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - - if f.IsAck() { - if cc.wantSettingsAck { - cc.wantSettingsAck = false - return nil - } - return ConnectionError(ErrCodeProtocol) - } - - err := f.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingMaxHeaderListSize: - cc.peerMaxHeaderListSize = uint64(s.Val) - case SettingInitialWindowSize: - // Values above the maximum flow-control - // window size of 2^31-1 MUST be treated as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - if s.Val > math.MaxInt32 { - return ConnectionError(ErrCodeFlowControl) - } - - // Adjust flow control of currently-open - // frames by the difference of the old initial - // window size and this one. - delta := int32(s.Val) - int32(cc.initialWindowSize) - for _, cs := range cc.streams { - cs.flow.add(delta) - } - cc.cond.Broadcast() - - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
- cc.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - if err != nil { - return err - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - cc.fr.WriteSettingsAck() - cc.bw.Flush() - return cc.werr -} - -func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if f.StreamID != 0 && cs == nil { - return nil - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - fl := &cc.flow - if cs != nil { - fl = &cs.flow - } - if !fl.add(int32(f.Increment)) { - return ConnectionError(ErrCodeFlowControl) - } - cc.cond.Broadcast() - return nil -} - -func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) - if cs == nil { - // TODO: return error if server tries to RST_STEAM an idle stream - return nil - } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. - default: - err := streamError(cs.ID, f.ErrCode) - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - return nil -} - -// Ping sends a PING frame to the server and waits for the ack. -func (cc *ClientConn) Ping(ctx context.Context) error { - c := make(chan struct{}) - // Generate a random payload - var p [8]byte - for { - if _, err := rand.Read(p[:]); err != nil { - return err - } - cc.mu.Lock() - // check for dup before insert - if _, found := cc.pings[p]; !found { - cc.pings[p] = c - cc.mu.Unlock() - break - } - cc.mu.Unlock() - } - cc.wmu.Lock() - if err := cc.fr.WritePing(false, p); err != nil { - cc.wmu.Unlock() - return err - } - if err := cc.bw.Flush(); err != nil { - cc.wmu.Unlock() - return err - } - cc.wmu.Unlock() - select { - case <-c: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-cc.readerDone: - // connection closed - return cc.readerErr - } -} - -func (rl *clientConnReadLoop) processPing(f *PingFrame) error { - if f.IsAck() { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - // If ack, notify listener if any - if c, ok := cc.pings[f.Data]; ok { - close(c) - delete(cc.pings, f.Data) - } - return nil - } - cc := rl.cc - cc.wmu.Lock() - defer cc.wmu.Unlock() - if err := cc.fr.WritePing(true, f.Data); err != nil { - return err - } - return cc.bw.Flush() -} - -func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { - // We told the peer we don't want them. - // Spec says: - // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH - // setting of the peer endpoint is set to 0. An endpoint that - // has set this setting and has received acknowledgement MUST - // treat the receipt of a PUSH_PROMISE frame as a connection - // error (Section 5.4.1) of type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) -} - -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: map err to more interesting error codes, once the - // HTTP community comes up with some. But currently for - // RST_STREAM there's no equivalent to GOAWAY frame's debug - // data, and the error codes are all pretty vague ("cancel"). 
- cc.wmu.Lock() - cc.fr.WriteRSTStream(streamID, code) - cc.bw.Flush() - cc.wmu.Unlock() -} - -var ( - errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") -) - -func (cc *ClientConn) logf(format string, args ...interface{}) { - cc.t.logf(format, args...) -} - -func (cc *ClientConn) vlogf(format string, args ...interface{}) { - cc.t.vlogf(format, args...) -} - -func (t *Transport) vlogf(format string, args ...interface{}) { - if VerboseLogs { - t.logf(format, args...) - } -} - -func (t *Transport) logf(format string, args ...interface{}) { - log.Printf(format, args...) -} - -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) - -func strSliceContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -type erringRoundTripper struct{ err error } - -func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr - } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) Close() error { - return gz.body.Close() -} - -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } - -// bodyWriterState encapsulates various state around the Transport's writing -// of the request body, particularly regarding doing delayed writes of the body -// when the request contains "Expect: 100-continue". -type bodyWriterState struct { - cs *clientStream - timer *time.Timer // if non-nil, we're doing a delayed write - fnonce *sync.Once // to call fn with - fn func() // the code to run in the goroutine, writing the body - resc chan error // result of fn's execution - delay time.Duration // how long we should delay a delayed write for -} - -func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { - s.cs = cs - if body == nil { - return - } - resc := make(chan error, 1) - s.resc = resc - s.fn = func() { - cs.cc.mu.Lock() - cs.startedWrite = true - cs.cc.mu.Unlock() - resc <- cs.writeRequestBody(body, cs.req.Body) - } - s.delay = t.expectContinueTimeout() - if s.delay == 0 || - !httpguts.HeaderValuesContainsToken( - cs.req.Header["Expect"], - "100-continue") { - return - } - s.fnonce = new(sync.Once) - - // Arm the timer with a very large duration, which we'll - // intentionally lower later. It has to be large now because - // we need a handle to it before writing the headers, but the - // s.delay value is defined to not start until after the - // request headers were written. 
- const hugeDuration = 365 * 24 * time.Hour - s.timer = time.AfterFunc(hugeDuration, func() { - s.fnonce.Do(s.fn) - }) - return -} - -func (s bodyWriterState) cancel() { - if s.timer != nil { - s.timer.Stop() - } -} - -func (s bodyWriterState) on100() { - if s.timer == nil { - // If we didn't do a delayed write, ignore the server's - // bogus 100 continue response. - return - } - s.timer.Stop() - go func() { s.fnonce.Do(s.fn) }() -} - -// scheduleBodyWrite starts writing the body, either immediately (in -// the common case) or after the delay timeout. It should not be -// called until after the headers have been written. -func (s bodyWriterState) scheduleBodyWrite() { - if s.timer == nil { - // We're not doing a delayed write (see - // getBodyWriterState), so just start the writing - // goroutine immediately. - go s.fn() - return - } - traceWait100Continue(s.cs.trace) - if s.timer.Stop() { - s.timer.Reset(s.delay) - } -} - -// isConnectionCloseRequest reports whether req should use its own -// connection for a single request and then close the connection. -func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") -} - -// registerHTTPSProtocol calls Transport.RegisterProtocol but -// converting panics into errors. -func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("%v", e) - } - }() - t.RegisterProtocol("https", rt) - return nil -} - -// noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. -// (The field is exported so it can be accessed via reflect from net/http; tested -// by TestNoDialH2RoundTripperType) -type noDialH2RoundTripper struct{ *Transport } - -func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.Transport.RoundTrip(req) - if isNoCachedConnError(err) { - return nil, http.ErrSkipAltProtocol - } - return res, err -} - -func (t *Transport) idleConnTimeout() time.Duration { - if t.t1 != nil { - return t.t1.IdleConnTimeout - } - return 0 -} - -func traceGetConn(req *http.Request, hostPort string) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GetConn == nil { - return - } - trace.GetConn(hostPort) -} - -func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GotConn == nil { - return - } - ci := httptrace.GotConnInfo{Conn: cc.tconn} - ci.Reused = reused - cc.mu.Lock() - ci.WasIdle = len(cc.streams) == 0 && reused - if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) - } - cc.mu.Unlock() - - trace.GotConn(ci) -} - -func traceWroteHeaders(trace *httptrace.ClientTrace) { - if trace != nil && trace.WroteHeaders != nil { - trace.WroteHeaders() - } -} - -func traceGot100Continue(trace *httptrace.ClientTrace) { - if trace != nil && trace.Got100Continue != nil { - trace.Got100Continue() - } -} - -func traceWait100Continue(trace *httptrace.ClientTrace) { - if trace != nil && trace.Wait100Continue != nil { - trace.Wait100Continue() - } -} - -func traceWroteRequest(trace *httptrace.ClientTrace, err error) { - if trace != nil && trace.WroteRequest != nil { - trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) - } -} - -func traceFirstResponseByte(trace *httptrace.ClientTrace) { - if trace != nil && 
trace.GotFirstResponseByte != nil { - trace.GotFirstResponseByte() - } -} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go deleted file mode 100644 index 3849bc263..000000000 --- a/vendor/golang.org/x/net/http2/write.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "fmt" - "log" - "net/http" - "net/url" - - "golang.org/x/net/http/httpguts" - "golang.org/x/net/http2/hpack" -) - -// writeFramer is implemented by any type that is used to write frames. -type writeFramer interface { - writeFrame(writeContext) error - - // staysWithinBuffer reports whether this writer promises that - // it will only write less than or equal to size bytes, and it - // won't Flush the write context. - staysWithinBuffer(size int) bool -} - -// writeContext is the interface needed by the various frame writer -// types below. All the writeFrame methods below are scheduled via the -// frame writing scheduler (see writeScheduler in writesched.go). -// -// This interface is implemented by *serverConn. -// -// TODO: decide whether to a) use this in the client code (which didn't -// end up using this yet, because it has a simpler design, not -// currently implementing priorities), or b) delete this and -// make the server code a bit more concrete. -type writeContext interface { - Framer() *Framer - Flush() error - CloseConn() error - // HeaderEncoder returns an HPACK encoder that writes to the - // returned buffer. - HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) -} - -// writeEndsStream reports whether w writes a frame that will transition -// the stream to a half-closed local state. This returns false for RST_STREAM, -// which closes the entire stream (not just the local half). -func writeEndsStream(w writeFramer) bool { - switch v := w.(type) { - case *writeData: - return v.endStream - case *writeResHeaders: - return v.endStream - case nil: - // This can only happen if the caller reuses w after it's - // been intentionally nil'ed out to prevent use. Keep this - // here to catch future refactoring breaking it. - panic("writeEndsStream called on nil writeFramer") - } - return false -} - -type flushFrameWriter struct{} - -func (flushFrameWriter) writeFrame(ctx writeContext) error { - return ctx.Flush() -} - -func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } - -type writeSettings []Setting - -func (s writeSettings) staysWithinBuffer(max int) bool { - const settingSize = 6 // uint16 + uint32 - return frameHeaderLen+settingSize*len(s) <= max - -} - -func (s writeSettings) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettings([]Setting(s)...) 
-} - -type writeGoAway struct { - maxStreamID uint32 - code ErrCode -} - -func (p *writeGoAway) writeFrame(ctx writeContext) error { - err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - ctx.Flush() // ignore error: we're hanging up on them anyway - return err -} - -func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes - -type writeData struct { - streamID uint32 - p []byte - endStream bool -} - -func (w *writeData) String() string { - return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) -} - -func (w *writeData) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) -} - -func (w *writeData) staysWithinBuffer(max int) bool { - return frameHeaderLen+len(w.p) <= max -} - -// handlerPanicRST is the message sent from handler goroutines when -// the handler panics. -type handlerPanicRST struct { - StreamID uint32 -} - -func (hp handlerPanicRST) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) -} - -func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (se StreamError) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) -} - -func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -type writePingAck struct{ pf *PingFrame } - -func (w writePingAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WritePing(true, w.pf.Data) -} - -func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } - -type writeSettingsAck struct{} - -func (writeSettingsAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettingsAck() -} - -func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } - -// splitHeaderBlock splits headerBlock into fragments so that each fragment fits -// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true -// for the first/last fragment, respectively. -func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. - const maxFrameSize = 16384 - - first := true - for len(headerBlock) > 0 { - frag := headerBlock - if len(frag) > maxFrameSize { - frag = frag[:maxFrameSize] - } - headerBlock = headerBlock[len(frag):] - if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { - return err - } - first = false - } - return nil -} - -// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames -// for HTTP response headers or trailers from a server handler. -type writeResHeaders struct { - streamID uint32 - httpResCode int // 0 means no ":status" line - h http.Header // may be nil - trailers []string // if non-nil, which keys of h to write. nil means all. 
- endStream bool - - date string - contentType string - contentLength string -} - -func encKV(enc *hpack.Encoder, k, v string) { - if VerboseLogs { - log.Printf("http2: server encoding header %q = %q", k, v) - } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) -} - -func (w *writeResHeaders) staysWithinBuffer(max int) bool { - // TODO: this is a common one. It'd be nice to return true - // here and get into the fast path if we could be clever and - // calculate the size fast enough, or at least a conservative - // upper bound that usually fires. (Maybe if w.h and - // w.trailers are nil, so we don't need to enumerate it.) - // Otherwise I'm afraid that just calculating the length to - // answer this question would be slower than the ~2µs benefit. - return false -} - -func (w *writeResHeaders) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - if w.httpResCode != 0 { - encKV(enc, ":status", httpCodeString(w.httpResCode)) - } - - encodeHeaders(enc, w.h, w.trailers) - - if w.contentType != "" { - encKV(enc, "content-type", w.contentType) - } - if w.contentLength != "" { - encKV(enc, "content-length", w.contentLength) - } - if w.date != "" { - encKV(enc, "date", w.date) - } - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 && w.trailers == nil { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: frag, - EndStream: w.endStream, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. -type writePushPromise struct { - streamID uint32 // pusher stream - method string // for :method - url *url.URL // for :scheme, :authority, :path - h http.Header - - // Creates an ID for a pushed stream. This runs on serveG just before - // the frame is written. The returned ID is copied to promisedID. 
- allocatePromisedID func() (uint32, error) - promisedID uint32 -} - -func (w *writePushPromise) staysWithinBuffer(max int) bool { - // TODO: see writeResHeaders.staysWithinBuffer - return false -} - -func (w *writePushPromise) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - encKV(enc, ":method", w.method) - encKV(enc, ":scheme", w.url.Scheme) - encKV(enc, ":authority", w.url.Host) - encKV(enc, ":path", w.url.RequestURI()) - encodeHeaders(enc, w.h, nil) - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 { - panic("unexpected empty hpack") - } - - return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) -} - -func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { - if firstFrag { - return ctx.Framer().WritePushPromise(PushPromiseParam{ - StreamID: w.streamID, - PromiseID: w.promisedID, - BlockFragment: frag, - EndHeaders: lastFrag, - }) - } else { - return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) - } -} - -type write100ContinueHeadersFrame struct { - streamID uint32 -} - -func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - encKV(enc, ":status", "100") - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: buf.Bytes(), - EndStream: false, - EndHeaders: true, - }) -} - -func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { - // Sloppy but conservative: - return 9+2*(len(":status")+len("100")) <= max -} - -type writeWindowUpdate struct { - streamID uint32 // or 0 for conn-level - n uint32 -} - -func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) -} - -// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) -// is encoded only if k is in keys. -func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - if keys == nil { - sorter := sorterPool.Get().(*sorter) - // Using defer here, since the returned keys from the - // sorter.Keys method is only valid until the sorter - // is returned: - defer sorterPool.Put(sorter) - keys = sorter.Keys(h) - } - for _, k := range keys { - vv := h[k] - k = lowerHeader(k) - if !validWireHeaderFieldName(k) { - // Skip it as backup paranoia. Per - // golang.org/issue/14048, these should - // already be rejected at a higher level. - continue - } - isTE := k == "transfer-encoding" - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // TODO: return an error? golang.org/issue/14048 - // For now just omit it. - continue - } - // TODO: more of "8.1.2.2 Connection-Specific Header Fields" - if isTE && v != "trailers" { - continue - } - encKV(enc, k, v) - } - } -} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go deleted file mode 100644 index f24d2b1e7..000000000 --- a/vendor/golang.org/x/net/http2/writesched.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "fmt" - -// WriteScheduler is the interface implemented by HTTP/2 write schedulers. -// Methods are never called concurrently. -type WriteScheduler interface { - // OpenStream opens a new stream in the write scheduler. 
- // It is illegal to call this with streamID=0 or with a streamID that is - // already open -- the call may panic. - OpenStream(streamID uint32, options OpenStreamOptions) - - // CloseStream closes a stream in the write scheduler. Any frames queued on - // this stream should be discarded. It is illegal to call this on a stream - // that is not open -- the call may panic. - CloseStream(streamID uint32) - - // AdjustStream adjusts the priority of the given stream. This may be called - // on a stream that has not yet been opened or has been closed. Note that - // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: - // https://tools.ietf.org/html/rfc7540#section-5.1 - AdjustStream(streamID uint32, priority PriorityParam) - - // Push queues a frame in the scheduler. In most cases, this will not be - // called with wr.StreamID()!=0 unless that stream is currently open. The one - // exception is RST_STREAM frames, which may be sent on idle or closed streams. - Push(wr FrameWriteRequest) - - // Pop dequeues the next frame to write. Returns false if no frames can - // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. No frames should be discarded except by CloseStream. - Pop() (wr FrameWriteRequest, ok bool) -} - -// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. -type OpenStreamOptions struct { - // PusherID is zero if the stream was initiated by the client. Otherwise, - // PusherID names the stream that pushed the newly opened stream. - PusherID uint32 -} - -// FrameWriteRequest is a request to write a frame. -type FrameWriteRequest struct { - // write is the interface value that does the writing, once the - // WriteScheduler has selected this frame to write. The write - // functions are all defined in write.go. - write writeFramer - - // stream is the stream on which this frame will be written. - // nil for non-stream frames like PING and SETTINGS. - stream *stream - - // done, if non-nil, must be a buffered channel with space for - // 1 message and is sent the return value from write (or an - // earlier error) when the frame has been written. - done chan error -} - -// StreamID returns the id of the stream this frame will be written to. -// 0 is used for non-stream frames such as PING and SETTINGS. -func (wr FrameWriteRequest) StreamID() uint32 { - if wr.stream == nil { - if se, ok := wr.write.(StreamError); ok { - // (*serverConn).resetStream doesn't set - // stream because it doesn't necessarily have - // one. So special case this type of write - // message. - return se.StreamID - } - return 0 - } - return wr.stream.id -} - -// isControl reports whether wr is a control frame for MaxQueuedControlFrames -// purposes. That includes non-stream frames and RST_STREAM frames. -func (wr FrameWriteRequest) isControl() bool { - return wr.stream == nil -} - -// DataSize returns the number of flow control bytes that must be consumed -// to write this entire frame. This is 0 for non-DATA frames. -func (wr FrameWriteRequest) DataSize() int { - if wd, ok := wr.write.(*writeData); ok { - return len(wd.p) - } - return 0 -} - -// Consume consumes min(n, available) bytes from this frame, where available -// is the number of flow control bytes available on the stream. Consume returns -// 0, 1, or 2 frames, where the integer return value gives the number of frames -// returned. -// -// If flow control prevents consuming any bytes, this returns (_, _, 0). If -// the entire frame was consumed, this returns (wr, _, 1). 
Otherwise, this -// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and -// 'rest' contains the remaining bytes. The consumed bytes are deducted from the -// underlying stream's flow control budget. -func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { - var empty FrameWriteRequest - - // Non-DATA frames are always consumed whole. - wd, ok := wr.write.(*writeData) - if !ok || len(wd.p) == 0 { - return wr, empty, 1 - } - - // Might need to split after applying limits. - allowed := wr.stream.flow.available() - if n < allowed { - allowed = n - } - if wr.stream.sc.maxFrameSize < allowed { - allowed = wr.stream.sc.maxFrameSize - } - if allowed <= 0 { - return empty, empty, 0 - } - if len(wd.p) > int(allowed) { - wr.stream.flow.take(allowed) - consumed := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[:allowed], - // Even if the original had endStream set, there - // are bytes remaining because len(wd.p) > allowed, - // so we know endStream is false. - endStream: false, - }, - // Our caller is blocking on the final DATA frame, not - // this intermediate frame, so no need to wait. - done: nil, - } - rest := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[allowed:], - endStream: wd.endStream, - }, - done: wr.done, - } - return consumed, rest, 2 - } - - // The frame is consumed whole. - // NB: This cast cannot overflow because allowed is <= math.MaxInt32. - wr.stream.flow.take(int32(len(wd.p))) - return wr, empty, 1 -} - -// String is for debugging only. -func (wr FrameWriteRequest) String() string { - var des string - if s, ok := wr.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wr.write) - } - return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) -} - -// replyToWriter sends err to wr.done and panics if the send must block -// This does nothing if wr.done is nil. -func (wr *FrameWriteRequest) replyToWriter(err error) { - if wr.done == nil { - return - } - select { - case wr.done <- err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) - } - wr.write = nil // prevent use (assume it's tainted after wr.done send) -} - -// writeQueue is used by implementations of WriteScheduler. -type writeQueue struct { - s []FrameWriteRequest -} - -func (q *writeQueue) empty() bool { return len(q.s) == 0 } - -func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) -} - -func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { - panic("invalid use of queue") - } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] - return wr -} - -// consume consumes up to n bytes from q.s[0]. If the frame is -// entirely consumed, it is removed from the queue. If the frame -// is partially consumed, the frame is kept with the consumed -// bytes removed. Returns true iff any bytes were consumed. -func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { - return FrameWriteRequest{}, false - } - consumed, rest, numresult := q.s[0].Consume(n) - switch numresult { - case 0: - return FrameWriteRequest{}, false - case 1: - q.shift() - case 2: - q.s[0] = rest - } - return consumed, true -} - -type writeQueuePool []*writeQueue - -// put inserts an unused writeQueue into the pool. 
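
For context on the Consume logic above: a queued DATA frame is either sent whole, split at the current flow-control allowance, or left queued when nothing may be written yet. Below is a minimal, self-contained sketch of that splitting rule; the names dataFrame and splitForFlowControl are hypothetical stand-ins for the package's unexported types, and the real Consume additionally caps the allowance at the peer's SETTINGS_MAX_FRAME_SIZE and deducts the sent bytes from the stream's flow-control window, as the deleted source shows.

package main

import "fmt"

// dataFrame is a hypothetical stand-in for the unexported writeData type.
type dataFrame struct {
	streamID  uint32
	p         []byte
	endStream bool
}

// splitForFlowControl mirrors the shape of FrameWriteRequest.Consume: it
// returns the part of the frame that fits within the allowance plus the
// remainder, which is nil when the whole frame fits. END_STREAM stays on the
// final fragment only.
func splitForFlowControl(d dataFrame, allowed int32) (send dataFrame, rest *dataFrame) {
	if len(d.p) == 0 {
		return d, nil // nothing to split; non-DATA frames are taken whole
	}
	if allowed <= 0 {
		return dataFrame{}, &d // flow control blocks the write entirely
	}
	if int32(len(d.p)) <= allowed {
		return d, nil // consumed whole
	}
	send = dataFrame{streamID: d.streamID, p: d.p[:allowed], endStream: false}
	rest = &dataFrame{streamID: d.streamID, p: d.p[allowed:], endStream: d.endStream}
	return send, rest
}

func main() {
	d := dataFrame{streamID: 1, p: make([]byte, 20000), endStream: true}
	send, rest := splitForFlowControl(d, 16384)
	fmt.Println(len(send.p), len(rest.p), rest.endStream) // 16384 3616 true
}
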
-func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} - } - q.s = q.s[:0] - *p = append(*p, q) -} - -// get returns an empty writeQueue. -func (p *writeQueuePool) get() *writeQueue { - ln := len(*p) - if ln == 0 { - return new(writeQueue) - } - x := ln - 1 - q := (*p)[x] - (*p)[x] = nil - *p = (*p)[:x] - return q -} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go deleted file mode 100644 index 848fed6ec..000000000 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "fmt" - "math" - "sort" -) - -// RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 - -// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. -type PriorityWriteSchedulerConfig struct { - // MaxClosedNodesInTree controls the maximum number of closed streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // "It is possible for a stream to become closed while prioritization - // information ... is in transit. ... This potentially creates suboptimal - // prioritization, since the stream could be given a priority that is - // different from what is intended. To avoid these problems, an endpoint - // SHOULD retain stream prioritization state for a period after streams - // become closed. The longer state is retained, the lower the chance that - // streams are assigned incorrect or default priority values." - MaxClosedNodesInTree int - - // MaxIdleNodesInTree controls the maximum number of idle streams to - // retain in the priority tree. Setting this to zero saves a small amount - // of memory at the cost of performance. - // - // See RFC 7540, Section 5.3.4: - // Similarly, streams that are in the "idle" state can be assigned - // priority or become a parent of other streams. This allows for the - // creation of a grouping node in the dependency tree, which enables - // more flexible expressions of priority. Idle streams begin with a - // default priority (Section 5.3.5). - MaxIdleNodesInTree int - - // ThrottleOutOfOrderWrites enables write throttling to help ensure that - // data is delivered in priority order. This works around a race where - // stream B depends on stream A and both streams are about to call Write - // to queue DATA frames. If B wins the race, a naive scheduler would eagerly - // write as much data from B as possible, but this is suboptimal because A - // is a higher-priority stream. With throttling enabled, we write a small - // amount of data from B to minimize the amount of bandwidth that B can - // steal from A. - ThrottleOutOfOrderWrites bool -} - -// NewPriorityWriteScheduler constructs a WriteScheduler that schedules -// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. -// If cfg is nil, default options are used. 
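
NewPriorityWriteScheduler above (and NewRandomWriteScheduler further down in this diff) is the exported entry point to the scheduler being removed from vendor/. For a sense of how an application selects it: a brief sketch, assuming the Server.NewWriteScheduler hook in golang.org/x/net/http2; the listen address and certificate paths are placeholders.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})

	srv := &http.Server{Addr: ":8443", Handler: mux} // placeholder address
	h2 := &http2.Server{
		// One scheduler per connection; the first two values echo the
		// defaults chosen when cfg == nil above, and throttling is
		// switched on here only to illustrate the option.
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
				MaxClosedNodesInTree:     10,
				MaxIdleNodesInTree:       10,
				ThrottleOutOfOrderWrites: true,
			})
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	// cert.pem / key.pem are placeholder paths for a TLS key pair.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
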
-func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { - if cfg == nil { - // For justification of these defaults, see: - // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY - cfg = &PriorityWriteSchedulerConfig{ - MaxClosedNodesInTree: 10, - MaxIdleNodesInTree: 10, - ThrottleOutOfOrderWrites: false, - } - } - - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), - maxClosedNodesInTree: cfg.MaxClosedNodesInTree, - maxIdleNodesInTree: cfg.MaxIdleNodesInTree, - enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, - } - ws.nodes[0] = &ws.root - if cfg.ThrottleOutOfOrderWrites { - ws.writeThrottleLimit = 1024 - } else { - ws.writeThrottleLimit = math.MaxInt32 - } - return ws -} - -type priorityNodeState int - -const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle -) - -// priorityNode is a node in an HTTP/2 priority tree. -// Each node is associated with a single stream ID. -// See RFC 7540, Section 5.3. -type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree - - // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings -} - -func (n *priorityNode) setParent(parent *priorityNode) { - if n == parent { - panic("setParent to self") - } - if n.parent == parent { - return - } - // Unlink from current parent. - if parent := n.parent; parent != nil { - if n.prev == nil { - parent.kids = n.next - } else { - n.prev.next = n.next - } - if n.next != nil { - n.next.prev = n.prev - } - } - // Link to new parent. - // If parent=nil, remove n from the tree. - // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). - n.parent = parent - if parent == nil { - n.next = nil - n.prev = nil - } else { - n.next = parent.kids - n.prev = nil - if n.next != nil { - n.next.prev = n - } - parent.kids = n - } -} - -func (n *priorityNode) addBytes(b int64) { - n.bytes += b - for ; n != nil; n = n.parent { - n.subtreeBytes += b - } -} - -// walkReadyInOrder iterates over the tree in priority order, calling f for each node -// with a non-empty write queue. When f returns true, this funcion returns true and the -// walk halts. tmp is used as scratch space for sorting. -// -// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true -// if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { - if !n.q.empty() && f(n, openParent) { - return true - } - if n.kids == nil { - return false - } - - // Don't consider the root "open" when updating openParent since - // we can't send data frames on the root stream (only control frames). - if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) - } - - // Common case: only one kid or all kids have the same weight. - // Some clients don't use weights; other clients (like web browsers) - // use mostly-linear priority trees. 
- w := n.kids.weight - needSort := false - for k := n.kids.next; k != nil; k = k.next { - if k.weight != w { - needSort = true - break - } - } - if !needSort { - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false - } - - // Uncommon case: sort the child nodes. We remove the kids from the parent, - // then re-insert after sorting so we can reuse tmp for future sort calls. - *tmp = (*tmp)[:0] - for n.kids != nil { - *tmp = append(*tmp, n.kids) - n.kids.setParent(nil) - } - sort.Sort(sortPriorityNodeSiblings(*tmp)) - for i := len(*tmp) - 1; i >= 0; i-- { - (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids - } - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false -} - -type sortPriorityNodeSiblings []*priorityNode - -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { - // Prefer the subtree that has sent fewer bytes relative to its weight. - // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) - if bi == 0 && bk == 0 { - return wi >= wk - } - if bk == 0 { - return false - } - return bi/bk <= wi/wk -} - -type priorityWriteScheduler struct { - // root is the root of the priority tree, where root.id = 0. - // The root queues control frames that are not associated with any stream. - root priorityNode - - // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode - - // maxID is the maximum stream id in nodes. - maxID uint32 - - // lists of nodes that have been closed or are idle, but are kept in - // the tree for improved prioritization. When the lengths exceed either - // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode - - // From the config. - maxClosedNodesInTree int - maxIdleNodesInTree int - writeThrottleLimit int32 - enableWriteThrottle bool - - // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // The stream may be currently idle but cannot be opened or closed. - if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { - panic(fmt.Sprintf("stream %d already opened", streamID)) - } - curr.state = priorityNodeOpen - return - } - - // RFC 7540, Section 5.3.5: - // "All streams are initially assigned a non-exclusive dependency on stream 0x0. - // Pushed streams initially depend on their associated stream. In both cases, - // streams are assigned a default weight of 16." 
- parent := ws.nodes[options.PusherID] - if parent == nil { - parent = &ws.root - } - n := &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, - } - n.setParent(parent) - ws.nodes[streamID] = n - if streamID > ws.maxID { - ws.maxID = streamID - } -} - -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { - if streamID == 0 { - panic("violation of WriteScheduler interface: cannot close stream 0") - } - if ws.nodes[streamID] == nil { - panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) - } - if ws.nodes[streamID].state != priorityNodeOpen { - panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) - } - - n := ws.nodes[streamID] - n.state = priorityNodeClosed - n.addBytes(-n.bytes) - - q := n.q - ws.queuePool.put(&q) - n.q.s = nil - if ws.maxClosedNodesInTree > 0 { - ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) - } else { - ws.removeNode(n) - } -} - -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - if streamID == 0 { - panic("adjustPriority on root") - } - - // If streamID does not exist, there are two cases: - // - A closed stream that has been removed (this will have ID <= maxID) - // - An idle stream that is being used for "grouping" (this will have ID > maxID) - n := ws.nodes[streamID] - if n == nil { - if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { - return - } - ws.maxID = streamID - n = &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, - } - n.setParent(&ws.root) - ws.nodes[streamID] = n - ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) - } - - // Section 5.3.1: A dependency on a stream that is not currently in the tree - // results in that stream being given a default priority (Section 5.3.5). - parent := ws.nodes[priority.StreamDep] - if parent == nil { - n.setParent(&ws.root) - n.weight = priorityDefaultWeight - return - } - - // Ignore if the client tries to make a node its own parent. - if n == parent { - return - } - - // Section 5.3.3: - // "If a stream is made dependent on one of its own dependencies, the - // formerly dependent stream is first moved to be dependent on the - // reprioritized stream's previous parent. The moved dependency retains - // its weight." - // - // That is: if parent depends on n, move parent to depend on n.parent. - for x := parent.parent; x != nil; x = x.parent { - if x == n { - parent.setParent(n.parent) - break - } - } - - // Section 5.3.3: The exclusive flag causes the stream to become the sole - // dependency of its parent stream, causing other dependencies to become - // dependent on the exclusive stream. - if priority.Exclusive { - k := parent.kids - for k != nil { - next := k.next - if k != n { - k.setParent(n) - } - k = next - } - } - - n.setParent(parent) - n.weight = priority.Weight -} - -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode - if id := wr.StreamID(); id == 0 { - n = &ws.root - } else { - n = ws.nodes[id] - if n == nil { - // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. 
- if wr.DataSize() > 0 { - panic("add DATA on non-open stream") - } - n = &ws.root - } - } - n.q.push(wr) -} - -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { - limit := int32(math.MaxInt32) - if openParent { - limit = ws.writeThrottleLimit - } - wr, ok = n.q.consume(limit) - if !ok { - return false - } - n.addBytes(int64(wr.DataSize())) - // If B depends on A and B continuously has data available but A - // does not, gradually increase the throttling limit to allow B to - // steal more and more bandwidth from A. - if openParent { - ws.writeThrottleLimit += 1024 - if ws.writeThrottleLimit < 0 { - ws.writeThrottleLimit = math.MaxInt32 - } - } else if ws.enableWriteThrottle { - ws.writeThrottleLimit = 1024 - } - return true - }) - return wr, ok -} - -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { - if maxSize == 0 { - return - } - if len(*list) == maxSize { - // Remove the oldest node, then shift left. - ws.removeNode((*list)[0]) - x := (*list)[1:] - copy(*list, x) - *list = (*list)[:len(x)] - } - *list = append(*list, n) -} - -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) - } - n.setParent(nil) - delete(ws.nodes, n.id) -} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go deleted file mode 100644 index 9a7b9e581..000000000 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "math" - -// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 -// priorities. Control frames like SETTINGS and PING are written before DATA -// frames, but if no control frames are queued and multiple streams have queued -// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. -func NewRandomWriteScheduler() WriteScheduler { - return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} -} - -type randomWriteScheduler struct { - // zero are frames not associated with a specific stream. - zero writeQueue - - // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle, closed, or emptied, it's deleted - // from the map. - sq map[uint32]*writeQueue - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // no-op: idle streams are not tracked -} - -func (ws *randomWriteScheduler) CloseStream(streamID uint32) { - q, ok := ws.sq[streamID] - if !ok { - return - } - delete(ws.sq, streamID) - ws.queuePool.put(q) -} - -func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - // no-op: priorities are ignored -} - -func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { - id := wr.StreamID() - if id == 0 { - ws.zero.push(wr) - return - } - q, ok := ws.sq[id] - if !ok { - q = ws.queuePool.get() - ws.sq[id] = q - } - q.push(wr) -} - -func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { - // Control frames first. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - // Iterate over all non-idle streams until finding one that can be consumed. 
- for streamID, q := range ws.sq { - if wr, ok := q.consume(math.MaxInt32); ok { - if q.empty() { - delete(ws.sq, streamID) - ws.queuePool.put(q) - } - return wr, true - } - } - return FrameWriteRequest{}, false -} diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go deleted file mode 100644 index a98a31f40..000000000 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ /dev/null @@ -1,734 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.10 - -// Package idna implements IDNA2008 using the compatibility processing -// defined by UTS (Unicode Technical Standard) #46, which defines a standard to -// deal with the transition from IDNA2003. -// -// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC -// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. -// UTS #46 is defined in https://www.unicode.org/reports/tr46. -// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the -// differences between these two standards. -package idna // import "golang.org/x/net/idna" - -import ( - "fmt" - "strings" - "unicode/utf8" - - "golang.org/x/text/secure/bidirule" - "golang.org/x/text/unicode/bidi" - "golang.org/x/text/unicode/norm" -) - -// NOTE: Unlike common practice in Go APIs, the functions will return a -// sanitized domain name in case of errors. Browsers sometimes use a partially -// evaluated string as lookup. -// TODO: the current error handling is, in my opinion, the least opinionated. -// Other strategies are also viable, though: -// Option 1) Return an empty string in case of error, but allow the user to -// specify explicitly which errors to ignore. -// Option 2) Return the partially evaluated string if it is itself a valid -// string, otherwise return the empty string in case of error. -// Option 3) Option 1 and 2. -// Option 4) Always return an empty string for now and implement Option 1 as -// needed, and document that the return string may not be empty in case of -// error in the future. -// I think Option 1 is best, but it is quite opinionated. - -// ToASCII is a wrapper for Punycode.ToASCII. -func ToASCII(s string) (string, error) { - return Punycode.process(s, true) -} - -// ToUnicode is a wrapper for Punycode.ToUnicode. -func ToUnicode(s string) (string, error) { - return Punycode.process(s, false) -} - -// An Option configures a Profile at creation time. -type Option func(*options) - -// Transitional sets a Profile to use the Transitional mapping as defined in UTS -// #46. This will cause, for example, "ß" to be mapped to "ss". Using the -// transitional mapping provides a compromise between IDNA2003 and IDNA2008 -// compatibility. It is used by most browsers when resolving domain names. This -// option is only meaningful if combined with MapForLookup. -func Transitional(transitional bool) Option { - return func(o *options) { o.transitional = true } -} - -// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts -// are longer than allowed by the RFC. -func VerifyDNSLength(verify bool) Option { - return func(o *options) { o.verifyDNSLength = verify } -} - -// RemoveLeadingDots removes leading label separators. Leading runes that map to -// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. 
-// -// This is the behavior suggested by the UTS #46 and is adopted by some -// browsers. -func RemoveLeadingDots(remove bool) Option { - return func(o *options) { o.removeLeadingDots = remove } -} - -// ValidateLabels sets whether to check the mandatory label validation criteria -// as defined in Section 5.4 of RFC 5891. This includes testing for correct use -// of hyphens ('-'), normalization, validity of runes, and the context rules. -func ValidateLabels(enable bool) Option { - return func(o *options) { - // Don't override existing mappings, but set one that at least checks - // normalization if it is not set. - if o.mapping == nil && enable { - o.mapping = normalize - } - o.trie = trie - o.validateLabels = enable - o.fromPuny = validateFromPunycode - } -} - -// StrictDomainName limits the set of permissible ASCII characters to those -// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the -// hyphen). This is set by default for MapForLookup and ValidateForRegistration. -// -// This option is useful, for instance, for browsers that allow characters -// outside this range, for example a '_' (U+005F LOW LINE). See -// http://www.rfc-editor.org/std/std3.txt for more details This option -// corresponds to the UseSTD3ASCIIRules option in UTS #46. -func StrictDomainName(use bool) Option { - return func(o *options) { - o.trie = trie - o.useSTD3Rules = use - o.fromPuny = validateFromPunycode - } -} - -// NOTE: the following options pull in tables. The tables should not be linked -// in as long as the options are not used. - -// BidiRule enables the Bidi rule as defined in RFC 5893. Any application -// that relies on proper validation of labels should include this rule. -func BidiRule() Option { - return func(o *options) { o.bidirule = bidirule.ValidString } -} - -// ValidateForRegistration sets validation options to verify that a given IDN is -// properly formatted for registration as defined by Section 4 of RFC 5891. -func ValidateForRegistration() Option { - return func(o *options) { - o.mapping = validateRegistration - StrictDomainName(true)(o) - ValidateLabels(true)(o) - VerifyDNSLength(true)(o) - BidiRule()(o) - } -} - -// MapForLookup sets validation and mapping options such that a given IDN is -// transformed for domain name lookup according to the requirements set out in -// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, -// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option -// to add this check. -// -// The mappings include normalization and mapping case, width and other -// compatibility mappings. -func MapForLookup() Option { - return func(o *options) { - o.mapping = validateAndMap - StrictDomainName(true)(o) - ValidateLabels(true)(o) - } -} - -type options struct { - transitional bool - useSTD3Rules bool - validateLabels bool - verifyDNSLength bool - removeLeadingDots bool - - trie *idnaTrie - - // fromPuny calls validation rules when converting A-labels to U-labels. - fromPuny func(p *Profile, s string) error - - // mapping implements a validation and mapping step as defined in RFC 5895 - // or UTS 46, tailored to, for example, domain registration or lookup. - mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) - - // bidirule, if specified, checks whether s conforms to the Bidi Rule - // defined in RFC 5893. - bidirule func(s string) bool -} - -// A Profile defines the configuration of an IDNA mapper. 
-type Profile struct { - options -} - -func apply(o *options, opts []Option) { - for _, f := range opts { - f(o) - } -} - -// New creates a new Profile. -// -// With no options, the returned Profile is the most permissive and equals the -// Punycode Profile. Options can be passed to further restrict the Profile. The -// MapForLookup and ValidateForRegistration options set a collection of options, -// for lookup and registration purposes respectively, which can be tailored by -// adding more fine-grained options, where later options override earlier -// options. -func New(o ...Option) *Profile { - p := &Profile{} - apply(&p.options, o) - return p -} - -// ToASCII converts a domain or domain label to its ASCII form. For example, -// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and -// ToASCII("golang") is "golang". If an error is encountered it will return -// an error and a (partially) processed result. -func (p *Profile) ToASCII(s string) (string, error) { - return p.process(s, true) -} - -// ToUnicode converts a domain or domain label to its Unicode form. For example, -// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and -// ToUnicode("golang") is "golang". If an error is encountered it will return -// an error and a (partially) processed result. -func (p *Profile) ToUnicode(s string) (string, error) { - pp := *p - pp.transitional = false - return pp.process(s, false) -} - -// String reports a string with a description of the profile for debugging -// purposes. The string format may change with different versions. -func (p *Profile) String() string { - s := "" - if p.transitional { - s = "Transitional" - } else { - s = "NonTransitional" - } - if p.useSTD3Rules { - s += ":UseSTD3Rules" - } - if p.validateLabels { - s += ":ValidateLabels" - } - if p.verifyDNSLength { - s += ":VerifyDNSLength" - } - return s -} - -var ( - // Punycode is a Profile that does raw punycode processing with a minimum - // of validation. - Punycode *Profile = punycode - - // Lookup is the recommended profile for looking up domain names, according - // to Section 5 of RFC 5891. The exact configuration of this profile may - // change over time. - Lookup *Profile = lookup - - // Display is the recommended profile for displaying domain names. - // The configuration of this profile may change over time. - Display *Profile = display - - // Registration is the recommended profile for checking whether a given - // IDN is valid for registration, according to Section 4 of RFC 5891. - Registration *Profile = registration - - punycode = &Profile{} - lookup = &Profile{options{ - transitional: true, - useSTD3Rules: true, - validateLabels: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, - }} - display = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, - }} - registration = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - verifyDNSLength: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateRegistration, - bidirule: bidirule.ValidString, - }} - - // TODO: profiles - // Register: recommended for approving domain names: don't do any mappings - // but rather reject on invalid input. Bundle or block deviation characters. 
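
The exported surface shown above — ToASCII, ToUnicode, New, the Option constructors, and the canned Punycode/Lookup/Display/Registration profiles — is what callers of this (formerly vendored) idna package actually touch. A short usage sketch; the sample domain and its Punycode form are taken from the doc comments above, and error handling is kept minimal:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/idna"
)

func main() {
	// Package-level helpers wrap the permissive Punycode profile.
	a, err := idna.ToASCII("bücher.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a) // xn--bcher-kva.example.com, per the doc comment above

	// The Lookup profile applies the stricter RFC 5891 Section 5 handling.
	u, err := idna.Lookup.ToUnicode("xn--bcher-kva.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // bücher.example.com

	// A custom profile: lookup mapping plus the Bidi rule, which the docs
	// above note MapForLookup does not add on its own.
	p := idna.New(idna.MapForLookup(), idna.BidiRule())
	mapped, err := p.ToASCII("Bücher.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(mapped)
}
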
-) - -type labelError struct{ label, code_ string } - -func (e labelError) code() string { return e.code_ } -func (e labelError) Error() string { - return fmt.Sprintf("idna: invalid label %q", e.label) -} - -type runeError rune - -func (e runeError) code() string { return "P1" } -func (e runeError) Error() string { - return fmt.Sprintf("idna: disallowed rune %U", e) -} - -// process implements the algorithm described in section 4 of UTS #46, -// see https://www.unicode.org/reports/tr46. -func (p *Profile) process(s string, toASCII bool) (string, error) { - var err error - var isBidi bool - if p.mapping != nil { - s, isBidi, err = p.mapping(p, s) - } - // Remove leading empty labels. - if p.removeLeadingDots { - for ; len(s) > 0 && s[0] == '.'; s = s[1:] { - } - } - // TODO: allow for a quick check of the tables data. - // It seems like we should only create this error on ToASCII, but the - // UTS 46 conformance tests suggests we should always check this. - if err == nil && p.verifyDNSLength && s == "" { - err = &labelError{s, "A4"} - } - labels := labelIter{orig: s} - for ; !labels.done(); labels.next() { - label := labels.label() - if label == "" { - // Empty labels are not okay. The label iterator skips the last - // label if it is empty. - if err == nil && p.verifyDNSLength { - err = &labelError{s, "A4"} - } - continue - } - if strings.HasPrefix(label, acePrefix) { - u, err2 := decode(label[len(acePrefix):]) - if err2 != nil { - if err == nil { - err = err2 - } - // Spec says keep the old label. - continue - } - isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight - labels.set(u) - if err == nil && p.validateLabels { - err = p.fromPuny(p, u) - } - if err == nil { - // This should be called on NonTransitional, according to the - // spec, but that currently does not have any effect. Use the - // original profile to preserve options. - err = p.validateLabel(u) - } - } else if err == nil { - err = p.validateLabel(label) - } - } - if isBidi && p.bidirule != nil && err == nil { - for labels.reset(); !labels.done(); labels.next() { - if !p.bidirule(labels.label()) { - err = &labelError{s, "B"} - break - } - } - } - if toASCII { - for labels.reset(); !labels.done(); labels.next() { - label := labels.label() - if !ascii(label) { - a, err2 := encode(acePrefix, label) - if err == nil { - err = err2 - } - label = a - labels.set(a) - } - n := len(label) - if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { - err = &labelError{label, "A4"} - } - } - } - s = labels.result() - if toASCII && p.verifyDNSLength && err == nil { - // Compute the length of the domain name minus the root label and its dot. - n := len(s) - if n > 0 && s[n-1] == '.' { - n-- - } - if len(s) < 1 || n > 253 { - err = &labelError{s, "A4"} - } - } - return s, err -} - -func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { - // TODO: consider first doing a quick check to see if any of these checks - // need to be done. This will make it slower in the general case, but - // faster in the common case. - mapped = norm.NFC.String(s) - isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft - return mapped, isBidi, nil -} - -func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { - // TODO: filter need for normalization in loop below. 
- if !norm.NFC.IsNormalString(s) { - return s, false, &labelError{s, "V1"} - } - for i := 0; i < len(s); { - v, sz := trie.lookupString(s[i:]) - if sz == 0 { - return s, bidi, runeError(utf8.RuneError) - } - bidi = bidi || info(v).isBidi(s[i:]) - // Copy bytes not copied so far. - switch p.simplify(info(v).category()) { - // TODO: handle the NV8 defined in the Unicode idna data set to allow - // for strict conformance to IDNA2008. - case valid, deviation: - case disallowed, mapped, unknown, ignored: - r, _ := utf8.DecodeRuneInString(s[i:]) - return s, bidi, runeError(r) - } - i += sz - } - return s, bidi, nil -} - -func (c info) isBidi(s string) bool { - if !c.isMapped() { - return c&attributesMask == rtl - } - // TODO: also store bidi info for mapped data. This is possible, but a bit - // cumbersome and not for the common case. - p, _ := bidi.LookupString(s) - switch p.Class() { - case bidi.R, bidi.AL, bidi.AN: - return true - } - return false -} - -func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { - var ( - b []byte - k int - ) - // combinedInfoBits contains the or-ed bits of all runes. We use this - // to derive the mayNeedNorm bit later. This may trigger normalization - // overeagerly, but it will not do so in the common case. The end result - // is another 10% saving on BenchmarkProfile for the common case. - var combinedInfoBits info - for i := 0; i < len(s); { - v, sz := trie.lookupString(s[i:]) - if sz == 0 { - b = append(b, s[k:i]...) - b = append(b, "\ufffd"...) - k = len(s) - if err == nil { - err = runeError(utf8.RuneError) - } - break - } - combinedInfoBits |= info(v) - bidi = bidi || info(v).isBidi(s[i:]) - start := i - i += sz - // Copy bytes not copied so far. - switch p.simplify(info(v).category()) { - case valid: - continue - case disallowed: - if err == nil { - r, _ := utf8.DecodeRuneInString(s[start:]) - err = runeError(r) - } - continue - case mapped, deviation: - b = append(b, s[k:start]...) - b = info(v).appendMapping(b, s[start:i]) - case ignored: - b = append(b, s[k:start]...) - // drop the rune - case unknown: - b = append(b, s[k:start]...) - b = append(b, "\ufffd"...) - } - k = i - } - if k == 0 { - // No changes so far. - if combinedInfoBits&mayNeedNorm != 0 { - s = norm.NFC.String(s) - } - } else { - b = append(b, s[k:]...) - if norm.NFC.QuickSpan(b) != len(b) { - b = norm.NFC.Bytes(b) - } - // TODO: the punycode converters require strings as input. - s = string(b) - } - return s, bidi, err -} - -// A labelIter allows iterating over domain name labels. -type labelIter struct { - orig string - slice []string - curStart int - curEnd int - i int -} - -func (l *labelIter) reset() { - l.curStart = 0 - l.curEnd = 0 - l.i = 0 -} - -func (l *labelIter) done() bool { - return l.curStart >= len(l.orig) -} - -func (l *labelIter) result() string { - if l.slice != nil { - return strings.Join(l.slice, ".") - } - return l.orig -} - -func (l *labelIter) label() string { - if l.slice != nil { - return l.slice[l.i] - } - p := strings.IndexByte(l.orig[l.curStart:], '.') - l.curEnd = l.curStart + p - if p == -1 { - l.curEnd = len(l.orig) - } - return l.orig[l.curStart:l.curEnd] -} - -// next sets the value to the next label. It skips the last label if it is empty. -func (l *labelIter) next() { - l.i++ - if l.slice != nil { - if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { - l.curStart = len(l.orig) - } - } else { - l.curStart = l.curEnd + 1 - if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' 
{ - l.curStart = len(l.orig) - } - } -} - -func (l *labelIter) set(s string) { - if l.slice == nil { - l.slice = strings.Split(l.orig, ".") - } - l.slice[l.i] = s -} - -// acePrefix is the ASCII Compatible Encoding prefix. -const acePrefix = "xn--" - -func (p *Profile) simplify(cat category) category { - switch cat { - case disallowedSTD3Mapped: - if p.useSTD3Rules { - cat = disallowed - } else { - cat = mapped - } - case disallowedSTD3Valid: - if p.useSTD3Rules { - cat = disallowed - } else { - cat = valid - } - case deviation: - if !p.transitional { - cat = valid - } - case validNV8, validXV8: - // TODO: handle V2008 - cat = valid - } - return cat -} - -func validateFromPunycode(p *Profile, s string) error { - if !norm.NFC.IsNormalString(s) { - return &labelError{s, "V1"} - } - // TODO: detect whether string may have to be normalized in the following - // loop. - for i := 0; i < len(s); { - v, sz := trie.lookupString(s[i:]) - if sz == 0 { - return runeError(utf8.RuneError) - } - if c := p.simplify(info(v).category()); c != valid && c != deviation { - return &labelError{s, "V6"} - } - i += sz - } - return nil -} - -const ( - zwnj = "\u200c" - zwj = "\u200d" -) - -type joinState int8 - -const ( - stateStart joinState = iota - stateVirama - stateBefore - stateBeforeVirama - stateAfter - stateFAIL -) - -var joinStates = [][numJoinTypes]joinState{ - stateStart: { - joiningL: stateBefore, - joiningD: stateBefore, - joinZWNJ: stateFAIL, - joinZWJ: stateFAIL, - joinVirama: stateVirama, - }, - stateVirama: { - joiningL: stateBefore, - joiningD: stateBefore, - }, - stateBefore: { - joiningL: stateBefore, - joiningD: stateBefore, - joiningT: stateBefore, - joinZWNJ: stateAfter, - joinZWJ: stateFAIL, - joinVirama: stateBeforeVirama, - }, - stateBeforeVirama: { - joiningL: stateBefore, - joiningD: stateBefore, - joiningT: stateBefore, - }, - stateAfter: { - joiningL: stateFAIL, - joiningD: stateBefore, - joiningT: stateAfter, - joiningR: stateStart, - joinZWNJ: stateFAIL, - joinZWJ: stateFAIL, - joinVirama: stateAfter, // no-op as we can't accept joiners here - }, - stateFAIL: { - 0: stateFAIL, - joiningL: stateFAIL, - joiningD: stateFAIL, - joiningT: stateFAIL, - joiningR: stateFAIL, - joinZWNJ: stateFAIL, - joinZWJ: stateFAIL, - joinVirama: stateFAIL, - }, -} - -// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are -// already implicitly satisfied by the overall implementation. -func (p *Profile) validateLabel(s string) (err error) { - if s == "" { - if p.verifyDNSLength { - return &labelError{s, "A4"} - } - return nil - } - if !p.validateLabels { - return nil - } - trie := p.trie // p.validateLabels is only set if trie is set. - if len(s) > 4 && s[2] == '-' && s[3] == '-' { - return &labelError{s, "V2"} - } - if s[0] == '-' || s[len(s)-1] == '-' { - return &labelError{s, "V3"} - } - // TODO: merge the use of this in the trie. - v, sz := trie.lookupString(s) - x := info(v) - if x.isModifier() { - return &labelError{s, "V5"} - } - // Quickly return in the absence of zero-width (non) joiners. 
- if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { - return nil - } - st := stateStart - for i := 0; ; { - jt := x.joinType() - if s[i:i+sz] == zwj { - jt = joinZWJ - } else if s[i:i+sz] == zwnj { - jt = joinZWNJ - } - st = joinStates[st][jt] - if x.isViramaModifier() { - st = joinStates[st][joinVirama] - } - if i += sz; i == len(s) { - break - } - v, sz = trie.lookupString(s[i:]) - x = info(v) - } - if st == stateFAIL || st == stateAfter { - return &labelError{s, "C"} - } - return nil -} - -func ascii(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go deleted file mode 100644 index 8842146b5..000000000 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ /dev/null @@ -1,682 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.10 - -// Package idna implements IDNA2008 using the compatibility processing -// defined by UTS (Unicode Technical Standard) #46, which defines a standard to -// deal with the transition from IDNA2003. -// -// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC -// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. -// UTS #46 is defined in https://www.unicode.org/reports/tr46. -// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the -// differences between these two standards. -package idna // import "golang.org/x/net/idna" - -import ( - "fmt" - "strings" - "unicode/utf8" - - "golang.org/x/text/secure/bidirule" - "golang.org/x/text/unicode/norm" -) - -// NOTE: Unlike common practice in Go APIs, the functions will return a -// sanitized domain name in case of errors. Browsers sometimes use a partially -// evaluated string as lookup. -// TODO: the current error handling is, in my opinion, the least opinionated. -// Other strategies are also viable, though: -// Option 1) Return an empty string in case of error, but allow the user to -// specify explicitly which errors to ignore. -// Option 2) Return the partially evaluated string if it is itself a valid -// string, otherwise return the empty string in case of error. -// Option 3) Option 1 and 2. -// Option 4) Always return an empty string for now and implement Option 1 as -// needed, and document that the return string may not be empty in case of -// error in the future. -// I think Option 1 is best, but it is quite opinionated. - -// ToASCII is a wrapper for Punycode.ToASCII. -func ToASCII(s string) (string, error) { - return Punycode.process(s, true) -} - -// ToUnicode is a wrapper for Punycode.ToUnicode. -func ToUnicode(s string) (string, error) { - return Punycode.process(s, false) -} - -// An Option configures a Profile at creation time. -type Option func(*options) - -// Transitional sets a Profile to use the Transitional mapping as defined in UTS -// #46. This will cause, for example, "ß" to be mapped to "ss". Using the -// transitional mapping provides a compromise between IDNA2003 and IDNA2008 -// compatibility. It is used by most browsers when resolving domain names. This -// option is only meaningful if combined with MapForLookup. 
-func Transitional(transitional bool) Option { - return func(o *options) { o.transitional = true } -} - -// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts -// are longer than allowed by the RFC. -func VerifyDNSLength(verify bool) Option { - return func(o *options) { o.verifyDNSLength = verify } -} - -// RemoveLeadingDots removes leading label separators. Leading runes that map to -// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. -// -// This is the behavior suggested by the UTS #46 and is adopted by some -// browsers. -func RemoveLeadingDots(remove bool) Option { - return func(o *options) { o.removeLeadingDots = remove } -} - -// ValidateLabels sets whether to check the mandatory label validation criteria -// as defined in Section 5.4 of RFC 5891. This includes testing for correct use -// of hyphens ('-'), normalization, validity of runes, and the context rules. -func ValidateLabels(enable bool) Option { - return func(o *options) { - // Don't override existing mappings, but set one that at least checks - // normalization if it is not set. - if o.mapping == nil && enable { - o.mapping = normalize - } - o.trie = trie - o.validateLabels = enable - o.fromPuny = validateFromPunycode - } -} - -// StrictDomainName limits the set of permissable ASCII characters to those -// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the -// hyphen). This is set by default for MapForLookup and ValidateForRegistration. -// -// This option is useful, for instance, for browsers that allow characters -// outside this range, for example a '_' (U+005F LOW LINE). See -// http://www.rfc-editor.org/std/std3.txt for more details This option -// corresponds to the UseSTD3ASCIIRules option in UTS #46. -func StrictDomainName(use bool) Option { - return func(o *options) { - o.trie = trie - o.useSTD3Rules = use - o.fromPuny = validateFromPunycode - } -} - -// NOTE: the following options pull in tables. The tables should not be linked -// in as long as the options are not used. - -// BidiRule enables the Bidi rule as defined in RFC 5893. Any application -// that relies on proper validation of labels should include this rule. -func BidiRule() Option { - return func(o *options) { o.bidirule = bidirule.ValidString } -} - -// ValidateForRegistration sets validation options to verify that a given IDN is -// properly formatted for registration as defined by Section 4 of RFC 5891. -func ValidateForRegistration() Option { - return func(o *options) { - o.mapping = validateRegistration - StrictDomainName(true)(o) - ValidateLabels(true)(o) - VerifyDNSLength(true)(o) - BidiRule()(o) - } -} - -// MapForLookup sets validation and mapping options such that a given IDN is -// transformed for domain name lookup according to the requirements set out in -// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, -// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option -// to add this check. -// -// The mappings include normalization and mapping case, width and other -// compatibility mappings. -func MapForLookup() Option { - return func(o *options) { - o.mapping = validateAndMap - StrictDomainName(true)(o) - ValidateLabels(true)(o) - RemoveLeadingDots(true)(o) - } -} - -type options struct { - transitional bool - useSTD3Rules bool - validateLabels bool - verifyDNSLength bool - removeLeadingDots bool - - trie *idnaTrie - - // fromPuny calls validation rules when converting A-labels to U-labels. 
- fromPuny func(p *Profile, s string) error - - // mapping implements a validation and mapping step as defined in RFC 5895 - // or UTS 46, tailored to, for example, domain registration or lookup. - mapping func(p *Profile, s string) (string, error) - - // bidirule, if specified, checks whether s conforms to the Bidi Rule - // defined in RFC 5893. - bidirule func(s string) bool -} - -// A Profile defines the configuration of a IDNA mapper. -type Profile struct { - options -} - -func apply(o *options, opts []Option) { - for _, f := range opts { - f(o) - } -} - -// New creates a new Profile. -// -// With no options, the returned Profile is the most permissive and equals the -// Punycode Profile. Options can be passed to further restrict the Profile. The -// MapForLookup and ValidateForRegistration options set a collection of options, -// for lookup and registration purposes respectively, which can be tailored by -// adding more fine-grained options, where later options override earlier -// options. -func New(o ...Option) *Profile { - p := &Profile{} - apply(&p.options, o) - return p -} - -// ToASCII converts a domain or domain label to its ASCII form. For example, -// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and -// ToASCII("golang") is "golang". If an error is encountered it will return -// an error and a (partially) processed result. -func (p *Profile) ToASCII(s string) (string, error) { - return p.process(s, true) -} - -// ToUnicode converts a domain or domain label to its Unicode form. For example, -// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and -// ToUnicode("golang") is "golang". If an error is encountered it will return -// an error and a (partially) processed result. -func (p *Profile) ToUnicode(s string) (string, error) { - pp := *p - pp.transitional = false - return pp.process(s, false) -} - -// String reports a string with a description of the profile for debugging -// purposes. The string format may change with different versions. -func (p *Profile) String() string { - s := "" - if p.transitional { - s = "Transitional" - } else { - s = "NonTransitional" - } - if p.useSTD3Rules { - s += ":UseSTD3Rules" - } - if p.validateLabels { - s += ":ValidateLabels" - } - if p.verifyDNSLength { - s += ":VerifyDNSLength" - } - return s -} - -var ( - // Punycode is a Profile that does raw punycode processing with a minimum - // of validation. - Punycode *Profile = punycode - - // Lookup is the recommended profile for looking up domain names, according - // to Section 5 of RFC 5891. The exact configuration of this profile may - // change over time. - Lookup *Profile = lookup - - // Display is the recommended profile for displaying domain names. - // The configuration of this profile may change over time. - Display *Profile = display - - // Registration is the recommended profile for checking whether a given - // IDN is valid for registration, according to Section 4 of RFC 5891. 
- Registration *Profile = registration - - punycode = &Profile{} - lookup = &Profile{options{ - transitional: true, - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, - }} - display = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, - }} - registration = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - verifyDNSLength: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateRegistration, - bidirule: bidirule.ValidString, - }} - - // TODO: profiles - // Register: recommended for approving domain names: don't do any mappings - // but rather reject on invalid input. Bundle or block deviation characters. -) - -type labelError struct{ label, code_ string } - -func (e labelError) code() string { return e.code_ } -func (e labelError) Error() string { - return fmt.Sprintf("idna: invalid label %q", e.label) -} - -type runeError rune - -func (e runeError) code() string { return "P1" } -func (e runeError) Error() string { - return fmt.Sprintf("idna: disallowed rune %U", e) -} - -// process implements the algorithm described in section 4 of UTS #46, -// see https://www.unicode.org/reports/tr46. -func (p *Profile) process(s string, toASCII bool) (string, error) { - var err error - if p.mapping != nil { - s, err = p.mapping(p, s) - } - // Remove leading empty labels. - if p.removeLeadingDots { - for ; len(s) > 0 && s[0] == '.'; s = s[1:] { - } - } - // It seems like we should only create this error on ToASCII, but the - // UTS 46 conformance tests suggests we should always check this. - if err == nil && p.verifyDNSLength && s == "" { - err = &labelError{s, "A4"} - } - labels := labelIter{orig: s} - for ; !labels.done(); labels.next() { - label := labels.label() - if label == "" { - // Empty labels are not okay. The label iterator skips the last - // label if it is empty. - if err == nil && p.verifyDNSLength { - err = &labelError{s, "A4"} - } - continue - } - if strings.HasPrefix(label, acePrefix) { - u, err2 := decode(label[len(acePrefix):]) - if err2 != nil { - if err == nil { - err = err2 - } - // Spec says keep the old label. - continue - } - labels.set(u) - if err == nil && p.validateLabels { - err = p.fromPuny(p, u) - } - if err == nil { - // This should be called on NonTransitional, according to the - // spec, but that currently does not have any effect. Use the - // original profile to preserve options. - err = p.validateLabel(u) - } - } else if err == nil { - err = p.validateLabel(label) - } - } - if toASCII { - for labels.reset(); !labels.done(); labels.next() { - label := labels.label() - if !ascii(label) { - a, err2 := encode(acePrefix, label) - if err == nil { - err = err2 - } - label = a - labels.set(a) - } - n := len(label) - if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { - err = &labelError{label, "A4"} - } - } - } - s = labels.result() - if toASCII && p.verifyDNSLength && err == nil { - // Compute the length of the domain name minus the root label and its dot. - n := len(s) - if n > 0 && s[n-1] == '.' 
{ - n-- - } - if len(s) < 1 || n > 253 { - err = &labelError{s, "A4"} - } - } - return s, err -} - -func normalize(p *Profile, s string) (string, error) { - return norm.NFC.String(s), nil -} - -func validateRegistration(p *Profile, s string) (string, error) { - if !norm.NFC.IsNormalString(s) { - return s, &labelError{s, "V1"} - } - for i := 0; i < len(s); { - v, sz := trie.lookupString(s[i:]) - // Copy bytes not copied so far. - switch p.simplify(info(v).category()) { - // TODO: handle the NV8 defined in the Unicode idna data set to allow - // for strict conformance to IDNA2008. - case valid, deviation: - case disallowed, mapped, unknown, ignored: - r, _ := utf8.DecodeRuneInString(s[i:]) - return s, runeError(r) - } - i += sz - } - return s, nil -} - -func validateAndMap(p *Profile, s string) (string, error) { - var ( - err error - b []byte - k int - ) - for i := 0; i < len(s); { - v, sz := trie.lookupString(s[i:]) - start := i - i += sz - // Copy bytes not copied so far. - switch p.simplify(info(v).category()) { - case valid: - continue - case disallowed: - if err == nil { - r, _ := utf8.DecodeRuneInString(s[start:]) - err = runeError(r) - } - continue - case mapped, deviation: - b = append(b, s[k:start]...) - b = info(v).appendMapping(b, s[start:i]) - case ignored: - b = append(b, s[k:start]...) - // drop the rune - case unknown: - b = append(b, s[k:start]...) - b = append(b, "\ufffd"...) - } - k = i - } - if k == 0 { - // No changes so far. - s = norm.NFC.String(s) - } else { - b = append(b, s[k:]...) - if norm.NFC.QuickSpan(b) != len(b) { - b = norm.NFC.Bytes(b) - } - // TODO: the punycode converters require strings as input. - s = string(b) - } - return s, err -} - -// A labelIter allows iterating over domain name labels. -type labelIter struct { - orig string - slice []string - curStart int - curEnd int - i int -} - -func (l *labelIter) reset() { - l.curStart = 0 - l.curEnd = 0 - l.i = 0 -} - -func (l *labelIter) done() bool { - return l.curStart >= len(l.orig) -} - -func (l *labelIter) result() string { - if l.slice != nil { - return strings.Join(l.slice, ".") - } - return l.orig -} - -func (l *labelIter) label() string { - if l.slice != nil { - return l.slice[l.i] - } - p := strings.IndexByte(l.orig[l.curStart:], '.') - l.curEnd = l.curStart + p - if p == -1 { - l.curEnd = len(l.orig) - } - return l.orig[l.curStart:l.curEnd] -} - -// next sets the value to the next label. It skips the last label if it is empty. -func (l *labelIter) next() { - l.i++ - if l.slice != nil { - if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { - l.curStart = len(l.orig) - } - } else { - l.curStart = l.curEnd + 1 - if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { - l.curStart = len(l.orig) - } - } -} - -func (l *labelIter) set(s string) { - if l.slice == nil { - l.slice = strings.Split(l.orig, ".") - } - l.slice[l.i] = s -} - -// acePrefix is the ASCII Compatible Encoding prefix. 
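// labelIter above walks the '.'-separated labels of orig without allocating
// until set is called. A rough functional sketch of the same iteration order
// (splitLabels is illustrative, not part of this file, and relies on the
// strings import already used here): a single trailing empty label, i.e. a
// fully-qualified name ending in the root dot, is skipped.
func splitLabels(s string) []string {
	labels := strings.Split(s, ".")
	if n := len(labels); n > 0 && labels[n-1] == "" {
		labels = labels[:n-1] // drop the empty label after a trailing dot
	}
	return labels
}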
-const acePrefix = "xn--" - -func (p *Profile) simplify(cat category) category { - switch cat { - case disallowedSTD3Mapped: - if p.useSTD3Rules { - cat = disallowed - } else { - cat = mapped - } - case disallowedSTD3Valid: - if p.useSTD3Rules { - cat = disallowed - } else { - cat = valid - } - case deviation: - if !p.transitional { - cat = valid - } - case validNV8, validXV8: - // TODO: handle V2008 - cat = valid - } - return cat -} - -func validateFromPunycode(p *Profile, s string) error { - if !norm.NFC.IsNormalString(s) { - return &labelError{s, "V1"} - } - for i := 0; i < len(s); { - v, sz := trie.lookupString(s[i:]) - if c := p.simplify(info(v).category()); c != valid && c != deviation { - return &labelError{s, "V6"} - } - i += sz - } - return nil -} - -const ( - zwnj = "\u200c" - zwj = "\u200d" -) - -type joinState int8 - -const ( - stateStart joinState = iota - stateVirama - stateBefore - stateBeforeVirama - stateAfter - stateFAIL -) - -var joinStates = [][numJoinTypes]joinState{ - stateStart: { - joiningL: stateBefore, - joiningD: stateBefore, - joinZWNJ: stateFAIL, - joinZWJ: stateFAIL, - joinVirama: stateVirama, - }, - stateVirama: { - joiningL: stateBefore, - joiningD: stateBefore, - }, - stateBefore: { - joiningL: stateBefore, - joiningD: stateBefore, - joiningT: stateBefore, - joinZWNJ: stateAfter, - joinZWJ: stateFAIL, - joinVirama: stateBeforeVirama, - }, - stateBeforeVirama: { - joiningL: stateBefore, - joiningD: stateBefore, - joiningT: stateBefore, - }, - stateAfter: { - joiningL: stateFAIL, - joiningD: stateBefore, - joiningT: stateAfter, - joiningR: stateStart, - joinZWNJ: stateFAIL, - joinZWJ: stateFAIL, - joinVirama: stateAfter, // no-op as we can't accept joiners here - }, - stateFAIL: { - 0: stateFAIL, - joiningL: stateFAIL, - joiningD: stateFAIL, - joiningT: stateFAIL, - joiningR: stateFAIL, - joinZWNJ: stateFAIL, - joinZWJ: stateFAIL, - joinVirama: stateFAIL, - }, -} - -// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are -// already implicitly satisfied by the overall implementation. -func (p *Profile) validateLabel(s string) error { - if s == "" { - if p.verifyDNSLength { - return &labelError{s, "A4"} - } - return nil - } - if p.bidirule != nil && !p.bidirule(s) { - return &labelError{s, "B"} - } - if !p.validateLabels { - return nil - } - trie := p.trie // p.validateLabels is only set if trie is set. - if len(s) > 4 && s[2] == '-' && s[3] == '-' { - return &labelError{s, "V2"} - } - if s[0] == '-' || s[len(s)-1] == '-' { - return &labelError{s, "V3"} - } - // TODO: merge the use of this in the trie. - v, sz := trie.lookupString(s) - x := info(v) - if x.isModifier() { - return &labelError{s, "V5"} - } - // Quickly return in the absence of zero-width (non) joiners. 
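// The scan below enforces the CONTEXTJ rules of RFC 5892 Appendix A using
// the joinStates table above: a zero-width joiner (zwj) is accepted only
// directly after a virama, while a zero-width non-joiner (zwnj) is accepted
// after a virama or inside an L/D (T)* zwnj (T)* R/D joining sequence.
// Finishing the label in the dangling stateAfter (a zwnj still waiting for
// its right-joining rune) is reported with the same "C" error code as a
// stateFAIL transition.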
- if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { - return nil - } - st := stateStart - for i := 0; ; { - jt := x.joinType() - if s[i:i+sz] == zwj { - jt = joinZWJ - } else if s[i:i+sz] == zwnj { - jt = joinZWNJ - } - st = joinStates[st][jt] - if x.isViramaModifier() { - st = joinStates[st][joinVirama] - } - if i += sz; i == len(s) { - break - } - v, sz = trie.lookupString(s[i:]) - x = info(v) - } - if st == stateFAIL || st == stateAfter { - return &labelError{s, "C"} - } - return nil -} - -func ascii(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go deleted file mode 100644 index 02c7d59af..000000000 --- a/vendor/golang.org/x/net/idna/punycode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -// This file implements the Punycode algorithm from RFC 3492. - -import ( - "math" - "strings" - "unicode/utf8" -) - -// These parameter values are specified in section 5. -// -// All computation is done with int32s, so that overflow behavior is identical -// regardless of whether int is 32-bit or 64-bit. -const ( - base int32 = 36 - damp int32 = 700 - initialBias int32 = 72 - initialN int32 = 128 - skew int32 = 38 - tmax int32 = 26 - tmin int32 = 1 -) - -func punyError(s string) error { return &labelError{s, "A3"} } - -// decode decodes a string as specified in section 6.2. -func decode(encoded string) (string, error) { - if encoded == "" { - return "", nil - } - pos := 1 + strings.LastIndex(encoded, "-") - if pos == 1 { - return "", punyError(encoded) - } - if pos == len(encoded) { - return encoded[:len(encoded)-1], nil - } - output := make([]rune, 0, len(encoded)) - if pos != 0 { - for _, r := range encoded[:pos-1] { - output = append(output, r) - } - } - i, n, bias := int32(0), initialN, initialBias - for pos < len(encoded) { - oldI, w := i, int32(1) - for k := base; ; k += base { - if pos == len(encoded) { - return "", punyError(encoded) - } - digit, ok := decodeDigit(encoded[pos]) - if !ok { - return "", punyError(encoded) - } - pos++ - i += digit * w - if i < 0 { - return "", punyError(encoded) - } - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if digit < t { - break - } - w *= base - t - if w >= math.MaxInt32/base { - return "", punyError(encoded) - } - } - x := int32(len(output) + 1) - bias = adapt(i-oldI, x, oldI == 0) - n += i / x - i %= x - if n > utf8.MaxRune || len(output) >= 1024 { - return "", punyError(encoded) - } - output = append(output, 0) - copy(output[i+1:], output[i:]) - output[i] = n - i++ - } - return string(output), nil -} - -// encode encodes a string as specified in section 6.3 and prepends prefix to -// the result. -// -// The "while h < length(input)" line in the specification becomes "for -// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
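// A worked example for decode above (and encode, which follows): the
// canonical "bücher" round trip. The ASCII "basic" code points come first,
// then a delta-encoded tail after the last '-':
//
//	decode("bcher-kva")      yields "bücher", nil        // "kva" inserts 'ü' after 'b'
//	encode("xn--", "bücher") yields "xn--bcher-kva", nil // the prefix is prepended as-is
//
// Callers outside this package reach this code through the Punycode profile
// in idna.go, e.g. Punycode.ToUnicode("xn--bcher-kva").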
-func encode(prefix, s string) (string, error) { - output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) - copy(output, prefix) - delta, n, bias := int32(0), initialN, initialBias - b, remaining := int32(0), int32(0) - for _, r := range s { - if r < 0x80 { - b++ - output = append(output, byte(r)) - } else { - remaining++ - } - } - h := b - if b > 0 { - output = append(output, '-') - } - for remaining != 0 { - m := int32(0x7fffffff) - for _, r := range s { - if m > r && r >= n { - m = r - } - } - delta += (m - n) * (h + 1) - if delta < 0 { - return "", punyError(s) - } - n = m - for _, r := range s { - if r < n { - delta++ - if delta < 0 { - return "", punyError(s) - } - continue - } - if r > n { - continue - } - q := delta - for k := base; ; k += base { - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if q < t { - break - } - output = append(output, encodeDigit(t+(q-t)%(base-t))) - q = (q - t) / (base - t) - } - output = append(output, encodeDigit(q)) - bias = adapt(delta, h+1, h == b) - delta = 0 - h++ - remaining-- - } - delta++ - n++ - } - return string(output), nil -} - -func decodeDigit(x byte) (digit int32, ok bool) { - switch { - case '0' <= x && x <= '9': - return int32(x - ('0' - 26)), true - case 'A' <= x && x <= 'Z': - return int32(x - 'A'), true - case 'a' <= x && x <= 'z': - return int32(x - 'a'), true - } - return 0, false -} - -func encodeDigit(digit int32) byte { - switch { - case 0 <= digit && digit < 26: - return byte(digit + 'a') - case 26 <= digit && digit < 36: - return byte(digit + ('0' - 26)) - } - panic("idna: internal error in punycode encoding") -} - -// adapt is the bias adaptation function specified in section 6.1. -func adapt(delta, numPoints int32, firstTime bool) int32 { - if firstTime { - delta /= damp - } else { - delta /= 2 - } - delta += delta / numPoints - k := int32(0) - for delta > ((base-tmin)*tmax)/2 { - delta /= base - tmin - k += base - } - return k + (base-tmin+1)*delta/(delta+skew) -} diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go deleted file mode 100644 index 54fddb4b1..000000000 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ /dev/null @@ -1,4559 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// +build go1.10,!go1.13 - -package idna - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
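// Note on the punycode helpers that end just above: the 36-character digit
// alphabet maps the values 0..25 to 'a'..'z' and 26..35 to '0'..'9' (so
// encodeDigit(0) == 'a' and encodeDigit(26) == '0'), decodeDigit additionally
// accepts 'A'..'Z', and adapt rescales the bias after each insertion so that
// subsequent deltas keep their digit strings short (RFC 3492, section 6.1).
//
// The build constraint above restricts this generated table file to Go 1.10
// through Go 1.12; newer toolchains select a companion table file generated
// from a later Unicode version.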
-const UnicodeVersion = "10.0.0" - -var mappings string = "" + // Size: 8175 bytes - "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + - "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + - "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + - "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + - "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + - "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + - "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + - "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + - "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + - "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + - "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + - "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + - "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + - "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + - "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + - "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + - "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + - "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + - "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + - "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + - "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + - "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + - "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + - "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + - "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + - "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + - ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + - "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + - "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + - "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + - "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + - "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + - "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + - "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + - "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + - "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + - "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + - "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + - "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + - "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + - "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + - "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + - "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + - "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + - 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + - "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + - "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + - "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + - "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + - "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + - "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + - "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + - "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + - "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + - "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + - "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + - "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + - "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + - "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + - "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + - "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + - "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + - "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + - "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + - "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + - "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + - "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + - "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + - "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + - "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + - "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + - "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + - "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + - " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + - "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + - "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + - "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + - "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + - "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + - "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + - "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + - "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + - "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + - "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + - "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + - "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + - "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + - "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + - "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + - "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + - "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + - "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + - "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + - "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + - "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + - "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + - "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + - "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + - "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + - "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + - "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + - "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + - "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + - "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + - "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + - "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + - "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + - "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + - "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + - "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + - "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + - "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + - "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + - "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + - "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + - "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + - "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + - "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + - "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + - "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + - "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + - "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + - "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + - "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + - "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + - "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + - "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + - "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + - "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + - "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + - "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + - "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + - "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + - "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + - "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + - 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + - "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" - -var xorData string = "" + // Size: 4855 bytes - "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + - "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + - "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + - "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + - "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + - "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + - "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + - "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + - "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + - "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + - "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + - "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + - "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + - "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + - "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + - "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + - "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + - "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + - "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + - "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + - "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + - "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + - "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + - "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + - "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + - "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + - "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + - "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + - "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + - "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + - "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + - "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + - "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + - "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + - "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + - "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + - "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + - "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + - "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + - "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + - "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + - "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + - ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + - 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + - "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + - "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + - "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + - "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + - "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + - "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + - "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + - "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + - "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + - "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + - "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + - "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + - "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + - "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + - "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + - "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + - "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + - "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + - "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + - "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + - "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + - "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + - "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + - "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + - "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + - "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + - "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + - "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + - "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + - "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + - "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + - "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + - "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + - "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + - "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + - "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + - "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + - "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ - "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + - "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + - "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + - "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + - "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + - "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + - "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + - "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + - "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + - "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + - "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + - ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + - "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + - "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + - "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + - "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + - "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + - "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + - "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + - "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + - "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + - "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + - "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + - "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + - "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + - "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + - "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + - "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + - "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + - "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + - "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + - "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + - "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + - "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + - "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + - "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + - "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + - "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + - "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + - "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + - "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + - "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + - "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + - "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + - "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ - "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + - "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + - "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + - "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + - "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + - "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + - "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + - "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + - "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + - "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + - "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + - "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + - "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + - "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + - "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + - "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + - "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + - "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + - "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + - "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + - "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + - "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + - "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + - "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + - "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + - "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + - "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + - "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + - "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + - "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + - "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + - "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + - "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + - "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + - "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + - "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + - "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + - "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + - "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + - "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + - "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + - "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + - "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + - "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + - "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + - 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + - "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + - "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + - "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + - "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + - "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + - "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + - "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + - "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + - "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + - "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + - "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + - "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + - "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + - "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + - "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + - "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + - "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + - "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + - "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + - "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + - "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + - "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + - "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + - "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + - "\x04\x03\x0c?\x05\x03\x0c" + - "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + - "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + - "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + - "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + - "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + - "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + - "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + - "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + - "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + - "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + - "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + - "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + - "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + - "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + - "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ - "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + - "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + - "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + - "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + - "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + - "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + - "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + - "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + - "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + - "\x05\x22\x05\x03\x050\x1d" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
- case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. -type idnaTrie struct{} - -func newIdnaTrie(i int) *idnaTrie { - return &idnaTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { - switch { - case n < 125: - return uint16(idnaValues[n<<6+uint32(b)]) - default: - n -= 125 - return uint16(idnaSparse.lookup(n, b)) - } -} - -// idnaValues: 127 blocks, 8128 entries, 16256 bytes -// The third block is the zero block. 
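// The lookup helpers above are driven by the validation and mapping code in
// idna.go. A minimal sketch of that scanning pattern (compare validateAndMap
// and validateFromPunycode there; scanInfo is illustrative, not part of the
// package):
func scanInfo(t *idnaTrie, s string) {
	for i := 0; i < len(s); {
		v, sz := t.lookupString(s[i:])
		if sz == 0 {
			break // s ends in an incomplete UTF-8 sequence
		}
		_ = info(v) // category and mapping bits for the rune at s[i:]
		i += sz
	}
}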
-var idnaValues = [8128]uint16{ - // Block 0x0, offset 0x0 - 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, - 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, - 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, - 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, - 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, - 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, - 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, - 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, - 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, - 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, - 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, - // Block 0x1, offset 0x40 - 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, - 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, - 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, - 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, - 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, - 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, - 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, - 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, - 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, - 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, - 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, - 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, - 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, - 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, - 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, - 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, - 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, - 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, - 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, - 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, - 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, - // Block 0x4, offset 0x100 - 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, - 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, - 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, - 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, - 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, - 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, - 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, - 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, - 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, - 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, - 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, - // Block 0x5, offset 0x140 - 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, - 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, - 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, - 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, - 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, - 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, - 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, - 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, - 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, - 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, - 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, - // Block 0x6, offset 0x180 - 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, - 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, - 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, - 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, - 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, - 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, - 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, - 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, - 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, - 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, - 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, - 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, - 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, - 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, - 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, - 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, - 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, - 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, - 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, - 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, - 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, - // Block 0x8, offset 0x200 - 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, - 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
[large auto-generated diff elided: a continuous run of deleted lookup-table entries from a vendored, generated Go source file, organized into "// Block 0xNN, offset 0xNNNN" sections of indexed hex values (0x0008, 0xe00d, 0x3308, 0x0040, ...); the full deleted table data is not reproduced here]
0x135d: 0x7931, - 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, - 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, - 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, - 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, - 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, - 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, - // Block 0x4e, offset 0x1380 - 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, - 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, - 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, - 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, - 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, - 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, - 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, - 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, - 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, - 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, - 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, - 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, - 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, - 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, - 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, - 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, - 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, - 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, - 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, - 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, - 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, - // Block 0x50, offset 0x1400 - 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, - 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, - 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, - 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, - 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, - 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, - 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, - 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, - 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, - 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, - 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, - // Block 0x51, offset 0x1440 - 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, - 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, - 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, - 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, - 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, - 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, - 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, - 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, - 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, - 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, - 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, - // Block 0x52, offset 0x1480 - 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, - 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, - 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, - 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, - 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, - 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, - 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, - 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, - 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, - 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, - 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, - 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, - 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, - 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, - 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, - 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, - 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, - 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, - 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, - 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, - 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, - // Block 0x54, offset 0x1500 - 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, - 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, - 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, - 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, - 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, - 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, - 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, - 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, - 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, - 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, - 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, - // Block 0x55, offset 0x1540 - 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, - 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, - 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, - 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, - 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, - 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, - 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, - 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, - 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, - 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, - 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, - // Block 0x56, offset 0x1580 - 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, - 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, - 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, - 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, - 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, - 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, - 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, - 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, - 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, - 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, - 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, - // Block 0x57, offset 0x15c0 - 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, - 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, - 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, - 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, - 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, - 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, - 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, - 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, - 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, - 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, - 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, - // Block 0x58, offset 0x1600 - 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, - 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, - 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, - 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, - 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, - 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, - 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, - 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, - 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, - 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, - 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, - // Block 0x59, offset 0x1640 - 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, - 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, - 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, - 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, - 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, - 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, - 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, - 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, - 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, - 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, - 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, - // Block 0x5a, offset 0x1680 - 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, - 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, - 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, - 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, - 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, - 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, - 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, - 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, - 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, - 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, - 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, - // Block 0x5b, offset 0x16c0 - 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, - 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, - 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, - 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, - 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, - 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, - 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, - 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, - 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, - 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, - 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, - // Block 0x5c, offset 0x1700 - 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, - 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, - 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, - 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, - 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, - 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, - 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, - 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, - 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, - 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, - 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, - // Block 0x5d, offset 0x1740 - 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, - 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, - 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, - 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, - 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, - 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, - 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, - 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, - 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, - 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, - 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, - // Block 0x5e, offset 0x1780 - 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, - 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, - 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, - 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, - 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, - 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, - 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, - 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, - 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, - 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, - 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, - 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, - 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, - 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, - 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, - 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, - 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, - 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, - 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, - 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, - 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, - // Block 0x60, offset 0x1800 - 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, - 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, - 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, - 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, - 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, - 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, - 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, - 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, - 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, - 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, - 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, - // Block 0x61, offset 0x1840 - 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, - 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, - 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, - 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, - 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, - 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, - 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, - 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, - 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, - 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, - 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, - // Block 0x62, offset 0x1880 - 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, - 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, - 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, - 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, - 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, - 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, - 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, - 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, - 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, - 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, - 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, - 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, - 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, - 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, - 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, - 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, - 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, - 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, - 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, - 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, - 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, - // Block 0x64, offset 0x1900 - 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, - 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, - 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, - 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, - 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, - 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, - 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, - 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, - 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, - 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, - 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, - // Block 0x65, offset 0x1940 - 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, - 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, - 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, - 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, - 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, - 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, - 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, - 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, - 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, - 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, - 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, - // Block 0x66, offset 0x1980 - 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, - 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, - 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, - 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, - 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, - 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, - 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, - 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, - 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, - 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, - 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, - 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, - 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, - 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, - 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, - 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, - 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, - 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, - 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, - 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, - 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, - // Block 0x68, offset 0x1a00 - 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, - 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, - 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, - 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, - 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, - 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, - 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, - 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, - 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, - 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, - 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, - // Block 0x69, offset 0x1a40 - 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, - 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, - 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, - 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, - 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, - 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, - 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, - 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, - 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, - 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, - 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, - // Block 0x6a, offset 0x1a80 - 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, - 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, - 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, - 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, - 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, - 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, - 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, - 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, - 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, - 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, - 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, - // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, - 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, - 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, - 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, - 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, - 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, - 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, - 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, - 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, - 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, - 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, - // Block 0x6c, offset 0x1b00 - 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, - 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, - 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, - 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, - 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, - 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, - 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, - 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, - 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, - 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, - 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, - // Block 0x6d, offset 0x1b40 - 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, - 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, - 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, - 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, - 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, - 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, - 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, - 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, - 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, - 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, - 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, - // Block 0x6e, offset 0x1b80 - 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, - 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, - 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, - 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, - 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, - 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, - 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, - 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, - 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, - 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, - 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, - // Block 0x6f, offset 0x1bc0 - 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, - 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, - 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, - 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, - 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, - 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, - 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, - 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, - 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, - 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, - 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, - // Block 0x70, offset 0x1c00 - 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, - 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, - 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, - 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, - 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, - 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, - 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, - 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, - 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, - 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, - 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, - // Block 0x71, offset 0x1c40 - 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, - 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, - 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, - 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, - 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, - 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, - 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, - 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, - 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, - 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, - 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, - // Block 0x72, offset 0x1c80 - 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, - 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, - 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, - 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, - 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, - 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, - 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, - 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, - 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, - 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, - 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, - // Block 0x73, offset 0x1cc0 - 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, - 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, - 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, - 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, - 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, - 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, - 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, - 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, - 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, - 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, - 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, - // Block 0x74, offset 0x1d00 - 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, - 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, - 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, - 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, - 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, - 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, - 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, - 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, - 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, - 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, - 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, - // Block 0x75, 
offset 0x1d40 - 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, - 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, - 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, - 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, - 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, - 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, - 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, - 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, - 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, - 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, - 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, - // Block 0x76, offset 0x1d80 - 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, - 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, - 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, - 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, - 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, - 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, - 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, - 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, - 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, - 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, - 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, - // Block 0x77, offset 0x1dc0 - 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, - 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, - 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, - 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, - 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, - 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, - 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, - 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, - 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, - 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, - 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, - // Block 0x78, offset 0x1e00 - 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, - 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, - 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, - 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, - 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, - 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, - 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, - 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, - 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, - 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, - 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, - // Block 0x79, offset 0x1e40 - 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, - 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, - 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, - 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, - 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, - 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, - 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, - 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, - 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, - 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, - 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, - // Block 0x7a, offset 0x1e80 - 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, - 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, - 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, - 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, - 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, - 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, - 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, - 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, - 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, - 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, - 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, - // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, - 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, - 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, - 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, - 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, - 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, - 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, - 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, - 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, - 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, - 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, - // Block 0x7c, offset 0x1f00 - 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, - 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, - 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, - 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, - 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, - 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, - 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, - 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, - 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, - 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, - 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, - // Block 0x7d, offset 0x1f40 - 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, - 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, - 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, - 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, - 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, - 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, - 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, - 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, - 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, - 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, - 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, - // Block 0x7e, offset 0x1f80 - 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, - 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, - 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, - 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, - 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, - 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, - 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, - 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, - 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, - 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, - 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, -} - -// idnaIndex: 36 blocks, 2304 entries, 4608 bytes -// Block 0 is the zero block. -var idnaIndex = [2304]uint16{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, - 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, - 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, - 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, - 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, - 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, - 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, - // Block 0x4, offset 0x100 - 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, - 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, - 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, - 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, - // Block 0x5, offset 0x140 - 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, - 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, - 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, - 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, - 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, - 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, - 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, - 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, - // Block 0x6, offset 0x180 - 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, - 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, - 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, - 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, - 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, - 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, - 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, - 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, - 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, - 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, - 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, - 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, - 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, - 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, - 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, - // Block 0x8, offset 0x200 - 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, - 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, - 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, - 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, - 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, - 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, - 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, - 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, - // Block 0x9, offset 0x240 - 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, - 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, - 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, - 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, - 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, - 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, - 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, - 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, - // Block 0xa, offset 0x280 - 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, - 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, - 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, - 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, - 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, - 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, - 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, - 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, - 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, - 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, - 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, - 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, - 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, - 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, - 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, - // Block 0xc, offset 0x300 - 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, - 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, - 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, - 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, - // Block 0xd, offset 0x340 - 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, - 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, - 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, - 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, - 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, - 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, - 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, - 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, - // Block 0xe, offset 0x380 - 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, - 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, - 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, - 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, - 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, - 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, - 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, - 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, - 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, - 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, - 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, - 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, - 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, - 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, - 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 
0xba, 0x3ff: 0xba, - // Block 0x10, offset 0x400 - 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, - 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, - 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, - 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, - 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, - 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, - 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, - 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, - // Block 0x11, offset 0x440 - 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, - 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, - 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, - 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, - 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, - 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, - 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, - 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, - // Block 0x12, offset 0x480 - 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, - 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, - 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, - 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, - 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, - 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, - 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, - 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, - // Block 0x13, offset 0x4c0 - 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, - 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, - 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, - 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, - 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, - 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, - 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, - 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 
0xba, 0x4ff: 0xba, - // Block 0x14, offset 0x500 - 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, - 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, - 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, - 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, - 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, - 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, - 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, - 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, - // Block 0x15, offset 0x540 - 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, - 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, - 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, - 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, - 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, - 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, - 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, - 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, - // Block 0x16, offset 0x580 - 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, - 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, - 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, - 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, - 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, - 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, - 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, - 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, - // Block 0x17, offset 0x5c0 - 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, - 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, - 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, - 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, - 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, - 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, - 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, - 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 
0xba, - // Block 0x18, offset 0x600 - 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, - 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, - 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, - 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, - 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, - 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, - 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, - 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, - // Block 0x19, offset 0x640 - 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, - 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, - 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, - 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, - 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, - 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, - 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, - 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, - // Block 0x1a, offset 0x680 - 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, - 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, - 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, - 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, - 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, - 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, - 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, - 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, - // Block 0x1b, offset 0x6c0 - 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, - 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, - 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, - 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, - 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, - 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, - 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, - 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, - // Block 
0x1c, offset 0x700 - 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, - 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, - 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, - 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, - 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, - 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, - 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, - 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, - // Block 0x1d, offset 0x740 - 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, - 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, - 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, - 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, - 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, - 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, - 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, - 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, - // Block 0x1e, offset 0x780 - 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, - 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, - 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, - 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, - 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, - 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, - 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, - 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, - // Block 0x1f, offset 0x7c0 - 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, - 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, - 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, - 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, - 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, - 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, - // Block 0x20, offset 0x800 - 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, - 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, - 0x810: 0x0b, 
0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, - 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, - 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, - 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, - 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, - 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, - // Block 0x21, offset 0x840 - 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, - 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, - 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, - 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, - 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, - 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, - 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, - 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, - // Block 0x22, offset 0x880 - 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, - 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, - 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, - 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, - 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, - 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, - 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, - 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, - 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, -} - -// idnaSparseOffset: 264 entries, 528 bytes -var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 
0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} - -// idnaSparseValues: 1915 entries, 7660 bytes -var idnaSparseValues = [1915]valueRange{ - // Block 0x0, offset 0x0 - {value: 0x0000, lo: 0x07}, - {value: 0xe105, lo: 0x80, hi: 0x96}, - {value: 0x0018, lo: 0x97, hi: 0x97}, - {value: 0xe105, lo: 0x98, hi: 0x9e}, - {value: 0x001f, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbf}, - // Block 0x1, offset 0x8 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0xe01d, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x82}, - {value: 0x0335, lo: 0x83, hi: 0x83}, - {value: 0x034d, lo: 0x84, hi: 0x84}, - {value: 0x0365, lo: 0x85, hi: 0x85}, - {value: 0xe00d, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x87}, - {value: 0xe00d, lo: 0x88, hi: 0x88}, - {value: 0x0008, lo: 0x89, hi: 0x89}, - {value: 0xe00d, lo: 0x8a, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0x8b}, - {value: 0xe00d, lo: 0x8c, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0x8d}, - {value: 0xe00d, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0xbf}, - // Block 0x2, offset 0x19 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x0249, lo: 0xb0, hi: 0xb0}, - {value: 0x037d, lo: 0xb1, hi: 0xb1}, - {value: 0x0259, lo: 0xb2, hi: 0xb2}, - {value: 0x0269, lo: 0xb3, hi: 0xb3}, - {value: 0x034d, lo: 0xb4, hi: 0xb4}, - {value: 0x0395, lo: 0xb5, hi: 0xb5}, - {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, - {value: 0x0279, lo: 0xb7, hi: 0xb7}, - {value: 0x0289, lo: 0xb8, hi: 0xb8}, - {value: 0x0008, lo: 0xb9, hi: 0xbf}, - // Block 0x3, offset 0x25 - {value: 0x0000, lo: 0x01}, - {value: 0x3308, lo: 0x80, hi: 0xbf}, - // Block 0x4, offset 0x27 - {value: 0x0000, lo: 0x04}, - {value: 0x03f5, lo: 0x80, hi: 0x8f}, - {value: 0xe105, lo: 0x90, hi: 0x9f}, - {value: 0x049d, lo: 0xa0, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x5, offset 0x2c - {value: 0x0000, lo: 0x07}, - {value: 0xe185, lo: 0x80, hi: 0x8f}, - {value: 0x0545, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x0008, lo: 0x99, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xbf}, - // Block 0x6, offset 0x34 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0401, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x88}, - {value: 0x0018, lo: 0x89, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x3308, 
lo: 0x91, hi: 0xbd}, - {value: 0x0818, lo: 0xbe, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x7, offset 0x3f - {value: 0x0000, lo: 0x0b}, - {value: 0x0818, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x82}, - {value: 0x0818, lo: 0x83, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x85}, - {value: 0x0818, lo: 0x86, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0808, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x8, offset 0x4b - {value: 0x0000, lo: 0x03}, - {value: 0x0a08, lo: 0x80, hi: 0x87}, - {value: 0x0c08, lo: 0x88, hi: 0x99}, - {value: 0x0a08, lo: 0x9a, hi: 0xbf}, - // Block 0x9, offset 0x4f - {value: 0x0000, lo: 0x0e}, - {value: 0x3308, lo: 0x80, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0c08, lo: 0x8d, hi: 0x8d}, - {value: 0x0a08, lo: 0x8e, hi: 0x98}, - {value: 0x0c08, lo: 0x99, hi: 0x9b}, - {value: 0x0a08, lo: 0x9c, hi: 0xaa}, - {value: 0x0c08, lo: 0xab, hi: 0xac}, - {value: 0x0a08, lo: 0xad, hi: 0xb0}, - {value: 0x0c08, lo: 0xb1, hi: 0xb1}, - {value: 0x0a08, lo: 0xb2, hi: 0xb2}, - {value: 0x0c08, lo: 0xb3, hi: 0xb4}, - {value: 0x0a08, lo: 0xb5, hi: 0xb7}, - {value: 0x0c08, lo: 0xb8, hi: 0xb9}, - {value: 0x0a08, lo: 0xba, hi: 0xbf}, - // Block 0xa, offset 0x5e - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xb0}, - {value: 0x0808, lo: 0xb1, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xb, offset 0x63 - {value: 0x0000, lo: 0x07}, - {value: 0x0808, lo: 0x80, hi: 0x89}, - {value: 0x0a08, lo: 0x8a, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xb3}, - {value: 0x0808, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xb9}, - {value: 0x0818, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0xc, offset 0x6b - {value: 0x0000, lo: 0x0b}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x99}, - {value: 0x0808, lo: 0x9a, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0xa3}, - {value: 0x0808, lo: 0xa4, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa7}, - {value: 0x0808, lo: 0xa8, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0818, lo: 0xb0, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd, offset 0x77 - {value: 0x0000, lo: 0x0d}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0a08, lo: 0xa0, hi: 0xa9}, - {value: 0x0c08, lo: 0xaa, hi: 0xac}, - {value: 0x0808, lo: 0xad, hi: 0xad}, - {value: 0x0c08, lo: 0xae, hi: 0xae}, - {value: 0x0a08, lo: 0xaf, hi: 0xb0}, - {value: 0x0c08, lo: 0xb1, hi: 0xb2}, - {value: 0x0a08, lo: 0xb3, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0a08, lo: 0xb6, hi: 0xb8}, - {value: 0x0c08, lo: 0xb9, hi: 0xb9}, - {value: 0x0a08, lo: 0xba, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0xe, offset 0x85 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x93}, - {value: 0x3308, lo: 0x94, hi: 0xa1}, - {value: 0x0840, lo: 0xa2, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xbf}, - // Block 0xf, offset 0x8a - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x10, 
offset 0x93 - {value: 0x0000, lo: 0x0f}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x85}, - {value: 0x3008, lo: 0x86, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x3008, lo: 0x8a, hi: 0x8c}, - {value: 0x3b08, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x11, offset 0xa3 - {value: 0x0000, lo: 0x0d}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xa9}, - {value: 0x0008, lo: 0xaa, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x12, offset 0xb1 - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0xba}, - {value: 0x3b08, lo: 0xbb, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x13, offset 0xbd - {value: 0x0000, lo: 0x0b}, - {value: 0x0040, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xb2}, - {value: 0x0008, lo: 0xb3, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x14, offset 0xc9 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x89}, - {value: 0x3b08, lo: 0x8a, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8e}, - {value: 0x3008, lo: 0x8f, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x3008, lo: 0x98, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x15, offset 0xda - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb2}, - {value: 0x08f1, lo: 0xb3, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb9}, - {value: 0x3b08, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0x16, offset 0xe4 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x8e}, - {value: 0x0018, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0xbf}, - // 
Block 0x17, offset 0xeb - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x3308, lo: 0x88, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0961, lo: 0x9c, hi: 0x9c}, - {value: 0x0999, lo: 0x9d, hi: 0x9d}, - {value: 0x0008, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x18, offset 0xf8 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0x8b}, - {value: 0xe03d, lo: 0x8c, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xb8}, - {value: 0x3308, lo: 0xb9, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x19, offset 0x109 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0018, lo: 0x8e, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0x1a, offset 0x110 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x3008, lo: 0xab, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xb0}, - {value: 0x3008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x1b, offset 0x11b - {value: 0x0000, lo: 0x0e}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x3008, lo: 0x96, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x3308, lo: 0x9e, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xa1}, - {value: 0x3008, lo: 0xa2, hi: 0xa4}, - {value: 0x0008, lo: 0xa5, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xbf}, - // Block 0x1c, offset 0x12a - {value: 0x0000, lo: 0x0d}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x3008, lo: 0x87, hi: 0x8c}, - {value: 0x3308, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x8e}, - {value: 0x3008, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x3008, lo: 0x9a, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x1d, offset 0x138 - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x86}, - {value: 0x055d, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8c}, - {value: 0x055d, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbb}, - {value: 0xe105, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0x1e, offset 
0x142 - {value: 0x0000, lo: 0x01}, - {value: 0x0018, lo: 0x80, hi: 0xbf}, - // Block 0x1f, offset 0x144 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xa0}, - {value: 0x2018, lo: 0xa1, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x20, offset 0x149 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xa7}, - {value: 0x2018, lo: 0xa8, hi: 0xbf}, - // Block 0x21, offset 0x14c - {value: 0x0000, lo: 0x02}, - {value: 0x2018, lo: 0x80, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0xbf}, - // Block 0x22, offset 0x14f - {value: 0x0000, lo: 0x01}, - {value: 0x0008, lo: 0x80, hi: 0xbf}, - // Block 0x23, offset 0x151 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x24, offset 0x15d - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x25, offset 0x168 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x26, offset 0x170 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x27, offset 0x176 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x28, offset 0x17c - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x29, offset 0x181 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0xe045, lo: 0xb8, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x2a, offset 0x186 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x2b, offset 0x189 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xac}, - {value: 0x0018, lo: 0xad, hi: 0xae}, - {value: 0x0008, lo: 0xaf, hi: 0xbf}, - // Block 0x2c, offset 0x18d - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9c}, - {value: 0x0040, lo: 0x9d, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x2d, offset 0x193 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0018, lo: 
0xab, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0x2e, offset 0x198 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x93}, - {value: 0x3b08, lo: 0x94, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x3b08, lo: 0xb4, hi: 0xb4}, - {value: 0x0018, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x2f, offset 0x1a4 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x30, offset 0x1ae - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xb3}, - {value: 0x3340, lo: 0xb4, hi: 0xb5}, - {value: 0x3008, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x31, offset 0x1b4 - {value: 0x0000, lo: 0x10}, - {value: 0x3008, lo: 0x80, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x3008, lo: 0x87, hi: 0x88}, - {value: 0x3308, lo: 0x89, hi: 0x91}, - {value: 0x3b08, lo: 0x92, hi: 0x92}, - {value: 0x3308, lo: 0x93, hi: 0x93}, - {value: 0x0018, lo: 0x94, hi: 0x96}, - {value: 0x0008, lo: 0x97, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x32, offset 0x1c5 - {value: 0x0000, lo: 0x09}, - {value: 0x0018, lo: 0x80, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x86}, - {value: 0x0218, lo: 0x87, hi: 0x87}, - {value: 0x0018, lo: 0x88, hi: 0x8a}, - {value: 0x33c0, lo: 0x8b, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0208, lo: 0xa0, hi: 0xbf}, - // Block 0x33, offset 0x1cf - {value: 0x0000, lo: 0x02}, - {value: 0x0208, lo: 0x80, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x34, offset 0x1d2 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x0208, lo: 0x87, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xa9}, - {value: 0x0208, lo: 0xaa, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x35, offset 0x1da - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0x36, offset 0x1dd - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb8}, - {value: 0x3308, lo: 0xb9, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x37, offset 0x1ea - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 
0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0x83}, - {value: 0x0018, lo: 0x84, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x38, offset 0x1f2 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x39, offset 0x1f6 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0028, lo: 0x9a, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0xbf}, - // Block 0x3a, offset 0x1fd - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x3308, lo: 0x97, hi: 0x98}, - {value: 0x3008, lo: 0x99, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x3b, offset 0x205 - {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x94}, - {value: 0x3008, lo: 0x95, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3b08, lo: 0xa0, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xac}, - {value: 0x3008, lo: 0xad, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x3c, offset 0x215 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa7}, - {value: 0x0018, lo: 0xa8, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xbd}, - {value: 0x3318, lo: 0xbe, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x3d, offset 0x221 - {value: 0x0000, lo: 0x01}, - {value: 0x0040, lo: 0x80, hi: 0xbf}, - // Block 0x3e, offset 0x223 - {value: 0x0000, lo: 0x09}, - {value: 0x3308, lo: 0x80, hi: 0x83}, - {value: 0x3008, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbf}, - // Block 0x3f, offset 0x22d - {value: 0x0000, lo: 0x0b}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x3808, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x40, offset 0x239 - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa9}, - {value: 0x3808, lo: 0xaa, hi: 0xaa}, - 
{value: 0x3b08, lo: 0xab, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xbf}, - // Block 0x41, offset 0x245 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa9}, - {value: 0x3008, lo: 0xaa, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb1}, - {value: 0x3808, lo: 0xb2, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbf}, - // Block 0x42, offset 0x251 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x3008, lo: 0xa4, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x43, offset 0x259 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x44, offset 0x25e - {value: 0x0000, lo: 0x09}, - {value: 0x0e29, lo: 0x80, hi: 0x80}, - {value: 0x0e41, lo: 0x81, hi: 0x81}, - {value: 0x0e59, lo: 0x82, hi: 0x82}, - {value: 0x0e71, lo: 0x83, hi: 0x83}, - {value: 0x0e89, lo: 0x84, hi: 0x85}, - {value: 0x0ea1, lo: 0x86, hi: 0x86}, - {value: 0x0eb9, lo: 0x87, hi: 0x87}, - {value: 0x057d, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0x45, offset 0x268 - {value: 0x0000, lo: 0x10}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x92}, - {value: 0x0018, lo: 0x93, hi: 0x93}, - {value: 0x3308, lo: 0x94, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa8}, - {value: 0x0008, lo: 0xa9, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x3008, lo: 0xb7, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x46, offset 0x279 - {value: 0x0000, lo: 0x03}, - {value: 0x3308, lo: 0x80, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0x47, offset 0x27d - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x87}, - {value: 0xe045, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0xe045, lo: 0x98, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0xe045, lo: 0xa8, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb7}, - {value: 0xe045, lo: 0xb8, hi: 0xbf}, - // Block 0x48, offset 0x288 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x3318, lo: 0x90, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xbf}, - // Block 0x49, offset 0x28c - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x88}, - {value: 0x24c1, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x4a, offset 0x295 - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x24f1, lo: 0xac, hi: 0xac}, - 
{value: 0x2529, lo: 0xad, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xae}, - {value: 0x2579, lo: 0xaf, hi: 0xaf}, - {value: 0x25b1, lo: 0xb0, hi: 0xb0}, - {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x4b, offset 0x29d - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x9f}, - {value: 0x0080, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xad}, - {value: 0x0080, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x4c, offset 0x2a3 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0xa8}, - {value: 0x09c5, lo: 0xa9, hi: 0xa9}, - {value: 0x09e5, lo: 0xaa, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xbf}, - // Block 0x4d, offset 0x2a8 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xbf}, - // Block 0x4e, offset 0x2ab - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x28c1, lo: 0x8c, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x4f, offset 0x2af - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0e66, lo: 0xb4, hi: 0xb4}, - {value: 0x292a, lo: 0xb5, hi: 0xb5}, - {value: 0x0e86, lo: 0xb6, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x50, offset 0x2b5 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x9b}, - {value: 0x2941, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0xbf}, - // Block 0x51, offset 0x2b9 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x52, offset 0x2bd - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbc}, - {value: 0x0018, lo: 0xbd, hi: 0xbf}, - // Block 0x53, offset 0x2c3 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x92}, - {value: 0x0040, lo: 0x93, hi: 0xab}, - {value: 0x0018, lo: 0xac, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x54, offset 0x2ca - {value: 0x0000, lo: 0x05}, - {value: 0xe185, lo: 0x80, hi: 0x8f}, - {value: 0x03f5, lo: 0x90, hi: 0x9f}, - {value: 0x0ea5, lo: 0xa0, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x55, offset 0x2d0 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xac}, - {value: 0x0008, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x56, offset 0x2d8 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xae}, - {value: 0xe075, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0x57, offset 0x2df - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x58, offset 0x2ea - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x86}, 
- {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xbf}, - // Block 0x59, offset 0x2f4 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xae}, - {value: 0x0008, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x5a, offset 0x2f8 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0x5b, offset 0x2fb - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9e}, - {value: 0x0edd, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x5c, offset 0x301 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xb2}, - {value: 0x0efd, lo: 0xb3, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x5d, offset 0x305 - {value: 0x0020, lo: 0x01}, - {value: 0x0f1d, lo: 0x80, hi: 0xbf}, - // Block 0x5e, offset 0x307 - {value: 0x0020, lo: 0x02}, - {value: 0x171d, lo: 0x80, hi: 0x8f}, - {value: 0x18fd, lo: 0x90, hi: 0xbf}, - // Block 0x5f, offset 0x30a - {value: 0x0020, lo: 0x01}, - {value: 0x1efd, lo: 0x80, hi: 0xbf}, - // Block 0x60, offset 0x30c - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x61, offset 0x30f - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x3308, lo: 0x99, hi: 0x9a}, - {value: 0x29e2, lo: 0x9b, hi: 0x9b}, - {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, - {value: 0x0008, lo: 0x9d, hi: 0x9e}, - {value: 0x2a31, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xbf}, - // Block 0x62, offset 0x319 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xbe}, - {value: 0x2a69, lo: 0xbf, hi: 0xbf}, - // Block 0x63, offset 0x31c - {value: 0x0000, lo: 0x0e}, - {value: 0x0040, lo: 0x80, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xb0}, - {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, - {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, - {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, - {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, - {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, - {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, - {value: 0x2abd, lo: 0xb7, hi: 0xb7}, - {value: 0x2add, lo: 0xb8, hi: 0xb9}, - {value: 0x2afd, lo: 0xba, hi: 0xbb}, - {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, - {value: 0x2afd, lo: 0xbe, hi: 0xbf}, - // Block 0x64, offset 0x32b - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x65, offset 0x32f - {value: 0x0030, lo: 0x04}, - {value: 0x2aa2, lo: 0x80, hi: 0x9d}, - {value: 0x305a, lo: 0x9e, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x30a2, lo: 0xa0, hi: 0xbf}, - // Block 0x66, offset 0x334 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0x67, offset 0x337 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x68, offset 0x33b - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 
0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x69, offset 0x340 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x6a, offset 0x345 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x0018, lo: 0xa6, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb1}, - {value: 0x0018, lo: 0xb2, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6b, offset 0x34b - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0xb6}, - {value: 0x0008, lo: 0xb7, hi: 0xb7}, - {value: 0x2009, lo: 0xb8, hi: 0xb8}, - {value: 0x6e89, lo: 0xb9, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xbf}, - // Block 0x6c, offset 0x351 - {value: 0x0000, lo: 0x0e}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0x85}, - {value: 0x3b08, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x3308, lo: 0x8b, hi: 0x8b}, - {value: 0x0008, lo: 0x8c, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xa7}, - {value: 0x0018, lo: 0xa8, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x6d, offset 0x360 - {value: 0x0000, lo: 0x05}, - {value: 0x0208, lo: 0x80, hi: 0xb1}, - {value: 0x0108, lo: 0xb2, hi: 0xb2}, - {value: 0x0008, lo: 0xb3, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6e, offset 0x366 - {value: 0x0000, lo: 0x03}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xbf}, - // Block 0x6f, offset 0x36a - {value: 0x0000, lo: 0x0e}, - {value: 0x3008, lo: 0x80, hi: 0x83}, - {value: 0x3b08, lo: 0x84, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8d}, - {value: 0x0018, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xba}, - {value: 0x0008, lo: 0xbb, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x70, offset 0x379 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x71, offset 0x37e - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x91}, - {value: 0x3008, lo: 0x92, hi: 0x92}, - {value: 0x3808, lo: 0x93, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x72, offset 0x386 - {value: 0x0000, lo: 0x09}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb9}, - {value: 0x3008, lo: 0xba, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbf}, - // Block 0x73, offset 0x390 - {value: 0x0000, lo: 0x0a}, - {value: 0x3808, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8e}, - {value: 
0x0008, lo: 0x8f, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x74, offset 0x39b - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x75, offset 0x3a3 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x8b}, - {value: 0x3308, lo: 0x8c, hi: 0x8c}, - {value: 0x3008, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0018, lo: 0x9c, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbd}, - {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0x76, offset 0x3b4 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb8}, - {value: 0x0008, lo: 0xb9, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x77, offset 0x3bd - {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x9a}, - {value: 0x0008, lo: 0x9b, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xaa}, - {value: 0x3008, lo: 0xab, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb5}, - {value: 0x3b08, lo: 0xb6, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x78, offset 0x3cd - {value: 0x0000, lo: 0x0c}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x88}, - {value: 0x0008, lo: 0x89, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x90}, - {value: 0x0008, lo: 0x91, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x79, offset 0x3da - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x4465, lo: 0x9c, hi: 0x9c}, - {value: 0x447d, lo: 0x9d, hi: 0x9d}, - {value: 0x2971, lo: 0x9e, hi: 0x9e}, - {value: 0xe06d, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xaf}, - {value: 0x4495, lo: 0xb0, hi: 0xbf}, - // Block 0x7a, offset 0x3e4 - {value: 0x0000, lo: 0x04}, - {value: 0x44b5, lo: 0x80, hi: 0x8f}, - {value: 0x44d5, lo: 0x90, hi: 0x9f}, - {value: 0x44f5, lo: 0xa0, hi: 0xaf}, - {value: 0x44d5, lo: 0xb0, hi: 0xbf}, - // Block 0x7b, offset 0x3e9 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0xa2}, - {value: 
0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3b08, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x7c, offset 0x3f6 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x7d, offset 0x3fa - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8a}, - {value: 0x0018, lo: 0x8b, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x7e, offset 0x3ff - {value: 0x0020, lo: 0x01}, - {value: 0x4515, lo: 0x80, hi: 0xbf}, - // Block 0x7f, offset 0x401 - {value: 0x0020, lo: 0x03}, - {value: 0x4d15, lo: 0x80, hi: 0x94}, - {value: 0x4ad5, lo: 0x95, hi: 0x95}, - {value: 0x4fb5, lo: 0x96, hi: 0xbf}, - // Block 0x80, offset 0x405 - {value: 0x0020, lo: 0x01}, - {value: 0x54f5, lo: 0x80, hi: 0xbf}, - // Block 0x81, offset 0x407 - {value: 0x0020, lo: 0x03}, - {value: 0x5cf5, lo: 0x80, hi: 0x84}, - {value: 0x5655, lo: 0x85, hi: 0x85}, - {value: 0x5d95, lo: 0x86, hi: 0xbf}, - // Block 0x82, offset 0x40b - {value: 0x0020, lo: 0x08}, - {value: 0x6b55, lo: 0x80, hi: 0x8f}, - {value: 0x6d15, lo: 0x90, hi: 0x90}, - {value: 0x6d55, lo: 0x91, hi: 0xab}, - {value: 0x6ea1, lo: 0xac, hi: 0xac}, - {value: 0x70b5, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x70d5, lo: 0xb0, hi: 0xbf}, - // Block 0x83, offset 0x414 - {value: 0x0020, lo: 0x05}, - {value: 0x72d5, lo: 0x80, hi: 0xad}, - {value: 0x6535, lo: 0xae, hi: 0xae}, - {value: 0x7895, lo: 0xaf, hi: 0xb5}, - {value: 0x6f55, lo: 0xb6, hi: 0xb6}, - {value: 0x7975, lo: 0xb7, hi: 0xbf}, - // Block 0x84, offset 0x41a - {value: 0x0028, lo: 0x03}, - {value: 0x7c21, lo: 0x80, hi: 0x82}, - {value: 0x7be1, lo: 0x83, hi: 0x83}, - {value: 0x7c99, lo: 0x84, hi: 0xbf}, - // Block 0x85, offset 0x41e - {value: 0x0038, lo: 0x0f}, - {value: 0x9db1, lo: 0x80, hi: 0x83}, - {value: 0x9e59, lo: 0x84, hi: 0x85}, - {value: 0x9e91, lo: 0x86, hi: 0x87}, - {value: 0x9ec9, lo: 0x88, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0xa089, lo: 0x92, hi: 0x97}, - {value: 0xa1a1, lo: 0x98, hi: 0x9c}, - {value: 0xa281, lo: 0x9d, hi: 0xb3}, - {value: 0x9d41, lo: 0xb4, hi: 0xb4}, - {value: 0x9db1, lo: 0xb5, hi: 0xb5}, - {value: 0xa789, lo: 0xb6, hi: 0xbb}, - {value: 0xa869, lo: 0xbc, hi: 0xbc}, - {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, - {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, - // Block 0x86, offset 0x42e - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbb}, - {value: 0x0008, lo: 0xbc, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x87, offset 0x438 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x88, offset 0x43d - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xba}, - {value: 0x0040, lo: 0xbb, 
hi: 0xbf}, - // Block 0x89, offset 0x440 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x8a, offset 0x446 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x8b, offset 0x44d - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x8c, offset 0x452 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9c}, - {value: 0x0040, lo: 0x9d, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x8d, offset 0x456 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x8e, offset 0x45c - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xac}, - {value: 0x0008, lo: 0xad, hi: 0xbf}, - // Block 0x8f, offset 0x461 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x90, offset 0x46a - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x91, offset 0x46f - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x92, offset 0x475 - {value: 0x0000, lo: 0x06}, - {value: 0xe145, lo: 0x80, hi: 0x87}, - {value: 0xe1c5, lo: 0x88, hi: 0x8f}, - {value: 0xe145, lo: 0x90, hi: 0x97}, - {value: 0x8ad5, lo: 0x98, hi: 0x9f}, - {value: 0x8aed, lo: 0xa0, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xbf}, - // Block 0x93, offset 0x47c - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x8aed, lo: 0xb0, hi: 0xb7}, - {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, - // Block 0x94, offset 0x483 - {value: 0x0000, lo: 0x06}, - {value: 0xe145, lo: 0x80, hi: 0x87}, - {value: 0xe1c5, lo: 0x88, hi: 0x8f}, - {value: 0xe145, lo: 0x90, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x95, offset 0x48a - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x96, offset 0x48e - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xae}, - {value: 0x0018, lo: 0xaf, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x97, offset 
0x493 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x98, offset 0x496 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xbf}, - // Block 0x99, offset 0x49b - {value: 0x0000, lo: 0x0b}, - {value: 0x0808, lo: 0x80, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0808, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0808, lo: 0x8a, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb6}, - {value: 0x0808, lo: 0xb7, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbb}, - {value: 0x0808, lo: 0xbc, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x0808, lo: 0xbf, hi: 0xbf}, - // Block 0x9a, offset 0x4a7 - {value: 0x0000, lo: 0x05}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x96}, - {value: 0x0818, lo: 0x97, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb6}, - {value: 0x0818, lo: 0xb7, hi: 0xbf}, - // Block 0x9b, offset 0x4ad - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xa6}, - {value: 0x0818, lo: 0xa7, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x9c, offset 0x4b2 - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb3}, - {value: 0x0808, lo: 0xb4, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x0818, lo: 0xbb, hi: 0xbf}, - // Block 0x9d, offset 0x4b9 - {value: 0x0000, lo: 0x07}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0818, lo: 0x96, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbe}, - {value: 0x0818, lo: 0xbf, hi: 0xbf}, - // Block 0x9e, offset 0x4c1 - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbb}, - {value: 0x0818, lo: 0xbc, hi: 0xbd}, - {value: 0x0808, lo: 0xbe, hi: 0xbf}, - // Block 0x9f, offset 0x4c6 - {value: 0x0000, lo: 0x03}, - {value: 0x0818, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x0818, lo: 0x92, hi: 0xbf}, - // Block 0xa0, offset 0x4ca - {value: 0x0000, lo: 0x0f}, - {value: 0x0808, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8b}, - {value: 0x3308, lo: 0x8c, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x94}, - {value: 0x0808, lo: 0x95, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0x98}, - {value: 0x0808, lo: 0x99, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa1, offset 0x4da - {value: 0x0000, lo: 0x06}, - {value: 0x0818, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0818, lo: 0x90, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xbc}, - {value: 0x0818, lo: 0xbd, hi: 0xbf}, - // Block 0xa2, offset 0x4e1 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0x9c}, - {value: 0x0818, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa3, offset 0x4e5 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb8}, - 
{value: 0x0018, lo: 0xb9, hi: 0xbf}, - // Block 0xa4, offset 0x4e9 - {value: 0x0000, lo: 0x06}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0818, lo: 0x98, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb7}, - {value: 0x0818, lo: 0xb8, hi: 0xbf}, - // Block 0xa5, offset 0x4f0 - {value: 0x0000, lo: 0x01}, - {value: 0x0808, lo: 0x80, hi: 0xbf}, - // Block 0xa6, offset 0x4f2 - {value: 0x0000, lo: 0x02}, - {value: 0x0808, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xa7, offset 0x4f5 - {value: 0x0000, lo: 0x02}, - {value: 0x03dd, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xa8, offset 0x4f8 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb9}, - {value: 0x0818, lo: 0xba, hi: 0xbf}, - // Block 0xa9, offset 0x4fc - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0818, lo: 0xa0, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xaa, offset 0x500 - {value: 0x0000, lo: 0x05}, - {value: 0x3008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xab, offset 0x506 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x85}, - {value: 0x3b08, lo: 0x86, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x91}, - {value: 0x0018, lo: 0x92, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xac, offset 0x50f - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb6}, - {value: 0x3008, lo: 0xb7, hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbc}, - {value: 0x0340, lo: 0xbd, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xad, offset 0x51b - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xae, offset 0x522 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xb2}, - {value: 0x3b08, lo: 0xb3, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xaf, offset 0x52b - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb0, offset 0x533 - {value: 0x0000, lo: 0x06}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xbe}, - {value: 0x3008, lo: 0xbf, hi: 0xbf}, - // Block 0xb1, offset 0x53a - {value: 0x0000, lo: 0x0d}, - {value: 0x3808, lo: 0x80, hi: 0x80}, 
- {value: 0x0008, lo: 0x81, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x89}, - {value: 0x3308, lo: 0x8a, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb2, offset 0x548 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x3808, lo: 0xb5, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb3, offset 0x555 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9e}, - {value: 0x0008, lo: 0x9f, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xb4, offset 0x562 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x3308, lo: 0x9f, hi: 0x9f}, - {value: 0x3008, lo: 0xa0, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xa9}, - {value: 0x3b08, lo: 0xaa, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb5, offset 0x56b - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xb6, offset 0x56f - {value: 0x0000, lo: 0x0d}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x3b08, lo: 0x82, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x84}, - {value: 0x3008, lo: 0x85, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x0018, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xb7, offset 0x57d - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb8}, - {value: 0x3008, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xb8, offset 0x585 - {value: 0x0000, lo: 0x0a}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x81}, - {value: 0x3b08, lo: 0x82, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x85}, - {value: 0x0018, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xb9, offset 0x590 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xb1}, - 
{value: 0x3308, lo: 0xb2, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xba, offset 0x599 - {value: 0x0000, lo: 0x05}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x9b}, - {value: 0x3308, lo: 0x9c, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xbb, offset 0x59f - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbc, offset 0x5a7 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xbd, offset 0x5b0 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb5}, - {value: 0x3808, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xbe, offset 0x5ba - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xbf, offset 0x5bd - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9f}, - {value: 0x3008, lo: 0xa0, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xaa}, - {value: 0x3b08, lo: 0xab, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc0, offset 0x5c9 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc1, offset 0x5cc - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc2, offset 0x5d1 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x86}, - {value: 0x3008, lo: 0x87, hi: 0x88}, - {value: 0x3308, lo: 0x89, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x3b08, lo: 0xb4, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb8}, - {value: 0x3008, lo: 0xb9, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xc3, offset 0x5de - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x3b08, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x3308, lo: 0x91, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x98}, - {value: 0x3308, lo: 0x99, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0xbf}, - // Block 0xc4, offset 0x5e7 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 
0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0x89}, - {value: 0x3308, lo: 0x8a, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x98}, - {value: 0x3b08, lo: 0x99, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9c}, - {value: 0x0040, lo: 0x9d, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0xa2}, - {value: 0x0040, lo: 0xa3, hi: 0xbf}, - // Block 0xc5, offset 0x5f3 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xc6, offset 0x5f6 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xc7, offset 0x600 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xc8, offset 0x609 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xa9}, - {value: 0x3308, lo: 0xaa, hi: 0xb0}, - {value: 0x3008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xc9, offset 0x615 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xca, offset 0x622 - {value: 0x0000, lo: 0x07}, - {value: 0x3308, lo: 0x80, hi: 0x83}, - {value: 0x3b08, lo: 0x84, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xcb, offset 0x62a - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xcc, offset 0x62d - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xcd, offset 0x632 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xce, offset 0x635 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xbf}, - // Block 0xcf, offset 0x638 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xd0, offset 0x63b - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 
0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xd1, offset 0x642 - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb4}, - {value: 0x0018, lo: 0xb5, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xd2, offset 0x649 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0xd3, offset 0x64d - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0018, lo: 0x84, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x0008, lo: 0xa3, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0xd4, offset 0x658 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xd5, offset 0x65b - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x3008, lo: 0x91, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd6, offset 0x661 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x8e}, - {value: 0x3308, lo: 0x8f, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xd7, offset 0x666 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xbf}, - // Block 0xd8, offset 0x66a - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xd9, offset 0x66d - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xda, offset 0x670 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xdb, offset 0x673 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xdc, offset 0x676 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0xdd, offset 0x679 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xde, offset 0x67e - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0018, lo: 0x9c, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x03c0, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xdf, offset 0x688 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xe0, offset 0x68b - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xe1, offset 0x68f - {value: 0x0000, lo: 0x0e}, 
- {value: 0x0018, lo: 0x80, hi: 0x9d}, - {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, - {value: 0xb601, lo: 0x9f, hi: 0x9f}, - {value: 0xb649, lo: 0xa0, hi: 0xa0}, - {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, - {value: 0xb719, lo: 0xa2, hi: 0xa2}, - {value: 0xb781, lo: 0xa3, hi: 0xa3}, - {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, - {value: 0x3018, lo: 0xa5, hi: 0xa6}, - {value: 0x3318, lo: 0xa7, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xac}, - {value: 0x3018, lo: 0xad, hi: 0xb2}, - {value: 0x0340, lo: 0xb3, hi: 0xba}, - {value: 0x3318, lo: 0xbb, hi: 0xbf}, - // Block 0xe2, offset 0x69e - {value: 0x0000, lo: 0x0b}, - {value: 0x3318, lo: 0x80, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0x84}, - {value: 0x3318, lo: 0x85, hi: 0x8b}, - {value: 0x0018, lo: 0x8c, hi: 0xa9}, - {value: 0x3318, lo: 0xaa, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xba}, - {value: 0xb851, lo: 0xbb, hi: 0xbb}, - {value: 0xb899, lo: 0xbc, hi: 0xbc}, - {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, - {value: 0xb949, lo: 0xbe, hi: 0xbe}, - {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, - // Block 0xe3, offset 0x6aa - {value: 0x0000, lo: 0x03}, - {value: 0xba19, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xe4, offset 0x6ae - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x3318, lo: 0x82, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xe5, offset 0x6b3 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe6, offset 0x6b8 - {value: 0x0000, lo: 0x03}, - {value: 0x3308, lo: 0x80, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0xe7, offset 0x6bc - {value: 0x0000, lo: 0x04}, - {value: 0x3308, lo: 0x80, hi: 0xac}, - {value: 0x0018, lo: 0xad, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xe8, offset 0x6c1 - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x3308, lo: 0xa1, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xe9, offset 0x6ca - {value: 0x0000, lo: 0x0a}, - {value: 0x3308, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x3308, lo: 0x88, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xa4}, - {value: 0x0040, lo: 0xa5, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0xea, offset 0x6d5 - {value: 0x0000, lo: 0x05}, - {value: 0x0808, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x86}, - {value: 0x0818, lo: 0x87, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xeb, offset 0x6db - {value: 0x0000, lo: 0x07}, - {value: 0x0a08, lo: 0x80, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0818, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xec, offset 0x6e3 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0xaf}, - {value: 0x0018, 
lo: 0xb0, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xed, offset 0x6e7 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0xee, offset 0x6eb - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xb0}, - {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0xef, offset 0x6f1 - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xf0, offset 0x6f7 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, - {value: 0xc1c1, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xf1, offset 0x6fc - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0xa5}, - {value: 0x0018, lo: 0xa6, hi: 0xbf}, - // Block 0xf2, offset 0x6ff - {value: 0x0000, lo: 0x0f}, - {value: 0xc7e9, lo: 0x80, hi: 0x80}, - {value: 0xc839, lo: 0x81, hi: 0x81}, - {value: 0xc889, lo: 0x82, hi: 0x82}, - {value: 0xc8d9, lo: 0x83, hi: 0x83}, - {value: 0xc929, lo: 0x84, hi: 0x84}, - {value: 0xc979, lo: 0x85, hi: 0x85}, - {value: 0xc9c9, lo: 0x86, hi: 0x86}, - {value: 0xca19, lo: 0x87, hi: 0x87}, - {value: 0xca69, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0xcab9, lo: 0x90, hi: 0x90}, - {value: 0xcad9, lo: 0x91, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xbf}, - // Block 0xf3, offset 0x70f - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xf4, offset 0x716 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xf5, offset 0x719 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0xbf}, - // Block 0xf6, offset 0x71c - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0xf7, offset 0x720 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0xf8, offset 0x726 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0xf9, offset 0x72b - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xfa, offset 0x730 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0xfb, offset 0x735 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0xbf}, - // Block 0xfc, offset 0x738 - 
{value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xbf}, - // Block 0xfd, offset 0x73d - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xfe, offset 0x740 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xff, offset 0x743 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x100, offset 0x747 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x101, offset 0x74b - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x102, offset 0x74e - {value: 0x0020, lo: 0x0f}, - {value: 0xdeb9, lo: 0x80, hi: 0x89}, - {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, - {value: 0xdff9, lo: 0x8b, hi: 0x9c}, - {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, - {value: 0xe239, lo: 0x9e, hi: 0xa2}, - {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, - {value: 0xe2d9, lo: 0xa4, hi: 0xab}, - {value: 0x7ed5, lo: 0xac, hi: 0xac}, - {value: 0xe3d9, lo: 0xad, hi: 0xaf}, - {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, - {value: 0xe439, lo: 0xb1, hi: 0xb6}, - {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, - {value: 0xe4f9, lo: 0xba, hi: 0xba}, - {value: 0x8edd, lo: 0xbb, hi: 0xbb}, - {value: 0xe519, lo: 0xbc, hi: 0xbf}, - // Block 0x103, offset 0x75e - {value: 0x0020, lo: 0x10}, - {value: 0x937d, lo: 0x80, hi: 0x80}, - {value: 0xf099, lo: 0x81, hi: 0x86}, - {value: 0x939d, lo: 0x87, hi: 0x8a}, - {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, - {value: 0xf159, lo: 0x8c, hi: 0x96}, - {value: 0x941d, lo: 0x97, hi: 0x97}, - {value: 0xf2b9, lo: 0x98, hi: 0xa3}, - {value: 0x943d, lo: 0xa4, hi: 0xa6}, - {value: 0xf439, lo: 0xa7, hi: 0xaa}, - {value: 0x949d, lo: 0xab, hi: 0xab}, - {value: 0xf4b9, lo: 0xac, hi: 0xac}, - {value: 0x94bd, lo: 0xad, hi: 0xad}, - {value: 0xf4d9, lo: 0xae, hi: 0xaf}, - {value: 0x94dd, lo: 0xb0, hi: 0xb1}, - {value: 0xf519, lo: 0xb2, hi: 0xbe}, - {value: 0x2040, lo: 0xbf, hi: 0xbf}, - // Block 0x104, offset 0x76f - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0340, lo: 0x81, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x9f}, - {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0x105, offset 0x774 - {value: 0x0000, lo: 0x01}, - {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0x106, offset 0x776 - {value: 0x0000, lo: 0x01}, - {value: 0x33c0, lo: 0x80, hi: 0xbf}, - // Block 0x107, offset 0x778 - {value: 0x0000, lo: 0x02}, - {value: 0x33c0, lo: 0x80, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, -} - -// Total table size 42114 bytes (41KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go deleted file mode 100644 index c515d7ad2..000000000 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ /dev/null @@ -1,4653 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// +build go1.13 - -package idna - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
-const UnicodeVersion = "11.0.0" - -var mappings string = "" + // Size: 8175 bytes - "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + - "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + - "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + - "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + - "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + - "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + - "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + - "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + - "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + - "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + - "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + - "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + - "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + - "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + - "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + - "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + - "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + - "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + - "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + - "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + - "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + - "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + - "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + - "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + - "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + - "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + - ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + - "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + - "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + - "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + - "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + - "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + - "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + - "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + - "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + - "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + - "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + - "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + - "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + - "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + - "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + - "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + - "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + - "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + - 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + - "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + - "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + - "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + - "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + - "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + - "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + - "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + - "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + - "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + - "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + - "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + - "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + - "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + - "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + - "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + - "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + - "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + - "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + - "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + - "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + - "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + - "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + - "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + - "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + - "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + - "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + - "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + - "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + - " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + - "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + - "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + - "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + - "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + - "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + - "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + - "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + - "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + - "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + - "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + - "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + - "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + - "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + - "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + - "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + - "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + - "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + - "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + - "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + - "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + - "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + - "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + - "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + - "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + - "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + - "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + - "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + - "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + - "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + - "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + - "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + - "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + - "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + - "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + - "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + - "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + - "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + - "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + - "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + - "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + - "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + - "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + - "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + - "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + - "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + - "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + - "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + - "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + - "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + - "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + - "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + - "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + - "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + - "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + - "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + - "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + - "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + - "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + - "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + - "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + - "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + - 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + - "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" - -var xorData string = "" + // Size: 4855 bytes - "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + - "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + - "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + - "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + - "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + - "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + - "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + - "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + - "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + - "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + - "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + - "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + - "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + - "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + - "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + - "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + - "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + - "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + - "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + - "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + - "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + - "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + - "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + - "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + - "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + - "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + - "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + - "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + - "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + - "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + - "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + - "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + - "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + - "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + - "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + - "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + - "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + - "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + - "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + - "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + - "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + - "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + - ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + - 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + - "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + - "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + - "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + - "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + - "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + - "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + - "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + - "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + - "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + - "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + - "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + - "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + - "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + - "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + - "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + - "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + - "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + - "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + - "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + - "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + - "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + - "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + - "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + - "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + - "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + - "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + - "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + - "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + - "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + - "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + - "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + - "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + - "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + - "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + - "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + - "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + - "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + - "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ - "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + - "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + - "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + - "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + - "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + - "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + - "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + - "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + - "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + - "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + - "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + - ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + - "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + - "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + - "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + - "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + - "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + - "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + - "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + - "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + - "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + - "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + - "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + - "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + - "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + - "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + - "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + - "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + - "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + - "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + - "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + - "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + - "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + - "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + - "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + - "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + - "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + - "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + - "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + - "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + - "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + - "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + - "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + - "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + - "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ - "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + - "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + - "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + - "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + - "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + - "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + - "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + - "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + - "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + - "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + - "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + - "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + - "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + - "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + - "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + - "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + - "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + - "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + - "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + - "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + - "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + - "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + - "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + - "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + - "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + - "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + - "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + - "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + - "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + - "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + - "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + - "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + - "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + - "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + - "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + - "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + - "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + - "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + - "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + - "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + - "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + - "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + - "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + - "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + - "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + - 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + - "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + - "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + - "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + - "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + - "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + - "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + - "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + - "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + - "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + - "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + - "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + - "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + - "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + - "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + - "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + - "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + - "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + - "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + - "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + - "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + - "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + - "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + - "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + - "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + - "\x04\x03\x0c?\x05\x03\x0c" + - "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + - "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + - "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + - "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + - "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + - "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + - "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + - "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + - "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + - "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + - "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + - "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + - "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + - "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + - "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ - "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + - "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + - "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + - "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + - "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + - "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + - "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + - "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + - "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + - "\x05\x22\x05\x03\x050\x1d" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
- case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// idnaTrie. Total size: 29404 bytes (28.71 KiB). Checksum: 848c45acb5f7991c. -type idnaTrie struct{} - -func newIdnaTrie(i int) *idnaTrie { - return &idnaTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { - switch { - case n < 125: - return uint16(idnaValues[n<<6+uint32(b)]) - default: - n -= 125 - return uint16(idnaSparse.lookup(n, b)) - } -} - -// idnaValues: 127 blocks, 8128 entries, 16256 bytes -// The third block is the zero block. 
-var idnaValues = [8128]uint16{ - // Block 0x0, offset 0x0 - 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, - 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, - 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, - 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, - 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, - 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, - 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, - 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, - 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, - 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, - 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, - // Block 0x1, offset 0x40 - 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, - 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, - 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, - 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, - 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, - 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, - 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, - 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, - 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, - 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, - 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, - 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, - 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, - 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, - 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, - 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, - 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, - 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, - 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, - 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, - 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, - // Block 0x4, offset 0x100 - 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, - 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, - 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, - 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, - 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, - 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, - 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, - 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, - 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, - 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, - 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, - // Block 0x5, offset 0x140 - 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, - 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, - 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, - 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, - 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, - 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, - 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, - 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, - 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, - 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, - 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, - // Block 0x6, offset 0x180 - 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, - 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, - 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, - 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, - 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, - 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, - 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, - 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, - 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, - 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, - 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, - 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, - 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, - 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, - 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, - 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, - 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, - 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, - 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, - 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, - 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, - // Block 0x8, offset 0x200 - 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, - 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, - 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, - 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, - 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, - 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, - 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, - 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, - 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, - 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, - 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, - // Block 0x9, offset 0x240 - 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, - 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, - 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, - 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, - 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, - 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, - 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, - 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, - 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, - 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, - 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, - // Block 0xa, offset 0x280 - 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, - 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, - 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, - 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, - 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, - 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, - 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, - 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, - 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, - 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, - 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, - 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, - 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, - 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, - 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, - 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, - 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, - 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, - 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, - 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, - 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, - // Block 0xc, offset 0x300 - 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, - 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, - 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, - 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, - 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, - 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, - 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, - 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, - 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, - 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, - 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, - // Block 0xd, offset 0x340 - 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, - 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, - 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, - 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, - 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, - 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, - 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, - 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, - 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, - 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, - 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, - // Block 0xe, offset 0x380 - 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, - 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, - 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, - 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, - 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, - 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, - 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, - 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, - 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, - 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, - 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, - 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, - 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, - 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, - 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, - 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, - 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, - 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, - 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, - 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, - 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, - // Block 0x10, offset 0x400 - 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, - 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, - 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, - 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, - 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, - 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, - 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, - 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, - 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, - 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, - 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, - // Block 0x11, offset 0x440 - 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, - 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, - 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, - 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, - 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, - 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, - 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, - 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, - 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, - 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, - 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, - // Block 0x12, offset 0x480 - 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, - 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, - 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, - 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, - 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, - 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, - 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, - 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, - 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, - 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, - 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, - 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, - 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, - 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, - 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, - 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, - 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, - 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, - 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, - 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, - 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, - // Block 0x14, offset 0x500 - 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, - 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, - 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, - 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, - 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, - 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, - 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, - 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, - 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, - 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, - 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, - // Block 0x15, offset 0x540 - 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, - 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, - 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, - 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, - 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, - 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, - 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, - 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, - 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, - 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, - 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, - // Block 0x16, offset 0x580 - 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, - 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, - 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, - 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, - 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, - 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, - 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, - 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, - 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, - 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, - 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, - // Block 0x17, offset 0x5c0 - 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, - 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, - 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, - 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, - 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, - 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, - 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, - 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, - 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, - 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, - 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, - // Block 0x18, offset 0x600 - 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, - 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, - 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, - 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, - 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, - 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, - 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, - 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, - 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, - 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, - 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040, - // Block 0x19, offset 0x640 - 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, - 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, - 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, - 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, - 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, - 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, - 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, - 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, - 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, - 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, - 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, - // Block 0x1a, offset 0x680 - 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, - 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, - 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, - 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, - 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, - 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, - 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, - 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, - 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, - 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, - 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, - // Block 0x1b, offset 0x6c0 - 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, - 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, - 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, - 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, - 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, - 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, - 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, - 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, - 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, - 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, - 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, - // Block 0x1c, offset 0x700 - 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, - 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, - 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, - 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, - 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, - 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, - 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, - 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, - 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, - 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, - 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, - // Block 0x1d, offset 0x740 - 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, - 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, - 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, - 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, - 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, - 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, - 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, - 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, - 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, - 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, - 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, - // Block 0x1e, offset 0x780 - 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, - 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, - 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, - 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, - 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, - 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, - 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, - 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, - 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, - 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, - 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, - 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, - 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, - 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, - 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, - 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, - 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, - 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, - 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, - 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, - 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, - // Block 0x20, offset 0x800 - 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, - 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, - 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, - 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, - 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, - 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, - 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, - 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, - 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, - 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, - 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, - // Block 0x21, offset 0x840 - 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008, - 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, - 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, - 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, - 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, - 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, - 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, - 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, - 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, - 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, - 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, - // Block 0x22, offset 0x880 - 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, - 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, - 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, - 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, - 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, - 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, - 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, - 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, - 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, - 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, - 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, - 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, - 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, - 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, - 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, - 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, - 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, - 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, - 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, - 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, - 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, - // Block 0x24, offset 0x900 - 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, - 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, - 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, - 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, - 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, - 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, - 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, - 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, - 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, - 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, - 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, - // Block 0x25, offset 0x940 - 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, - 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, - 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, - 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, - 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, - 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, - 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, - 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, - 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, - 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, - 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, - // Block 0x26, offset 0x980 - 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, - 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, - 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, - 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, - 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, - 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, - 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, - 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, - 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, - 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, - 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, - // Block 0x27, offset 0x9c0 - 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, - 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, - 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, - 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, - 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, - 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, - 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, - 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, - 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, - 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, - 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, - // Block 0x28, offset 0xa00 - 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, - 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, - 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, - 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, - 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, - 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, - 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, - 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, - 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, - 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, - 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, - // Block 0x29, offset 0xa40 - 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, - 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, - 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, - 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, - 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, - 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, - 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, - 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, - 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, - 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, - 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, - // Block 0x2a, offset 0xa80 - 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, - 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, - 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, - 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, - 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, - 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, - 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, - 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, - 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, - 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, - 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, - // Block 0x2b, offset 0xac0 - 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, - 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, - 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, - 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, - 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, - 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, - 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, - 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, - 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, - 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, - 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, - // Block 0x2c, offset 0xb00 - 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, - 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, - 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, - 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, - 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, - 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, - 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, - 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, - 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, - 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, - 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, - // Block 0x2d, offset 0xb40 - 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, - 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, - 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, - 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, - 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, - 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, - 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, - 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, - 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, - 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, - 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, - // Block 0x2e, offset 0xb80 - 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, - 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, - 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, - 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, - 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, - 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, - 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, - 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, - 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, - 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, - 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, - 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, - 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, - 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, - 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, - 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, - 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, - 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, - 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, - 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, - 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, - // Block 0x30, offset 0xc00 - 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, - 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, - 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, - 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, - 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, - 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, - 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, - 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, - 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, - 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, - 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, - // Block 0x31, offset 0xc40 - 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, - 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, - 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, - 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, - 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, - 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, - 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, - 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, - 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, - 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, - 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, - // Block 0x32, offset 0xc80 - 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, - 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, - 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, - 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, - 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, - 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, - 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, - 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, - 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, - 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, - 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, - // Block 0x33, offset 0xcc0 - 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, - 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, - 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, - 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, - 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, - 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, - 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, - 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, - 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, - 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, - 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, - // Block 0x34, offset 0xd00 - 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, - 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, - 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, - 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, - 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, - 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, - 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, - 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, - 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, - 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, - 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, - // Block 0x35, offset 0xd40 - 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, - 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, - 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, - 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, - 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, - 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, - 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, - 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, - 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, - 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, - 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, - // Block 0x36, offset 0xd80 - 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, - 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, - 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, - 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, - 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, - 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, - 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, - 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, - 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, - 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, - 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, - 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, - 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, - 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, - 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, - 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, - 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, - 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, - 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, - 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, - 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, - // Block 0x38, offset 0xe00 - 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, - 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, - 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, - 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, - 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, - 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, - 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, - 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, - 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, - 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, - 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, - // Block 0x39, offset 0xe40 - 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, - 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, - 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, - 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, - 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, - 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, - 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, - 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, - 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, - 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, - 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, - // Block 0x3a, offset 0xe80 - 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, - 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, - 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, - 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, - 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, - 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, - 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, - 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, - 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, - 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, - 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, - // Block 0x3b, offset 0xec0 - 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, - 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, - 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, - 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, - 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, - 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, - 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, - 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, - 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, - 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, - 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, - // Block 0x3c, offset 0xf00 - 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, - 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, - 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, - 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, - 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, - 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, - 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, - 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, - 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, - 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, - 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, - // Block 0x3d, offset 0xf40 - 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, - 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, - 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, - 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, - 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, - 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, - 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, - 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, - 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, - 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, - 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, - // Block 0x3e, offset 0xf80 - 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, - 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, - 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, - 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, - 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, - 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, - 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, - 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, - 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, - 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, - 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, - // Block 0x3f, offset 0xfc0 - 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, - 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, - 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, - 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, - 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, - 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, - 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, - 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, - 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, - 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, - 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, - // Block 0x40, offset 0x1000 - 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, - 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, - 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, - 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, - 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, - 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, - 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, - 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, - 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, - 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, - 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, - // Block 0x41, offset 0x1040 - 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, - 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, - 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, - 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, - 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, - 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, - 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, - 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, - 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, - 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, - 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, - // Block 0x42, offset 0x1080 - 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, - 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, - 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, - 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, - 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, - 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, - 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, - 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, - 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, - 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, - 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, - // Block 0x43, offset 0x10c0 - 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, - 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, - 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, - 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, - 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, - 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, - 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, - 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, - 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, - 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, - 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, - // Block 0x44, offset 0x1100 - 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, - 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, - 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, - 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, - 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, - 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, - 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, - 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, - 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, - 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, - 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, - // Block 0x45, offset 0x1140 - 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, - 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, - 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, - 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, - 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, - 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, - 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, - 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, - 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, - 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, - 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, - // Block 0x46, offset 0x1180 - 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, - 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, - 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, - 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, - 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, - 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, - 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, - 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, - 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, - 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, - 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, - // Block 0x47, offset 0x11c0 - 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, - 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, - 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, - 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, - 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, - 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, - 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, - 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, - 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, - 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, - 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, - // Block 0x48, offset 0x1200 - 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, - 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, - 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, - 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, - 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, - 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, - 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, - 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008, - 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, - 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0008, 0x123a: 0x0040, 0x123b: 0x0040, - 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, - // Block 0x49, offset 0x1240 - 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, - 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, - 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, - 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, - 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, - 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, - 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, - 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, - 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, - 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, - 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, - // Block 0x4a, offset 0x1280 - 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, - 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, - 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, - 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, - 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, - 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, - 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, - 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, - 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, - 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, - 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, - // Block 0x4b, offset 0x12c0 - 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, - 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, - 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, - 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, - 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, - 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, - 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, - 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, - 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, - 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, - 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, - // Block 0x4c, offset 0x1300 - 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, - 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, - 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, - 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, - 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, - 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, - 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, - 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, - 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, - 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, - 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, - // Block 0x4d, offset 0x1340 - 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, - 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, - 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, - 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, - 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, - 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, - 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, - 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, - 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, - 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, - 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, - // Block 0x4e, offset 0x1380 - 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, - 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, - 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, - 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, - 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, - 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, - 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, - 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, - 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, - 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, - 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, - 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, - 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, - 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, - 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, - 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, - 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, - 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, - 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, - 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, - 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, - // Block 0x50, offset 0x1400 - 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, - 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, - 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, - 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, - 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, - 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, - 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, - 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, - 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, - 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, - 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, - // Block 0x51, offset 0x1440 - 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, - 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, - 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, - 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, - 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, - 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, - 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, - 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, - 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, - 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, - 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, - // Block 0x52, offset 0x1480 - 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, - 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, - 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, - 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, - 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, - 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, - 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, - 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, - 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, - 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, - 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, - 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, - 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, - 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, - 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, - 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, - 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, - 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, - 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, - 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, - 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, - // Block 0x54, offset 0x1500 - 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, - 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, - 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, - 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, - 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, - 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, - 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, - 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, - 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, - 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, - 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, - // Block 0x55, offset 0x1540 - 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, - 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, - 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, - 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, - 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, - 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, - 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, - 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, - 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, - 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, - 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, - // Block 0x56, offset 0x1580 - 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, - 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, - 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, - 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, - 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, - 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, - 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, - 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, - 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, - 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, - 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, - // Block 0x57, offset 0x15c0 - 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, - 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, - 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, - 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, - 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, - 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, - 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, - 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, - 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, - 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, - 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, - // Block 0x58, offset 0x1600 - 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, - 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, - 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, - 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, - 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, - 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, - 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, - 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, - 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, - 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, - 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, - // Block 0x59, offset 0x1640 - 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, - 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, - 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, - 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, - 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, - 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, - 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, - 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, - 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, - 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, - 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, - // Block 0x5a, offset 0x1680 - 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, - 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, - 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, - 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, - 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, - 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, - 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, - 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, - 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, - 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, - 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, - // Block 0x5b, offset 0x16c0 - 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, - 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, - 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, - 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, - 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, - 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, - 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, - 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, - 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, - 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, - 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, - // Block 0x5c, offset 0x1700 - 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, - 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, - 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, - 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, - 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, - 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, - 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, - 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, - 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, - 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, - 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, - // Block 0x5d, offset 0x1740 - 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, - 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, - 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, - 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, - 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, - 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, - 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, - 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, - 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, - 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, - 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, - // Block 0x5e, offset 0x1780 - 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, - 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, - 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, - 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, - 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, - 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, - 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, - 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, - 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, - 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, - 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, - 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, - 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, - 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, - 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, - 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, - 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, - 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, - 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, - 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, - 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, - // Block 0x60, offset 0x1800 - 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, - 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, - 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, - 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, - 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, - 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, - 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, - 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, - 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, - 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, - 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, - // Block 0x61, offset 0x1840 - 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, - 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, - 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, - 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, - 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, - 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, - 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, - 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, - 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, - 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, - 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, - // Block 0x62, offset 0x1880 - 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, - 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, - 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, - 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, - 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, - 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, - 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, - 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, - 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, - 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, - 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, - 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, - 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, - 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, - 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, - 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, - 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, - 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, - 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, - 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, - 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, - // Block 0x64, offset 0x1900 - 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, - 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, - 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, - 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, - 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, - 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, - 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, - 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, - 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, - 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, - 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, - // Block 0x65, offset 0x1940 - 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, - 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, - 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, - 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, - 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, - 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, - 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, - 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, - 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, - 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, - 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, - // Block 0x66, offset 0x1980 - 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, - 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, - 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, - 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, - 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, - 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, - 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, - 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, - 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, - 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, - 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, - 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, - 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, - 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, - 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, - 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, - 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, - 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, - 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, - 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, - 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, - // Block 0x68, offset 0x1a00 - 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, - 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, - 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, - 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, - 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, - 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, - 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, - 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, - 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, - 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, - 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, - // Block 0x69, offset 0x1a40 - 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, - 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, - 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, - 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, - 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, - 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, - 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, - 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, - 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, - 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, - 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, - // Block 0x6a, offset 0x1a80 - 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, - 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, - 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, - 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, - 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, - 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, - 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, - 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, - 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, - 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, - 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, - // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, - 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, - 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, - 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, - 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, - 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, - 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, - 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, - 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, - 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, - 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, - // Block 0x6c, offset 0x1b00 - 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, - 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, - 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, - 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, - 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, - 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, - 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, - 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, - 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, - 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, - 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, - // Block 0x6d, offset 0x1b40 - 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, - 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, - 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, - 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, - 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, - 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, - 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, - 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, - 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, - 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, - 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, - // Block 0x6e, offset 0x1b80 - 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, - 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, - 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, - 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, - 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, - 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, - 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, - 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, - 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, - 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, - 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, - // Block 0x6f, offset 0x1bc0 - 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, - 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, - 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, - 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, - 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, - 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, - 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, - 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, - 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, - 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, - 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, - // Block 0x70, offset 0x1c00 - 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, - 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, - 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, - 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, - 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, - 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, - 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, - 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, - 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, - 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, - 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, - // Block 0x71, offset 0x1c40 - 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, - 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, - 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, - 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, - 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, - 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, - 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, - 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, - 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, - 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, - 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, - // Block 0x72, offset 0x1c80 - 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, - 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, - 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, - 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, - 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, - 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, - 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, - 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, - 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, - 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, - 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, - // Block 0x73, offset 0x1cc0 - 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, - 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, - 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, - 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, - 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, - 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, - 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, - 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, - 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, - 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, - 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, - // Block 0x74, offset 0x1d00 - 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, - 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, - 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, - 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, - 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, - 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, - 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, - 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, - 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, - 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, - 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, - // Block 0x75, 
offset 0x1d40 - 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, - 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, - 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, - 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, - 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, - 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, - 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, - 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018, - 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, - 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, - 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, - // Block 0x76, offset 0x1d80 - 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, - 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, - 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, - 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, - 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, - 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, - 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, - 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, - 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, - 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, - 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, - // Block 0x77, offset 0x1dc0 - 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, - 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, - 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, - 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, - 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, - 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, - 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, - 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, - 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, - 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, - 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, - // Block 0x78, offset 0x1e00 - 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, - 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, - 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, - 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, - 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, - 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, - 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, - 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, - 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, - 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, - 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, - // Block 0x79, offset 0x1e40 - 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, - 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, - 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, - 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, - 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, - 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, - 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, - 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, - 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, - 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, - 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, - // Block 0x7a, offset 0x1e80 - 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, - 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, - 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, - 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, - 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, - 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, - 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, - 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, - 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, - 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, - 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, - // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, - 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, - 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, - 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, - 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, - 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, - 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, - 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, - 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, - 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, - 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, - // Block 0x7c, offset 0x1f00 - 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, - 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, - 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, - 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, - 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, - 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, - 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, - 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, - 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, - 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, - 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, - // Block 0x7d, offset 0x1f40 - 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, - 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, - 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, - 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, - 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, - 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, - 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, - 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, - 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, - 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, - 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, - // Block 0x7e, offset 0x1f80 - 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, - 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, - 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, - 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, - 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, - 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, - 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, - 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, - 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, - 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, - 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, -} - -// idnaIndex: 36 blocks, 2304 entries, 4608 bytes -// Block 0 is the zero block. -var idnaIndex = [2304]uint16{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, - 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, - 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, - 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, - 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, - 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, - 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, - // Block 0x4, offset 0x100 - 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, - 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, - 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, - 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, - // Block 0x5, offset 0x140 - 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, - 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, - 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, - 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, - 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, - 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, - 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, - 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, - // Block 0x6, offset 0x180 - 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, - 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, - 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, - 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, - 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, - 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, - 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, - 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, - 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, - 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, - 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, - 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, - 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, - 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, - 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, - // Block 0x8, offset 0x200 - 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, - 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, - 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, - 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, - 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, - 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, - 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, - 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, - // Block 0x9, offset 0x240 - 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, - 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, - 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, - 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, - 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, - 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, - 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, - 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, - // Block 0xa, offset 0x280 - 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, - 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, - 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, - 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, - 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, - 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, - 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, - 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, - 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, - 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, - 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, - 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, - 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, - 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, - 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, - // Block 0xc, offset 0x300 - 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, - 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, - 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, - 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, - // Block 0xd, offset 0x340 - 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, - 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, - 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, - 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, - 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, - 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, - 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, - 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, - // Block 0xe, offset 0x380 - 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, - 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, - 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, - 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, - 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, - 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, - 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, - 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, - 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, - 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, - 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, - 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, - 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, - 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0x126, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, - 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x128, 0x3fd: 0x129, 
0x3fe: 0xba, 0x3ff: 0xba, - // Block 0x10, offset 0x400 - 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131, - 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, - 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a, - 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, - 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, - 0x428: 0x143, 0x429: 0x144, 0x42a: 0x145, 0x42b: 0x146, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, - 0x430: 0x147, 0x431: 0x148, 0x432: 0x149, 0x433: 0xba, 0x434: 0x14a, 0x435: 0x14b, 0x436: 0x14c, 0x437: 0xba, - 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14d, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, - // Block 0x11, offset 0x440 - 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, - 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x14e, 0x44f: 0xba, - 0x450: 0x9b, 0x451: 0x14f, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x150, 0x456: 0xba, 0x457: 0xba, - 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, - 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, - 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, - 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, - 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, - // Block 0x12, offset 0x480 - 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, - 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, - 0x490: 0x151, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, - 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, - 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, - 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, - 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, - 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, - // Block 0x13, offset 0x4c0 - 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, - 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, - 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, - 0x4d8: 0x9f, 0x4d9: 0x152, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, - 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, - 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, - 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, - 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 
0x4fe: 0xba, 0x4ff: 0xba, - // Block 0x14, offset 0x500 - 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, - 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, - 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, - 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, - 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, - 0x528: 0x146, 0x529: 0x153, 0x52a: 0xba, 0x52b: 0x154, 0x52c: 0x155, 0x52d: 0x156, 0x52e: 0x157, 0x52f: 0xba, - 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, - 0x538: 0xba, 0x539: 0x158, 0x53a: 0x159, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15a, 0x53e: 0x15b, 0x53f: 0x15c, - // Block 0x15, offset 0x540 - 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, - 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, - 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, - 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x15d, - 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, - 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x15e, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, - 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, - 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, - // Block 0x16, offset 0x580 - 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x15f, 0x585: 0x160, 0x586: 0x9f, 0x587: 0x9f, - 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x161, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, - 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, - 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, - 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, - 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, - 0x5b0: 0x9f, 0x5b1: 0x162, 0x5b2: 0x163, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, - 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, - // Block 0x17, offset 0x5c0 - 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x164, 0x5c4: 0x165, 0x5c5: 0x166, 0x5c6: 0x167, 0x5c7: 0x168, - 0x5c8: 0x9b, 0x5c9: 0x169, 0x5ca: 0xba, 0x5cb: 0x16a, 0x5cc: 0x9b, 0x5cd: 0x16b, 0x5ce: 0xba, 0x5cf: 0xba, - 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, - 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, - 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, - 0x5e8: 0x16c, 0x5e9: 0x16d, 0x5ea: 0x16e, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, - 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, - 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 
0x5ff: 0xba, - // Block 0x18, offset 0x600 - 0x600: 0x16f, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, - 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, - 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, - 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, - 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x170, 0x624: 0x6f, 0x625: 0x171, 0x626: 0xba, 0x627: 0xba, - 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, - 0x630: 0xba, 0x631: 0x172, 0x632: 0x173, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, - 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x174, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, - // Block 0x19, offset 0x640 - 0x640: 0x175, 0x641: 0x9b, 0x642: 0x176, 0x643: 0x177, 0x644: 0x73, 0x645: 0x74, 0x646: 0x178, 0x647: 0x179, - 0x648: 0x75, 0x649: 0x17a, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, - 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, - 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x17b, 0x65c: 0x9b, 0x65d: 0x17c, 0x65e: 0x9b, 0x65f: 0x17d, - 0x660: 0x17e, 0x661: 0x17f, 0x662: 0x180, 0x663: 0xba, 0x664: 0x181, 0x665: 0x182, 0x666: 0x183, 0x667: 0x184, - 0x668: 0xba, 0x669: 0x185, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, - 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, - 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, - // Block 0x1a, offset 0x680 - 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, - 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, - 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, - 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x186, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, - 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, - 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, - 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, - 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, - // Block 0x1b, offset 0x6c0 - 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, - 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, - 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, - 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x187, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, - 0x6e0: 0x188, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, - 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, - 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, - 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, 
- // Block 0x1c, offset 0x700 - 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, - 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, - 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, - 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, - 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, - 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, - 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, - 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x189, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, - // Block 0x1d, offset 0x740 - 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, - 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, - 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, - 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, - 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, - 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x18a, - 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, - 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, - // Block 0x1e, offset 0x780 - 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, - 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, - 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, - 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, - 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x18b, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x18c, 0x7a7: 0x7b, - 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, - 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, - 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, - // Block 0x1f, offset 0x7c0 - 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, - 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, - 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, - 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, - 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, - 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, - // Block 0x20, offset 0x800 - 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, - 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, - 0x810: 
0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, - 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, - 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, - 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, - 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, - 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, - // Block 0x21, offset 0x840 - 0x840: 0x18d, 0x841: 0x18e, 0x842: 0xba, 0x843: 0xba, 0x844: 0x18f, 0x845: 0x18f, 0x846: 0x18f, 0x847: 0x190, - 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, - 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, - 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, - 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, - 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, - 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, - 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, - // Block 0x22, offset 0x880 - 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, - 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, - 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, - 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, - 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, - 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, - 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, - 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, - 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, -} - -// idnaSparseOffset: 276 entries, 552 bytes -var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x269, 0x27a, 0x27e, 0x289, 0x28d, 0x296, 0x29e, 0x2a4, 0x2a9, 0x2ac, 0x2b0, 0x2b6, 0x2ba, 0x2be, 0x2c2, 0x2c7, 0x2cd, 0x2d5, 0x2dc, 0x2e7, 0x2f1, 0x2f5, 0x2f8, 0x2fe, 0x302, 0x304, 0x307, 0x309, 0x30c, 0x316, 0x319, 0x328, 0x32c, 0x331, 0x334, 0x338, 0x33d, 0x342, 0x348, 0x34e, 0x35d, 0x363, 0x367, 0x376, 0x37b, 0x383, 0x38d, 0x398, 0x3a0, 0x3b1, 0x3ba, 0x3ca, 0x3d7, 0x3e1, 0x3e6, 
0x3f3, 0x3f7, 0x3fc, 0x3fe, 0x402, 0x404, 0x408, 0x411, 0x417, 0x41b, 0x42b, 0x435, 0x43a, 0x43d, 0x443, 0x44a, 0x44f, 0x453, 0x459, 0x45e, 0x467, 0x46c, 0x472, 0x479, 0x480, 0x487, 0x48b, 0x490, 0x493, 0x498, 0x4a4, 0x4aa, 0x4af, 0x4b6, 0x4be, 0x4c3, 0x4c7, 0x4d7, 0x4de, 0x4e2, 0x4e6, 0x4ed, 0x4ef, 0x4f2, 0x4f5, 0x4f9, 0x502, 0x506, 0x50e, 0x516, 0x51c, 0x525, 0x531, 0x538, 0x541, 0x54b, 0x552, 0x560, 0x56d, 0x57a, 0x583, 0x587, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5eb, 0x5ee, 0x5f3, 0x5fe, 0x607, 0x613, 0x616, 0x620, 0x629, 0x635, 0x642, 0x64f, 0x65d, 0x664, 0x667, 0x66c, 0x66f, 0x672, 0x675, 0x67c, 0x683, 0x687, 0x692, 0x695, 0x698, 0x69b, 0x6a1, 0x6a6, 0x6aa, 0x6ad, 0x6b0, 0x6b3, 0x6b6, 0x6b9, 0x6be, 0x6c8, 0x6cb, 0x6cf, 0x6de, 0x6ea, 0x6ee, 0x6f3, 0x6f7, 0x6fc, 0x700, 0x705, 0x70e, 0x719, 0x71f, 0x727, 0x72a, 0x72d, 0x731, 0x735, 0x73b, 0x741, 0x746, 0x749, 0x759, 0x760, 0x763, 0x766, 0x76a, 0x770, 0x775, 0x77a, 0x782, 0x787, 0x78b, 0x78f, 0x792, 0x795, 0x799, 0x79d, 0x7a0, 0x7b0, 0x7c1, 0x7c6, 0x7c8, 0x7ca} - -// idnaSparseValues: 1997 entries, 7988 bytes -var idnaSparseValues = [1997]valueRange{ - // Block 0x0, offset 0x0 - {value: 0x0000, lo: 0x07}, - {value: 0xe105, lo: 0x80, hi: 0x96}, - {value: 0x0018, lo: 0x97, hi: 0x97}, - {value: 0xe105, lo: 0x98, hi: 0x9e}, - {value: 0x001f, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbf}, - // Block 0x1, offset 0x8 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0xe01d, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x82}, - {value: 0x0335, lo: 0x83, hi: 0x83}, - {value: 0x034d, lo: 0x84, hi: 0x84}, - {value: 0x0365, lo: 0x85, hi: 0x85}, - {value: 0xe00d, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x87}, - {value: 0xe00d, lo: 0x88, hi: 0x88}, - {value: 0x0008, lo: 0x89, hi: 0x89}, - {value: 0xe00d, lo: 0x8a, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0x8b}, - {value: 0xe00d, lo: 0x8c, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0x8d}, - {value: 0xe00d, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0xbf}, - // Block 0x2, offset 0x19 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x0249, lo: 0xb0, hi: 0xb0}, - {value: 0x037d, lo: 0xb1, hi: 0xb1}, - {value: 0x0259, lo: 0xb2, hi: 0xb2}, - {value: 0x0269, lo: 0xb3, hi: 0xb3}, - {value: 0x034d, lo: 0xb4, hi: 0xb4}, - {value: 0x0395, lo: 0xb5, hi: 0xb5}, - {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, - {value: 0x0279, lo: 0xb7, hi: 0xb7}, - {value: 0x0289, lo: 0xb8, hi: 0xb8}, - {value: 0x0008, lo: 0xb9, hi: 0xbf}, - // Block 0x3, offset 0x25 - {value: 0x0000, lo: 0x01}, - {value: 0x3308, lo: 0x80, hi: 0xbf}, - // Block 0x4, offset 0x27 - {value: 0x0000, lo: 0x04}, - {value: 0x03f5, lo: 0x80, hi: 0x8f}, - {value: 0xe105, lo: 0x90, hi: 0x9f}, - {value: 0x049d, lo: 0xa0, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x5, offset 0x2c - {value: 0x0000, lo: 0x06}, - {value: 0xe185, lo: 0x80, hi: 0x8f}, - {value: 0x0545, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x0008, lo: 0x99, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x6, offset 0x33 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0401, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, - {value: 0x0018, lo: 0x89, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8f}, - {value: 
0x0040, lo: 0x90, hi: 0x90}, - {value: 0x3308, lo: 0x91, hi: 0xbd}, - {value: 0x0818, lo: 0xbe, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x7, offset 0x3e - {value: 0x0000, lo: 0x0b}, - {value: 0x0818, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x82}, - {value: 0x0818, lo: 0x83, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x85}, - {value: 0x0818, lo: 0x86, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xae}, - {value: 0x0808, lo: 0xaf, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x8, offset 0x4a - {value: 0x0000, lo: 0x03}, - {value: 0x0a08, lo: 0x80, hi: 0x87}, - {value: 0x0c08, lo: 0x88, hi: 0x99}, - {value: 0x0a08, lo: 0x9a, hi: 0xbf}, - // Block 0x9, offset 0x4e - {value: 0x0000, lo: 0x0e}, - {value: 0x3308, lo: 0x80, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0c08, lo: 0x8d, hi: 0x8d}, - {value: 0x0a08, lo: 0x8e, hi: 0x98}, - {value: 0x0c08, lo: 0x99, hi: 0x9b}, - {value: 0x0a08, lo: 0x9c, hi: 0xaa}, - {value: 0x0c08, lo: 0xab, hi: 0xac}, - {value: 0x0a08, lo: 0xad, hi: 0xb0}, - {value: 0x0c08, lo: 0xb1, hi: 0xb1}, - {value: 0x0a08, lo: 0xb2, hi: 0xb2}, - {value: 0x0c08, lo: 0xb3, hi: 0xb4}, - {value: 0x0a08, lo: 0xb5, hi: 0xb7}, - {value: 0x0c08, lo: 0xb8, hi: 0xb9}, - {value: 0x0a08, lo: 0xba, hi: 0xbf}, - // Block 0xa, offset 0x5d - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xb0}, - {value: 0x0808, lo: 0xb1, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xb, offset 0x62 - {value: 0x0000, lo: 0x09}, - {value: 0x0808, lo: 0x80, hi: 0x89}, - {value: 0x0a08, lo: 0x8a, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xb3}, - {value: 0x0808, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xb9}, - {value: 0x0818, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x0818, lo: 0xbe, hi: 0xbf}, - // Block 0xc, offset 0x6c - {value: 0x0000, lo: 0x0b}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x99}, - {value: 0x0808, lo: 0x9a, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0xa3}, - {value: 0x0808, lo: 0xa4, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa7}, - {value: 0x0808, lo: 0xa8, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0818, lo: 0xb0, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd, offset 0x78 - {value: 0x0000, lo: 0x0d}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0a08, lo: 0xa0, hi: 0xa9}, - {value: 0x0c08, lo: 0xaa, hi: 0xac}, - {value: 0x0808, lo: 0xad, hi: 0xad}, - {value: 0x0c08, lo: 0xae, hi: 0xae}, - {value: 0x0a08, lo: 0xaf, hi: 0xb0}, - {value: 0x0c08, lo: 0xb1, hi: 0xb2}, - {value: 0x0a08, lo: 0xb3, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0a08, lo: 0xb6, hi: 0xb8}, - {value: 0x0c08, lo: 0xb9, hi: 0xb9}, - {value: 0x0a08, lo: 0xba, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0xe, offset 0x86 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x92}, - {value: 0x3308, lo: 0x93, hi: 0xa1}, - {value: 0x0840, lo: 0xa2, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xbf}, - // Block 0xf, offset 0x8b - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 
0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x10, offset 0x94 - {value: 0x0000, lo: 0x0f}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x85}, - {value: 0x3008, lo: 0x86, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x3008, lo: 0x8a, hi: 0x8c}, - {value: 0x3b08, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x11, offset 0xa4 - {value: 0x0000, lo: 0x0d}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xa9}, - {value: 0x0008, lo: 0xaa, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x12, offset 0xb2 - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0xba}, - {value: 0x3b08, lo: 0xbb, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x13, offset 0xbe - {value: 0x0000, lo: 0x0b}, - {value: 0x0040, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xb2}, - {value: 0x0008, lo: 0xb3, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x14, offset 0xca - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x89}, - {value: 0x3b08, lo: 0x8a, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8e}, - {value: 0x3008, lo: 0x8f, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x3008, lo: 0x98, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x15, offset 0xdb - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb2}, - {value: 0x08f1, lo: 0xb3, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb9}, - {value: 0x3b08, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0x16, offset 0xe5 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x8e}, - {value: 0x0018, lo: 0x8f, hi: 0x8f}, 
- {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0xbf}, - // Block 0x17, offset 0xec - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x3308, lo: 0x88, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0961, lo: 0x9c, hi: 0x9c}, - {value: 0x0999, lo: 0x9d, hi: 0x9d}, - {value: 0x0008, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x18, offset 0xf9 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0x8b}, - {value: 0xe03d, lo: 0x8c, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xb8}, - {value: 0x3308, lo: 0xb9, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x19, offset 0x10a - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0018, lo: 0x8e, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0x1a, offset 0x111 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x3008, lo: 0xab, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xb0}, - {value: 0x3008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x1b, offset 0x11c - {value: 0x0000, lo: 0x0e}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x3008, lo: 0x96, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x3308, lo: 0x9e, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xa1}, - {value: 0x3008, lo: 0xa2, hi: 0xa4}, - {value: 0x0008, lo: 0xa5, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xbf}, - // Block 0x1c, offset 0x12b - {value: 0x0000, lo: 0x0d}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x3008, lo: 0x87, hi: 0x8c}, - {value: 0x3308, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x8e}, - {value: 0x3008, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x3008, lo: 0x9a, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x1d, offset 0x139 - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x86}, - {value: 0x055d, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8c}, - {value: 0x055d, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xba}, - {value: 0x0018, lo: 
0xbb, hi: 0xbb}, - {value: 0xe105, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0x1e, offset 0x143 - {value: 0x0000, lo: 0x01}, - {value: 0x0018, lo: 0x80, hi: 0xbf}, - // Block 0x1f, offset 0x145 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xa0}, - {value: 0x2018, lo: 0xa1, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x20, offset 0x14a - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xa7}, - {value: 0x2018, lo: 0xa8, hi: 0xbf}, - // Block 0x21, offset 0x14d - {value: 0x0000, lo: 0x02}, - {value: 0x2018, lo: 0x80, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0xbf}, - // Block 0x22, offset 0x150 - {value: 0x0000, lo: 0x01}, - {value: 0x0008, lo: 0x80, hi: 0xbf}, - // Block 0x23, offset 0x152 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x24, offset 0x15e - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x25, offset 0x169 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x26, offset 0x171 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x27, offset 0x177 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x28, offset 0x17d - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x29, offset 0x182 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0xe045, lo: 0xb8, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x2a, offset 0x187 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x2b, offset 0x18a - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xac}, - {value: 0x0018, lo: 0xad, hi: 0xae}, - {value: 0x0008, lo: 0xaf, hi: 0xbf}, - // Block 0x2c, offset 0x18e - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9c}, - {value: 0x0040, lo: 0x9d, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, 
- // Block 0x2d, offset 0x194 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0x2e, offset 0x199 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x93}, - {value: 0x3b08, lo: 0x94, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x3b08, lo: 0xb4, hi: 0xb4}, - {value: 0x0018, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x2f, offset 0x1a5 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x30, offset 0x1af - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xb3}, - {value: 0x3340, lo: 0xb4, hi: 0xb5}, - {value: 0x3008, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x31, offset 0x1b5 - {value: 0x0000, lo: 0x10}, - {value: 0x3008, lo: 0x80, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x3008, lo: 0x87, hi: 0x88}, - {value: 0x3308, lo: 0x89, hi: 0x91}, - {value: 0x3b08, lo: 0x92, hi: 0x92}, - {value: 0x3308, lo: 0x93, hi: 0x93}, - {value: 0x0018, lo: 0x94, hi: 0x96}, - {value: 0x0008, lo: 0x97, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x32, offset 0x1c6 - {value: 0x0000, lo: 0x09}, - {value: 0x0018, lo: 0x80, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x86}, - {value: 0x0218, lo: 0x87, hi: 0x87}, - {value: 0x0018, lo: 0x88, hi: 0x8a}, - {value: 0x33c0, lo: 0x8b, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0208, lo: 0xa0, hi: 0xbf}, - // Block 0x33, offset 0x1d0 - {value: 0x0000, lo: 0x02}, - {value: 0x0208, lo: 0x80, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0x34, offset 0x1d3 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x0208, lo: 0x87, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xa9}, - {value: 0x0208, lo: 0xaa, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x35, offset 0x1db - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0x36, offset 0x1de - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb8}, - {value: 0x3308, lo: 0xb9, hi: 0xbb}, 
- {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x37, offset 0x1eb - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0x83}, - {value: 0x0018, lo: 0x84, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x38, offset 0x1f3 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x39, offset 0x1f7 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0028, lo: 0x9a, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0xbf}, - // Block 0x3a, offset 0x1fe - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x3308, lo: 0x97, hi: 0x98}, - {value: 0x3008, lo: 0x99, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x3b, offset 0x206 - {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x94}, - {value: 0x3008, lo: 0x95, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3b08, lo: 0xa0, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xac}, - {value: 0x3008, lo: 0xad, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x3c, offset 0x216 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa7}, - {value: 0x0018, lo: 0xa8, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xbd}, - {value: 0x3318, lo: 0xbe, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x3d, offset 0x222 - {value: 0x0000, lo: 0x01}, - {value: 0x0040, lo: 0x80, hi: 0xbf}, - // Block 0x3e, offset 0x224 - {value: 0x0000, lo: 0x09}, - {value: 0x3308, lo: 0x80, hi: 0x83}, - {value: 0x3008, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbf}, - // Block 0x3f, offset 0x22e - {value: 0x0000, lo: 0x0b}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x3808, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x40, offset 0x23a - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa5}, 
- {value: 0x3008, lo: 0xa6, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa9}, - {value: 0x3808, lo: 0xaa, hi: 0xaa}, - {value: 0x3b08, lo: 0xab, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xbf}, - // Block 0x41, offset 0x246 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa9}, - {value: 0x3008, lo: 0xaa, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb1}, - {value: 0x3808, lo: 0xb2, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbf}, - // Block 0x42, offset 0x252 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x3008, lo: 0xa4, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x43, offset 0x25a - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x44, offset 0x25f - {value: 0x0000, lo: 0x09}, - {value: 0x0e29, lo: 0x80, hi: 0x80}, - {value: 0x0e41, lo: 0x81, hi: 0x81}, - {value: 0x0e59, lo: 0x82, hi: 0x82}, - {value: 0x0e71, lo: 0x83, hi: 0x83}, - {value: 0x0e89, lo: 0x84, hi: 0x85}, - {value: 0x0ea1, lo: 0x86, hi: 0x86}, - {value: 0x0eb9, lo: 0x87, hi: 0x87}, - {value: 0x057d, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0x45, offset 0x269 - {value: 0x0000, lo: 0x10}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x92}, - {value: 0x0018, lo: 0x93, hi: 0x93}, - {value: 0x3308, lo: 0x94, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa8}, - {value: 0x0008, lo: 0xa9, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x3008, lo: 0xb7, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x46, offset 0x27a - {value: 0x0000, lo: 0x03}, - {value: 0x3308, lo: 0x80, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0x47, offset 0x27e - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x87}, - {value: 0xe045, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0xe045, lo: 0x98, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0xe045, lo: 0xa8, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb7}, - {value: 0xe045, lo: 0xb8, hi: 0xbf}, - // Block 0x48, offset 0x289 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x3318, lo: 0x90, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xbf}, - // Block 0x49, offset 0x28d - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x88}, - {value: 0x24c1, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x4a, 
offset 0x296 - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x24f1, lo: 0xac, hi: 0xac}, - {value: 0x2529, lo: 0xad, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xae}, - {value: 0x2579, lo: 0xaf, hi: 0xaf}, - {value: 0x25b1, lo: 0xb0, hi: 0xb0}, - {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x4b, offset 0x29e - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x9f}, - {value: 0x0080, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xad}, - {value: 0x0080, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x4c, offset 0x2a4 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0xa8}, - {value: 0x09c5, lo: 0xa9, hi: 0xa9}, - {value: 0x09e5, lo: 0xaa, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xbf}, - // Block 0x4d, offset 0x2a9 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xbf}, - // Block 0x4e, offset 0x2ac - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x28c1, lo: 0x8c, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x4f, offset 0x2b0 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0e66, lo: 0xb4, hi: 0xb4}, - {value: 0x292a, lo: 0xb5, hi: 0xb5}, - {value: 0x0e86, lo: 0xb6, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x50, offset 0x2b6 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x9b}, - {value: 0x2941, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0xbf}, - // Block 0x51, offset 0x2ba - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x52, offset 0x2be - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0xbf}, - // Block 0x53, offset 0x2c2 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x54, offset 0x2c7 - {value: 0x0000, lo: 0x05}, - {value: 0xe185, lo: 0x80, hi: 0x8f}, - {value: 0x03f5, lo: 0x90, hi: 0x9f}, - {value: 0x0ea5, lo: 0xa0, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x55, offset 0x2cd - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xac}, - {value: 0x0008, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x56, offset 0x2d5 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xae}, - {value: 0xe075, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0x57, offset 0x2dc - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x58, offset 0x2e7 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 
0x87}, - {value: 0x0008, lo: 0x88, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xbf}, - // Block 0x59, offset 0x2f1 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xae}, - {value: 0x0008, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x5a, offset 0x2f5 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0xbf}, - // Block 0x5b, offset 0x2f8 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9e}, - {value: 0x0edd, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x5c, offset 0x2fe - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xb2}, - {value: 0x0efd, lo: 0xb3, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x5d, offset 0x302 - {value: 0x0020, lo: 0x01}, - {value: 0x0f1d, lo: 0x80, hi: 0xbf}, - // Block 0x5e, offset 0x304 - {value: 0x0020, lo: 0x02}, - {value: 0x171d, lo: 0x80, hi: 0x8f}, - {value: 0x18fd, lo: 0x90, hi: 0xbf}, - // Block 0x5f, offset 0x307 - {value: 0x0020, lo: 0x01}, - {value: 0x1efd, lo: 0x80, hi: 0xbf}, - // Block 0x60, offset 0x309 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x61, offset 0x30c - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x3308, lo: 0x99, hi: 0x9a}, - {value: 0x29e2, lo: 0x9b, hi: 0x9b}, - {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, - {value: 0x0008, lo: 0x9d, hi: 0x9e}, - {value: 0x2a31, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xbf}, - // Block 0x62, offset 0x316 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xbe}, - {value: 0x2a69, lo: 0xbf, hi: 0xbf}, - // Block 0x63, offset 0x319 - {value: 0x0000, lo: 0x0e}, - {value: 0x0040, lo: 0x80, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xb0}, - {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, - {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, - {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, - {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, - {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, - {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, - {value: 0x2abd, lo: 0xb7, hi: 0xb7}, - {value: 0x2add, lo: 0xb8, hi: 0xb9}, - {value: 0x2afd, lo: 0xba, hi: 0xbb}, - {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, - {value: 0x2afd, lo: 0xbe, hi: 0xbf}, - // Block 0x64, offset 0x328 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x65, offset 0x32c - {value: 0x0030, lo: 0x04}, - {value: 0x2aa2, lo: 0x80, hi: 0x9d}, - {value: 0x305a, lo: 0x9e, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x30a2, lo: 0xa0, hi: 0xbf}, - // Block 0x66, offset 0x331 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x67, offset 0x334 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x68, offset 0x338 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, 
hi: 0xbf}, - // Block 0x69, offset 0x33d - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x6a, offset 0x342 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x0018, lo: 0xa6, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb1}, - {value: 0x0018, lo: 0xb2, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6b, offset 0x348 - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0xb6}, - {value: 0x0008, lo: 0xb7, hi: 0xb7}, - {value: 0x2009, lo: 0xb8, hi: 0xb8}, - {value: 0x6e89, lo: 0xb9, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xbf}, - // Block 0x6c, offset 0x34e - {value: 0x0000, lo: 0x0e}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0x85}, - {value: 0x3b08, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x3308, lo: 0x8b, hi: 0x8b}, - {value: 0x0008, lo: 0x8c, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xa7}, - {value: 0x0018, lo: 0xa8, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x6d, offset 0x35d - {value: 0x0000, lo: 0x05}, - {value: 0x0208, lo: 0x80, hi: 0xb1}, - {value: 0x0108, lo: 0xb2, hi: 0xb2}, - {value: 0x0008, lo: 0xb3, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6e, offset 0x363 - {value: 0x0000, lo: 0x03}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xbf}, - // Block 0x6f, offset 0x367 - {value: 0x0000, lo: 0x0e}, - {value: 0x3008, lo: 0x80, hi: 0x83}, - {value: 0x3b08, lo: 0x84, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8d}, - {value: 0x0018, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xba}, - {value: 0x0008, lo: 0xbb, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x70, offset 0x376 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x71, offset 0x37b - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x91}, - {value: 0x3008, lo: 0x92, hi: 0x92}, - {value: 0x3808, lo: 0x93, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x72, offset 0x383 - {value: 0x0000, lo: 0x09}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb9}, - {value: 0x3008, lo: 0xba, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbf}, - // Block 0x73, offset 0x38d - {value: 0x0000, lo: 0x0a}, - {value: 0x3808, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0x99}, - 
{value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x74, offset 0x398 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x75, offset 0x3a0 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x8b}, - {value: 0x3308, lo: 0x8c, hi: 0x8c}, - {value: 0x3008, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0018, lo: 0x9c, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbd}, - {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0x76, offset 0x3b1 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb8}, - {value: 0x0008, lo: 0xb9, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x77, offset 0x3ba - {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x9a}, - {value: 0x0008, lo: 0x9b, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xaa}, - {value: 0x3008, lo: 0xab, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb5}, - {value: 0x3b08, lo: 0xb6, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x78, offset 0x3ca - {value: 0x0000, lo: 0x0c}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x88}, - {value: 0x0008, lo: 0x89, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x90}, - {value: 0x0008, lo: 0x91, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x79, offset 0x3d7 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x4465, lo: 0x9c, hi: 0x9c}, - {value: 0x447d, lo: 0x9d, hi: 0x9d}, - {value: 0x2971, lo: 0x9e, hi: 0x9e}, - {value: 0xe06d, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xaf}, - {value: 0x4495, lo: 0xb0, hi: 0xbf}, - // Block 0x7a, offset 0x3e1 - {value: 0x0000, lo: 0x04}, - {value: 0x44b5, lo: 0x80, hi: 0x8f}, - {value: 0x44d5, lo: 0x90, hi: 0x9f}, - {value: 0x44f5, lo: 0xa0, hi: 0xaf}, - {value: 0x44d5, lo: 0xb0, hi: 0xbf}, - // Block 0x7b, offset 0x3e6 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - 
{value: 0x3308, lo: 0xa5, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3b08, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x7c, offset 0x3f3 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x7d, offset 0x3f7 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8a}, - {value: 0x0018, lo: 0x8b, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x7e, offset 0x3fc - {value: 0x0020, lo: 0x01}, - {value: 0x4515, lo: 0x80, hi: 0xbf}, - // Block 0x7f, offset 0x3fe - {value: 0x0020, lo: 0x03}, - {value: 0x4d15, lo: 0x80, hi: 0x94}, - {value: 0x4ad5, lo: 0x95, hi: 0x95}, - {value: 0x4fb5, lo: 0x96, hi: 0xbf}, - // Block 0x80, offset 0x402 - {value: 0x0020, lo: 0x01}, - {value: 0x54f5, lo: 0x80, hi: 0xbf}, - // Block 0x81, offset 0x404 - {value: 0x0020, lo: 0x03}, - {value: 0x5cf5, lo: 0x80, hi: 0x84}, - {value: 0x5655, lo: 0x85, hi: 0x85}, - {value: 0x5d95, lo: 0x86, hi: 0xbf}, - // Block 0x82, offset 0x408 - {value: 0x0020, lo: 0x08}, - {value: 0x6b55, lo: 0x80, hi: 0x8f}, - {value: 0x6d15, lo: 0x90, hi: 0x90}, - {value: 0x6d55, lo: 0x91, hi: 0xab}, - {value: 0x6ea1, lo: 0xac, hi: 0xac}, - {value: 0x70b5, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x70d5, lo: 0xb0, hi: 0xbf}, - // Block 0x83, offset 0x411 - {value: 0x0020, lo: 0x05}, - {value: 0x72d5, lo: 0x80, hi: 0xad}, - {value: 0x6535, lo: 0xae, hi: 0xae}, - {value: 0x7895, lo: 0xaf, hi: 0xb5}, - {value: 0x6f55, lo: 0xb6, hi: 0xb6}, - {value: 0x7975, lo: 0xb7, hi: 0xbf}, - // Block 0x84, offset 0x417 - {value: 0x0028, lo: 0x03}, - {value: 0x7c21, lo: 0x80, hi: 0x82}, - {value: 0x7be1, lo: 0x83, hi: 0x83}, - {value: 0x7c99, lo: 0x84, hi: 0xbf}, - // Block 0x85, offset 0x41b - {value: 0x0038, lo: 0x0f}, - {value: 0x9db1, lo: 0x80, hi: 0x83}, - {value: 0x9e59, lo: 0x84, hi: 0x85}, - {value: 0x9e91, lo: 0x86, hi: 0x87}, - {value: 0x9ec9, lo: 0x88, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0xa089, lo: 0x92, hi: 0x97}, - {value: 0xa1a1, lo: 0x98, hi: 0x9c}, - {value: 0xa281, lo: 0x9d, hi: 0xb3}, - {value: 0x9d41, lo: 0xb4, hi: 0xb4}, - {value: 0x9db1, lo: 0xb5, hi: 0xb5}, - {value: 0xa789, lo: 0xb6, hi: 0xbb}, - {value: 0xa869, lo: 0xbc, hi: 0xbc}, - {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, - {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, - // Block 0x86, offset 0x42b - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbb}, - {value: 0x0008, lo: 0xbc, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x87, offset 0x435 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x88, offset 0x43a - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x89, 
offset 0x43d - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x8a, offset 0x443 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x8b, offset 0x44a - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x8c, offset 0x44f - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9c}, - {value: 0x0040, lo: 0x9d, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x8d, offset 0x453 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x8e, offset 0x459 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xac}, - {value: 0x0008, lo: 0xad, hi: 0xbf}, - // Block 0x8f, offset 0x45e - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x90, offset 0x467 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x91, offset 0x46c - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x92, offset 0x472 - {value: 0x0000, lo: 0x06}, - {value: 0xe145, lo: 0x80, hi: 0x87}, - {value: 0xe1c5, lo: 0x88, hi: 0x8f}, - {value: 0xe145, lo: 0x90, hi: 0x97}, - {value: 0x8ad5, lo: 0x98, hi: 0x9f}, - {value: 0x8aed, lo: 0xa0, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xbf}, - // Block 0x93, offset 0x479 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x8aed, lo: 0xb0, hi: 0xb7}, - {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, - // Block 0x94, offset 0x480 - {value: 0x0000, lo: 0x06}, - {value: 0xe145, lo: 0x80, hi: 0x87}, - {value: 0xe1c5, lo: 0x88, hi: 0x8f}, - {value: 0xe145, lo: 0x90, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x95, offset 0x487 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x96, offset 0x48b - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xae}, - {value: 0x0018, lo: 0xaf, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x97, offset 0x490 - {value: 0x0000, lo: 
0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x98, offset 0x493 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xbf}, - // Block 0x99, offset 0x498 - {value: 0x0000, lo: 0x0b}, - {value: 0x0808, lo: 0x80, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0808, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0808, lo: 0x8a, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb6}, - {value: 0x0808, lo: 0xb7, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbb}, - {value: 0x0808, lo: 0xbc, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x0808, lo: 0xbf, hi: 0xbf}, - // Block 0x9a, offset 0x4a4 - {value: 0x0000, lo: 0x05}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x96}, - {value: 0x0818, lo: 0x97, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb6}, - {value: 0x0818, lo: 0xb7, hi: 0xbf}, - // Block 0x9b, offset 0x4aa - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xa6}, - {value: 0x0818, lo: 0xa7, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x9c, offset 0x4af - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb3}, - {value: 0x0808, lo: 0xb4, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x0818, lo: 0xbb, hi: 0xbf}, - // Block 0x9d, offset 0x4b6 - {value: 0x0000, lo: 0x07}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0818, lo: 0x96, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbe}, - {value: 0x0818, lo: 0xbf, hi: 0xbf}, - // Block 0x9e, offset 0x4be - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbb}, - {value: 0x0818, lo: 0xbc, hi: 0xbd}, - {value: 0x0808, lo: 0xbe, hi: 0xbf}, - // Block 0x9f, offset 0x4c3 - {value: 0x0000, lo: 0x03}, - {value: 0x0818, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x0818, lo: 0x92, hi: 0xbf}, - // Block 0xa0, offset 0x4c7 - {value: 0x0000, lo: 0x0f}, - {value: 0x0808, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8b}, - {value: 0x3308, lo: 0x8c, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x94}, - {value: 0x0808, lo: 0x95, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0x98}, - {value: 0x0808, lo: 0x99, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa1, offset 0x4d7 - {value: 0x0000, lo: 0x06}, - {value: 0x0818, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0x0818, lo: 0x90, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xbc}, - {value: 0x0818, lo: 0xbd, hi: 0xbf}, - // Block 0xa2, offset 0x4de - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0x9c}, - {value: 0x0818, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa3, offset 0x4e2 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb8}, - {value: 0x0018, lo: 0xb9, hi: 
0xbf}, - // Block 0xa4, offset 0x4e6 - {value: 0x0000, lo: 0x06}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0818, lo: 0x98, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb7}, - {value: 0x0818, lo: 0xb8, hi: 0xbf}, - // Block 0xa5, offset 0x4ed - {value: 0x0000, lo: 0x01}, - {value: 0x0808, lo: 0x80, hi: 0xbf}, - // Block 0xa6, offset 0x4ef - {value: 0x0000, lo: 0x02}, - {value: 0x0808, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xa7, offset 0x4f2 - {value: 0x0000, lo: 0x02}, - {value: 0x03dd, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xa8, offset 0x4f5 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb9}, - {value: 0x0818, lo: 0xba, hi: 0xbf}, - // Block 0xa9, offset 0x4f9 - {value: 0x0000, lo: 0x08}, - {value: 0x0908, lo: 0x80, hi: 0x80}, - {value: 0x0a08, lo: 0x81, hi: 0xa1}, - {value: 0x0c08, lo: 0xa2, hi: 0xa2}, - {value: 0x0a08, lo: 0xa3, hi: 0xa3}, - {value: 0x3308, lo: 0xa4, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0808, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xaa, offset 0x502 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0818, lo: 0xa0, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xab, offset 0x506 - {value: 0x0000, lo: 0x07}, - {value: 0x0808, lo: 0x80, hi: 0x9c}, - {value: 0x0818, lo: 0x9d, hi: 0xa6}, - {value: 0x0808, lo: 0xa7, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0a08, lo: 0xb0, hi: 0xb2}, - {value: 0x0c08, lo: 0xb3, hi: 0xb3}, - {value: 0x0a08, lo: 0xb4, hi: 0xbf}, - // Block 0xac, offset 0x50e - {value: 0x0000, lo: 0x07}, - {value: 0x0a08, lo: 0x80, hi: 0x84}, - {value: 0x0808, lo: 0x85, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x90}, - {value: 0x0a18, lo: 0x91, hi: 0x93}, - {value: 0x0c18, lo: 0x94, hi: 0x94}, - {value: 0x0818, lo: 0x95, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xad, offset 0x516 - {value: 0x0000, lo: 0x05}, - {value: 0x3008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xae, offset 0x51c - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x85}, - {value: 0x3b08, lo: 0x86, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x91}, - {value: 0x0018, lo: 0x92, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xaf, offset 0x525 - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb6}, - {value: 0x3008, lo: 0xb7, hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xb0, offset 0x531 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb1, offset 0x538 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, 
hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xb2}, - {value: 0x3b08, lo: 0xb3, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xb2, offset 0x541 - {value: 0x0000, lo: 0x09}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x3008, lo: 0x85, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb3, offset 0x54b - {value: 0x0000, lo: 0x06}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xbe}, - {value: 0x3008, lo: 0xbf, hi: 0xbf}, - // Block 0xb4, offset 0x552 - {value: 0x0000, lo: 0x0d}, - {value: 0x3808, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x88}, - {value: 0x3308, lo: 0x89, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb5, offset 0x560 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x3808, lo: 0xb5, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb6, offset 0x56d - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9e}, - {value: 0x0008, lo: 0x9f, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xb7, offset 0x57a - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x3308, lo: 0x9f, hi: 0x9f}, - {value: 0x3008, lo: 0xa0, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xa9}, - {value: 0x3b08, lo: 0xaa, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb8, offset 0x583 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xb9, offset 0x587 - {value: 0x0000, lo: 0x0e}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x3b08, lo: 0x82, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x84}, - {value: 0x3008, lo: 0x85, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x0018, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, 
hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9d}, - {value: 0x3308, lo: 0x9e, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xba, offset 0x596 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb8}, - {value: 0x3008, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xbb, offset 0x59e - {value: 0x0000, lo: 0x0a}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x81}, - {value: 0x3b08, lo: 0x82, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x85}, - {value: 0x0018, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xbc, offset 0x5a9 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbd, offset 0x5b2 - {value: 0x0000, lo: 0x05}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x9b}, - {value: 0x3308, lo: 0x9c, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xbe, offset 0x5b8 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbf, offset 0x5c0 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xc0, offset 0x5c9 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb5}, - {value: 0x3808, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xc1, offset 0x5d3 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xc2, offset 0x5d6 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9f}, - {value: 0x3008, lo: 0xa0, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xaa}, - {value: 0x3b08, lo: 0xab, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc3, offset 0x5e2 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, 
hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0xc4, offset 0x5eb - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc5, offset 0x5ee - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc6, offset 0x5f3 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x3b08, lo: 0xb4, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb8}, - {value: 0x3008, lo: 0xb9, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xc7, offset 0x5fe - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x3b08, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x3308, lo: 0x91, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x98}, - {value: 0x3308, lo: 0x99, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0xbf}, - // Block 0xc8, offset 0x607 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0x89}, - {value: 0x3308, lo: 0x8a, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x98}, - {value: 0x3b08, lo: 0x99, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9c}, - {value: 0x0008, lo: 0x9d, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0xa2}, - {value: 0x0040, lo: 0xa3, hi: 0xbf}, - // Block 0xc9, offset 0x613 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xca, offset 0x616 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xcb, offset 0x620 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xcc, offset 0x629 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xa9}, - {value: 0x3308, lo: 0xaa, hi: 0xb0}, - {value: 0x3008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xcd, offset 0x635 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, 
hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xce, offset 0x642 - {value: 0x0000, lo: 0x0c}, - {value: 0x3308, lo: 0x80, hi: 0x83}, - {value: 0x3b08, lo: 0x84, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xa9}, - {value: 0x0008, lo: 0xaa, hi: 0xbf}, - // Block 0xcf, offset 0x64f - {value: 0x0000, lo: 0x0d}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x3008, lo: 0x8a, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x92}, - {value: 0x3008, lo: 0x93, hi: 0x94}, - {value: 0x3308, lo: 0x95, hi: 0x95}, - {value: 0x3008, lo: 0x96, hi: 0x96}, - {value: 0x3b08, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xbf}, - // Block 0xd0, offset 0x65d - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xd1, offset 0x664 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xd2, offset 0x667 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xd3, offset 0x66c - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xd4, offset 0x66f - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xbf}, - // Block 0xd5, offset 0x672 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xd6, offset 0x675 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xd7, offset 0x67c - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb4}, - {value: 0x0018, lo: 0xb5, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xd8, offset 0x683 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0xd9, offset 0x687 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0018, lo: 0x84, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x0008, lo: 0xa3, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, 
hi: 0xbf}, - // Block 0xda, offset 0x692 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xdb, offset 0x695 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0xdc, offset 0x698 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0xdd, offset 0x69b - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x3008, lo: 0x91, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xde, offset 0x6a1 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x8e}, - {value: 0x3308, lo: 0x8f, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xdf, offset 0x6a6 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xbf}, - // Block 0xe0, offset 0x6aa - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe1, offset 0x6ad - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xe2, offset 0x6b0 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xe3, offset 0x6b3 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xe4, offset 0x6b6 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0xe5, offset 0x6b9 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xe6, offset 0x6be - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0018, lo: 0x9c, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x03c0, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xe7, offset 0x6c8 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xe8, offset 0x6cb - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xe9, offset 0x6cf - {value: 0x0000, lo: 0x0e}, - {value: 0x0018, lo: 0x80, hi: 0x9d}, - {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, - {value: 0xb601, lo: 0x9f, hi: 0x9f}, - {value: 0xb649, lo: 0xa0, hi: 0xa0}, - {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, - {value: 0xb719, lo: 0xa2, hi: 0xa2}, - {value: 0xb781, lo: 0xa3, hi: 0xa3}, - {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, - {value: 0x3018, lo: 0xa5, hi: 0xa6}, - {value: 0x3318, lo: 0xa7, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xac}, - {value: 0x3018, lo: 0xad, hi: 0xb2}, - {value: 0x0340, lo: 0xb3, hi: 0xba}, - {value: 0x3318, lo: 0xbb, hi: 0xbf}, - // Block 0xea, offset 0x6de - {value: 0x0000, lo: 0x0b}, - {value: 0x3318, lo: 0x80, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0x84}, - {value: 0x3318, lo: 0x85, hi: 0x8b}, - {value: 0x0018, lo: 0x8c, hi: 0xa9}, - {value: 0x3318, lo: 0xaa, hi: 0xad}, - 
{value: 0x0018, lo: 0xae, hi: 0xba}, - {value: 0xb851, lo: 0xbb, hi: 0xbb}, - {value: 0xb899, lo: 0xbc, hi: 0xbc}, - {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, - {value: 0xb949, lo: 0xbe, hi: 0xbe}, - {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, - // Block 0xeb, offset 0x6ea - {value: 0x0000, lo: 0x03}, - {value: 0xba19, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xec, offset 0x6ee - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x3318, lo: 0x82, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xed, offset 0x6f3 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xee, offset 0x6f7 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xef, offset 0x6fc - {value: 0x0000, lo: 0x03}, - {value: 0x3308, lo: 0x80, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0xf0, offset 0x700 - {value: 0x0000, lo: 0x04}, - {value: 0x3308, lo: 0x80, hi: 0xac}, - {value: 0x0018, lo: 0xad, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xf1, offset 0x705 - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x3308, lo: 0xa1, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xf2, offset 0x70e - {value: 0x0000, lo: 0x0a}, - {value: 0x3308, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x3308, lo: 0x88, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xa4}, - {value: 0x0040, lo: 0xa5, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0xf3, offset 0x719 - {value: 0x0000, lo: 0x05}, - {value: 0x0808, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x86}, - {value: 0x0818, lo: 0x87, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xf4, offset 0x71f - {value: 0x0000, lo: 0x07}, - {value: 0x0a08, lo: 0x80, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0818, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xf5, offset 0x727 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0xb0}, - {value: 0x0818, lo: 0xb1, hi: 0xbf}, - // Block 0xf6, offset 0x72a - {value: 0x0000, lo: 0x02}, - {value: 0x0818, lo: 0x80, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xf7, offset 0x72d - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xf8, offset 0x731 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0xf9, offset 0x735 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x93}, - {value: 0x0040, 
lo: 0x94, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xb0}, - {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0xfa, offset 0x73b - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xfb, offset 0x741 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, - {value: 0xc1c1, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xfc, offset 0x746 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0xa5}, - {value: 0x0018, lo: 0xa6, hi: 0xbf}, - // Block 0xfd, offset 0x749 - {value: 0x0000, lo: 0x0f}, - {value: 0xc7e9, lo: 0x80, hi: 0x80}, - {value: 0xc839, lo: 0x81, hi: 0x81}, - {value: 0xc889, lo: 0x82, hi: 0x82}, - {value: 0xc8d9, lo: 0x83, hi: 0x83}, - {value: 0xc929, lo: 0x84, hi: 0x84}, - {value: 0xc979, lo: 0x85, hi: 0x85}, - {value: 0xc9c9, lo: 0x86, hi: 0x86}, - {value: 0xca19, lo: 0x87, hi: 0x87}, - {value: 0xca69, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0xcab9, lo: 0x90, hi: 0x90}, - {value: 0xcad9, lo: 0x91, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xbf}, - // Block 0xfe, offset 0x759 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xff, offset 0x760 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x100, offset 0x763 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0xbf}, - // Block 0x101, offset 0x766 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x102, offset 0x76a - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x103, offset 0x770 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0x104, offset 0x775 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x105, offset 0x77a - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb2}, - {value: 0x0018, lo: 0xb3, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbf}, - // Block 0x106, offset 0x782 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0xa2}, - {value: 0x0040, lo: 0xa3, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x107, offset 0x787 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // 
Block 0x108, offset 0x78b - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0x109, offset 0x78f - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0x10a, offset 0x792 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x10b, offset 0x795 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x10c, offset 0x799 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x10d, offset 0x79d - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x10e, offset 0x7a0 - {value: 0x0020, lo: 0x0f}, - {value: 0xdeb9, lo: 0x80, hi: 0x89}, - {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, - {value: 0xdff9, lo: 0x8b, hi: 0x9c}, - {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, - {value: 0xe239, lo: 0x9e, hi: 0xa2}, - {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, - {value: 0xe2d9, lo: 0xa4, hi: 0xab}, - {value: 0x7ed5, lo: 0xac, hi: 0xac}, - {value: 0xe3d9, lo: 0xad, hi: 0xaf}, - {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, - {value: 0xe439, lo: 0xb1, hi: 0xb6}, - {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, - {value: 0xe4f9, lo: 0xba, hi: 0xba}, - {value: 0x8edd, lo: 0xbb, hi: 0xbb}, - {value: 0xe519, lo: 0xbc, hi: 0xbf}, - // Block 0x10f, offset 0x7b0 - {value: 0x0020, lo: 0x10}, - {value: 0x937d, lo: 0x80, hi: 0x80}, - {value: 0xf099, lo: 0x81, hi: 0x86}, - {value: 0x939d, lo: 0x87, hi: 0x8a}, - {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, - {value: 0xf159, lo: 0x8c, hi: 0x96}, - {value: 0x941d, lo: 0x97, hi: 0x97}, - {value: 0xf2b9, lo: 0x98, hi: 0xa3}, - {value: 0x943d, lo: 0xa4, hi: 0xa6}, - {value: 0xf439, lo: 0xa7, hi: 0xaa}, - {value: 0x949d, lo: 0xab, hi: 0xab}, - {value: 0xf4b9, lo: 0xac, hi: 0xac}, - {value: 0x94bd, lo: 0xad, hi: 0xad}, - {value: 0xf4d9, lo: 0xae, hi: 0xaf}, - {value: 0x94dd, lo: 0xb0, hi: 0xb1}, - {value: 0xf519, lo: 0xb2, hi: 0xbe}, - {value: 0x2040, lo: 0xbf, hi: 0xbf}, - // Block 0x110, offset 0x7c1 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0340, lo: 0x81, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x9f}, - {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0x111, offset 0x7c6 - {value: 0x0000, lo: 0x01}, - {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0x112, offset 0x7c8 - {value: 0x0000, lo: 0x01}, - {value: 0x33c0, lo: 0x80, hi: 0xbf}, - // Block 0x113, offset 0x7ca - {value: 0x0000, lo: 0x02}, - {value: 0x33c0, lo: 0x80, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, -} - -// Total table size 42466 bytes (41KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go deleted file mode 100644 index 8b65fa167..000000000 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ /dev/null @@ -1,4486 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// +build !go1.10 - -package idna - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
-const UnicodeVersion = "9.0.0" - -var mappings string = "" + // Size: 8175 bytes - "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + - "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + - "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + - "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + - "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + - "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + - "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + - "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + - "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + - "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + - "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + - "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + - "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + - "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + - "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + - "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + - "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + - "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + - "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + - "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + - "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + - "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + - "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + - "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + - "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + - "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + - ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + - "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + - "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + - "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + - "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + - "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + - "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + - "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + - "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + - "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + - "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + - "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + - "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + - "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + - "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + - "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + - "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + - "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + - 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + - "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + - "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + - "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + - "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + - "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + - "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + - "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + - "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + - "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + - "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + - "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + - "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + - "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + - "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + - "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + - "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + - "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + - "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + - "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + - "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + - "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + - "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + - "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + - "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + - "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + - "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + - "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + - "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + - " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + - "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + - "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + - "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + - "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + - "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + - "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + - "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + - "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + - "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + - "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + - "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + - "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + - "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + - "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + - "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + - "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + - "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + - "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + - "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + - "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + - "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + - "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + - "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + - "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + - "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + - "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + - "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + - "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + - "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + - "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + - "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + - "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + - "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + - "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + - "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + - "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + - "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + - "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + - "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + - "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + - "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + - "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + - "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + - "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + - "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + - "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + - "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + - "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + - "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + - "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + - "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + - "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + - "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + - "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + - "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + - "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + - "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + - "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + - "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + - "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + - "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + - 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + - "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" - -var xorData string = "" + // Size: 4855 bytes - "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + - "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + - "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + - "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + - "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + - "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + - "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + - "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + - "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + - "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + - "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + - "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + - "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + - "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + - "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + - "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + - "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + - "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + - "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + - "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + - "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + - "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + - "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + - "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + - "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + - "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + - "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + - "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + - "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + - "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + - "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + - "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + - "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + - "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + - "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + - "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + - "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + - "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + - "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + - "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + - "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + - "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + - ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + - 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + - "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + - "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + - "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + - "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + - "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + - "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + - "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + - "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + - "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + - "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + - "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + - "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + - "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + - "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + - "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + - "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + - "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + - "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + - "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + - "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + - "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + - "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + - "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + - "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + - "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + - "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + - "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + - "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + - "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + - "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + - "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + - "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + - "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + - "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + - "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + - "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + - "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + - "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ - "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + - "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + - "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + - "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + - "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + - "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + - "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + - "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + - "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + - "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + - "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + - ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + - "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + - "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + - "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + - "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + - "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + - "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + - "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + - "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + - "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + - "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + - "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + - "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + - "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + - "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + - "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + - "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + - "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + - "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + - "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + - "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + - "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + - "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + - "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + - "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + - "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + - "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + - "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + - "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + - "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + - "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + - "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + - "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + - "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ - "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + - "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + - "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + - "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + - "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + - "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + - "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + - "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + - "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + - "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + - "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + - "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + - "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + - "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + - "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + - "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + - "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + - "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + - "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + - "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + - "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + - "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + - "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + - "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + - "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + - "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + - "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + - "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + - "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + - "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + - "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + - "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + - "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + - "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + - "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + - "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + - "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + - "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + - "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + - "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + - "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + - "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + - "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + - "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + - "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + - 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + - "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + - "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + - "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + - "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + - "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + - "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + - "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + - "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + - "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + - "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + - "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + - "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + - "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + - "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + - "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + - "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + - "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + - "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + - "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + - "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + - "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + - "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + - "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + - "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + - "\x04\x03\x0c?\x05\x03\x0c" + - "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + - "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + - "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + - "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + - "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + - "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + - "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + - "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + - "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + - "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + - "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + - "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + - "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + - "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + - "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ - "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + - "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + - "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + - "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + - "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + - "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + - "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + - "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + - "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + - "\x05\x22\x05\x03\x050\x1d" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
- case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// idnaTrie. Total size: 28600 bytes (27.93 KiB). Checksum: 95575047b5d8fff. -type idnaTrie struct{} - -func newIdnaTrie(i int) *idnaTrie { - return &idnaTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { - switch { - case n < 124: - return uint16(idnaValues[n<<6+uint32(b)]) - default: - n -= 124 - return uint16(idnaSparse.lookup(n, b)) - } -} - -// idnaValues: 126 blocks, 8064 entries, 16128 bytes -// The third block is the zero block. 
-var idnaValues = [8064]uint16{ - // Block 0x0, offset 0x0 - 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, - 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, - 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, - 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, - 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, - 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, - 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, - 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, - 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, - 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, - 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, - // Block 0x1, offset 0x40 - 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, - 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, - 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, - 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, - 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, - 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, - 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, - 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, - 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, - 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, - 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, - 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, - 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, - 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, - 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, - 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, - 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, - 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, - 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, - 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, - 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, - // Block 0x4, offset 0x100 - 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, - 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, - 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, - 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, - 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, - 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, - 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, - 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, - 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, - 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, - 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, - // Block 0x5, offset 0x140 - 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, - 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, - 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, - 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, - 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, - 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, - 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, - 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, - 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, - 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, - 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, - // Block 0x6, offset 0x180 - 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, - 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, - 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, - 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, - 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, - 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, - 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, - 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, - 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, - 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, - 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, - 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, - 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, - 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, - 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, - 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, - 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, - 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, - 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, - 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, - 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, - // Block 0x8, offset 0x200 - 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, - 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, - 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, - 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, - 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, - 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, - 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, - 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, - 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, - 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, - 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, - // Block 0x9, offset 0x240 - 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, - 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, - 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, - 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, - 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, - 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, - 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, - 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, - 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, - 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, - 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, - // Block 0xa, offset 0x280 - 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, - 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, - 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, - 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, - 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, - 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, - 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, - 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, - 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, - 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, - 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, - 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, - 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, - 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, - 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, - 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, - 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, - 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, - 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, - 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, - 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, - // Block 0xc, offset 0x300 - 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, - 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, - 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, - 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, - 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, - 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, - 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, - 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, - 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, - 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, - 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, - // Block 0xd, offset 0x340 - 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, - 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, - 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, - 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, - 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, - 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, - 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, - 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, - 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, - 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, - 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, - // Block 0xe, offset 0x380 - 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, - 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, - 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, - 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, - 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, - 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, - 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, - 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, - 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, - 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, - 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, - 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, - 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, - 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, - 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, - 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, - 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, - 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, - 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, - 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, - 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, - // Block 0x10, offset 0x400 - 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, - 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, - 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, - 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, - 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, - 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, - 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, - 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, - 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, - 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, - 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, - // Block 0x11, offset 0x440 - 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, - 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, - 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, - 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, - 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, - 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, - 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, - 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, - 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, - 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, - 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, - // Block 0x12, offset 0x480 - 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, - 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, - 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, - 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, - 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, - 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, - 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, - 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, - 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, - 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, - 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, - 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, - 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, - 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, - 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, - 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, - 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, - 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, - 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, - 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, - 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, - // Block 0x14, offset 0x500 - 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, - 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, - 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, - 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, - 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, - 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, - 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, - 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, - 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, - 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, - 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, - // Block 0x15, offset 0x540 - 0x540: 0x3008, 0x541: 0x3308, 0x542: 0x3308, 0x543: 0x3308, 0x544: 0x3308, 0x545: 0x3308, - 0x546: 0x3308, 0x547: 0x3308, 0x548: 0x3308, 0x549: 0x3008, 0x54a: 0x3008, 0x54b: 0x3008, - 0x54c: 0x3008, 0x54d: 0x3b08, 0x54e: 0x3008, 0x54f: 0x3008, 0x550: 0x0008, 0x551: 0x3308, - 0x552: 0x3308, 0x553: 0x3308, 0x554: 0x3308, 0x555: 0x3308, 0x556: 0x3308, 0x557: 0x3308, - 0x558: 0x04c9, 0x559: 0x0501, 0x55a: 0x0539, 0x55b: 0x0571, 0x55c: 0x05a9, 0x55d: 0x05e1, - 0x55e: 0x0619, 0x55f: 0x0651, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x3308, 0x563: 0x3308, - 0x564: 0x0018, 0x565: 0x0018, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0008, - 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, - 0x570: 0x0018, 0x571: 0x0008, 0x572: 0x0008, 0x573: 0x0008, 0x574: 0x0008, 0x575: 0x0008, - 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0008, 0x57b: 0x0008, - 0x57c: 0x0008, 0x57d: 0x0008, 0x57e: 0x0008, 0x57f: 0x0008, - // Block 0x16, offset 0x580 - 0x580: 0x0008, 0x581: 0x3308, 0x582: 0x3008, 0x583: 0x3008, 0x584: 0x0040, 0x585: 0x0008, - 0x586: 0x0008, 0x587: 0x0008, 0x588: 0x0008, 0x589: 0x0008, 0x58a: 0x0008, 0x58b: 0x0008, - 
0x58c: 0x0008, 0x58d: 0x0040, 0x58e: 0x0040, 0x58f: 0x0008, 0x590: 0x0008, 0x591: 0x0040, - 0x592: 0x0040, 0x593: 0x0008, 0x594: 0x0008, 0x595: 0x0008, 0x596: 0x0008, 0x597: 0x0008, - 0x598: 0x0008, 0x599: 0x0008, 0x59a: 0x0008, 0x59b: 0x0008, 0x59c: 0x0008, 0x59d: 0x0008, - 0x59e: 0x0008, 0x59f: 0x0008, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x0008, 0x5a3: 0x0008, - 0x5a4: 0x0008, 0x5a5: 0x0008, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0040, - 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, - 0x5b0: 0x0008, 0x5b1: 0x0040, 0x5b2: 0x0008, 0x5b3: 0x0040, 0x5b4: 0x0040, 0x5b5: 0x0040, - 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0040, 0x5bb: 0x0040, - 0x5bc: 0x3308, 0x5bd: 0x0008, 0x5be: 0x3008, 0x5bf: 0x3008, - // Block 0x17, offset 0x5c0 - 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x0040, - 0x5c6: 0x0040, 0x5c7: 0x3008, 0x5c8: 0x3008, 0x5c9: 0x0040, 0x5ca: 0x0040, 0x5cb: 0x3008, - 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x0008, 0x5cf: 0x0040, 0x5d0: 0x0040, 0x5d1: 0x0040, - 0x5d2: 0x0040, 0x5d3: 0x0040, 0x5d4: 0x0040, 0x5d5: 0x0040, 0x5d6: 0x0040, 0x5d7: 0x3008, - 0x5d8: 0x0040, 0x5d9: 0x0040, 0x5da: 0x0040, 0x5db: 0x0040, 0x5dc: 0x0689, 0x5dd: 0x06c1, - 0x5de: 0x0040, 0x5df: 0x06f9, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308, - 0x5e4: 0x0040, 0x5e5: 0x0040, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008, - 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, - 0x5f0: 0x0008, 0x5f1: 0x0008, 0x5f2: 0x0018, 0x5f3: 0x0018, 0x5f4: 0x0018, 0x5f5: 0x0018, - 0x5f6: 0x0018, 0x5f7: 0x0018, 0x5f8: 0x0018, 0x5f9: 0x0018, 0x5fa: 0x0018, 0x5fb: 0x0018, - 0x5fc: 0x0040, 0x5fd: 0x0040, 0x5fe: 0x0040, 0x5ff: 0x0040, - // Block 0x18, offset 0x600 - 0x600: 0x0040, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008, - 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0040, - 0x60c: 0x0040, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040, - 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008, - 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008, - 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008, - 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040, - 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, - 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0731, 0x634: 0x0040, 0x635: 0x0008, - 0x636: 0x0769, 0x637: 0x0040, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040, - 0x63c: 0x3308, 0x63d: 0x0040, 0x63e: 0x3008, 0x63f: 0x3008, - // Block 0x19, offset 0x640 - 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x0040, 0x644: 0x0040, 0x645: 0x0040, - 0x646: 0x0040, 0x647: 0x3308, 0x648: 0x3308, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3308, - 0x64c: 0x3308, 0x64d: 0x3b08, 0x64e: 0x0040, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x3308, - 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x0040, - 0x658: 0x0040, 0x659: 0x07a1, 0x65a: 0x07d9, 0x65b: 0x0811, 0x65c: 0x0008, 0x65d: 0x0040, - 0x65e: 0x0849, 0x65f: 0x0040, 0x660: 0x0040, 0x661: 0x0040, 0x662: 0x0040, 0x663: 0x0040, - 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008, - 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, - 0x670: 0x3308, 0x671: 0x3308, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0008, 0x675: 0x3308, - 0x676: 0x0040, 0x677: 0x0040, 0x678: 0x0040, 0x679: 0x0040, 0x67a: 0x0040, 0x67b: 0x0040, - 0x67c: 0x0040, 0x67d: 0x0040, 0x67e: 0x0040, 0x67f: 0x0040, - // Block 0x1a, offset 0x680 - 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008, - 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0008, - 0x68c: 0x0008, 0x68d: 0x0008, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0008, - 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008, - 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008, - 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008, - 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040, - 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, - 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0040, 0x6b5: 0x0008, - 0x6b6: 0x0008, 0x6b7: 0x0008, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, - 0x6bc: 0x3308, 0x6bd: 0x0008, 0x6be: 0x3008, 0x6bf: 0x3008, - // Block 0x1b, offset 0x6c0 - 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3308, 0x6c4: 0x3308, 0x6c5: 0x3308, - 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x3008, 0x6ca: 0x0040, 0x6cb: 0x3008, - 0x6cc: 0x3008, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0008, 0x6d1: 0x0040, - 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040, - 0x6d8: 0x0040, 0x6d9: 0x0040, 0x6da: 0x0040, 0x6db: 0x0040, 0x6dc: 0x0040, 0x6dd: 0x0040, - 0x6de: 0x0040, 0x6df: 0x0040, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x3308, 0x6e3: 0x3308, - 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008, - 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, - 0x6f0: 0x0018, 0x6f1: 0x0018, 0x6f2: 0x0040, 0x6f3: 0x0040, 0x6f4: 0x0040, 0x6f5: 0x0040, - 0x6f6: 0x0040, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, - 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040, - // Block 0x1c, offset 0x700 - 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3008, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008, - 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008, - 0x70c: 0x0008, 0x70d: 0x0040, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0040, - 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008, - 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008, - 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008, - 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040, - 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, - 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008, - 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040, - 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3308, - // Block 0x1d, offset 0x740 - 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x0040, - 0x746: 0x0040, 0x747: 0x3008, 0x748: 0x3008, 0x749: 0x0040, 0x74a: 0x0040, 0x74b: 0x3008, - 0x74c: 0x3008, 
0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0040, 0x751: 0x0040, - 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x3308, 0x757: 0x3008, - 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0881, 0x75d: 0x08b9, - 0x75e: 0x0040, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308, - 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008, - 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, - 0x770: 0x0018, 0x771: 0x0008, 0x772: 0x0018, 0x773: 0x0018, 0x774: 0x0018, 0x775: 0x0018, - 0x776: 0x0018, 0x777: 0x0018, 0x778: 0x0040, 0x779: 0x0040, 0x77a: 0x0040, 0x77b: 0x0040, - 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x0040, 0x77f: 0x0040, - // Block 0x1e, offset 0x780 - 0x780: 0x0040, 0x781: 0x0040, 0x782: 0x3308, 0x783: 0x0008, 0x784: 0x0040, 0x785: 0x0008, - 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0040, - 0x78c: 0x0040, 0x78d: 0x0040, 0x78e: 0x0008, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040, - 0x792: 0x0008, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0040, 0x797: 0x0040, - 0x798: 0x0040, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0008, 0x79d: 0x0040, - 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0040, 0x7a1: 0x0040, 0x7a2: 0x0040, 0x7a3: 0x0008, - 0x7a4: 0x0008, 0x7a5: 0x0040, 0x7a6: 0x0040, 0x7a7: 0x0040, 0x7a8: 0x0008, 0x7a9: 0x0008, - 0x7aa: 0x0008, 0x7ab: 0x0040, 0x7ac: 0x0040, 0x7ad: 0x0040, 0x7ae: 0x0008, 0x7af: 0x0008, - 0x7b0: 0x0008, 0x7b1: 0x0008, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0008, 0x7b5: 0x0008, - 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040, - 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x3008, 0x7bf: 0x3008, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x3308, 0x7c1: 0x3008, 0x7c2: 0x3008, 0x7c3: 0x3008, 0x7c4: 0x3008, 0x7c5: 0x0040, - 0x7c6: 0x3308, 0x7c7: 0x3308, 0x7c8: 0x3308, 0x7c9: 0x0040, 0x7ca: 0x3308, 0x7cb: 0x3308, - 0x7cc: 0x3308, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040, - 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x0040, - 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0040, 0x7dd: 0x0040, - 0x7de: 0x0040, 0x7df: 0x0040, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308, - 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008, - 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, - 0x7f0: 0x0040, 0x7f1: 0x0040, 0x7f2: 0x0040, 0x7f3: 0x0040, 0x7f4: 0x0040, 0x7f5: 0x0040, - 0x7f6: 0x0040, 0x7f7: 0x0040, 0x7f8: 0x0018, 0x7f9: 0x0018, 0x7fa: 0x0018, 0x7fb: 0x0018, - 0x7fc: 0x0018, 0x7fd: 0x0018, 0x7fe: 0x0018, 0x7ff: 0x0018, - // Block 0x20, offset 0x800 - 0x800: 0x0008, 0x801: 0x3308, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x0040, 0x805: 0x0008, - 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0008, - 0x80c: 0x0008, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040, - 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0008, 0x817: 0x0008, - 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0008, 0x81c: 0x0008, 0x81d: 0x0008, - 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x0008, 0x823: 0x0008, - 0x824: 0x0008, 0x825: 0x0008, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0040, - 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, - 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0040, 0x835: 0x0008, - 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040, - 0x83c: 0x3308, 0x83d: 0x0008, 0x83e: 0x3008, 0x83f: 0x3308, - // Block 0x21, offset 0x840 - 0x840: 0x3008, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040, - 0x846: 0x3308, 0x847: 0x3008, 0x848: 0x3008, 0x849: 0x0040, 0x84a: 0x3008, 0x84b: 0x3008, - 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040, - 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3008, 0x856: 0x3008, 0x857: 0x0040, - 0x858: 0x0040, 0x859: 0x0040, 0x85a: 0x0040, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0040, - 0x85e: 0x0008, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308, - 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, - 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, - 0x870: 0x0040, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040, - 0x876: 0x0040, 0x877: 0x0040, 0x878: 0x0040, 0x879: 0x0040, 0x87a: 0x0040, 0x87b: 0x0040, - 0x87c: 0x0040, 0x87d: 0x0040, 0x87e: 0x0040, 0x87f: 0x0040, - // Block 0x22, offset 0x880 - 0x880: 0x3008, 0x881: 0x3308, 0x882: 0x3308, 0x883: 0x3308, 0x884: 0x3308, 0x885: 0x0040, - 0x886: 0x3008, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, - 0x88c: 0x3008, 0x88d: 0x3b08, 0x88e: 0x0008, 0x88f: 0x0018, 0x890: 0x0040, 0x891: 0x0040, - 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x3008, - 0x898: 0x0018, 0x899: 0x0018, 0x89a: 0x0018, 0x89b: 0x0018, 0x89c: 0x0018, 0x89d: 0x0018, - 0x89e: 0x0018, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, - 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, - 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, - 0x8b0: 0x0018, 0x8b1: 0x0018, 0x8b2: 0x0018, 0x8b3: 0x0018, 0x8b4: 0x0018, 0x8b5: 0x0018, - 0x8b6: 0x0018, 0x8b7: 0x0018, 0x8b8: 0x0018, 0x8b9: 0x0018, 0x8ba: 0x0008, 0x8bb: 0x0008, - 0x8bc: 0x0008, 0x8bd: 0x0008, 0x8be: 0x0008, 0x8bf: 0x0008, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x0040, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x0040, 0x8c4: 0x0008, 0x8c5: 0x0040, - 0x8c6: 0x0040, 0x8c7: 0x0008, 0x8c8: 0x0008, 0x8c9: 0x0040, 0x8ca: 0x0008, 0x8cb: 0x0040, - 0x8cc: 0x0040, 0x8cd: 0x0008, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040, - 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0008, - 0x8d8: 0x0040, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0008, 0x8dd: 0x0008, - 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0040, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, - 0x8e4: 0x0040, 0x8e5: 0x0008, 0x8e6: 0x0040, 0x8e7: 0x0008, 0x8e8: 0x0040, 0x8e9: 0x0040, - 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0040, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, - 0x8f0: 0x0008, 0x8f1: 0x3308, 0x8f2: 0x0008, 0x8f3: 0x0929, 0x8f4: 0x3308, 0x8f5: 0x3308, - 0x8f6: 0x3308, 0x8f7: 0x3308, 0x8f8: 0x3308, 0x8f9: 0x3308, 0x8fa: 0x0040, 0x8fb: 0x3308, - 0x8fc: 0x3308, 0x8fd: 0x0008, 0x8fe: 0x0040, 0x8ff: 0x0040, - // Block 0x24, offset 0x900 - 0x900: 0x0008, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x09d1, 0x904: 0x0008, 0x905: 0x0008, - 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0040, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, - 0x90c: 0x0008, 0x90d: 0x0a09, 
0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008, - 0x912: 0x0a41, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0a79, - 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0ab1, 0x91d: 0x0008, - 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, - 0x924: 0x0008, 0x925: 0x0008, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0ae9, - 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0040, 0x92e: 0x0040, 0x92f: 0x0040, - 0x930: 0x0040, 0x931: 0x3308, 0x932: 0x3308, 0x933: 0x0b21, 0x934: 0x3308, 0x935: 0x0b59, - 0x936: 0x0b91, 0x937: 0x0bc9, 0x938: 0x0c19, 0x939: 0x0c51, 0x93a: 0x3308, 0x93b: 0x3308, - 0x93c: 0x3308, 0x93d: 0x3308, 0x93e: 0x3308, 0x93f: 0x3008, - // Block 0x25, offset 0x940 - 0x940: 0x3308, 0x941: 0x0ca1, 0x942: 0x3308, 0x943: 0x3308, 0x944: 0x3b08, 0x945: 0x0018, - 0x946: 0x3308, 0x947: 0x3308, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, - 0x94c: 0x0008, 0x94d: 0x3308, 0x94e: 0x3308, 0x94f: 0x3308, 0x950: 0x3308, 0x951: 0x3308, - 0x952: 0x3308, 0x953: 0x0cd9, 0x954: 0x3308, 0x955: 0x3308, 0x956: 0x3308, 0x957: 0x3308, - 0x958: 0x0040, 0x959: 0x3308, 0x95a: 0x3308, 0x95b: 0x3308, 0x95c: 0x3308, 0x95d: 0x0d11, - 0x95e: 0x3308, 0x95f: 0x3308, 0x960: 0x3308, 0x961: 0x3308, 0x962: 0x0d49, 0x963: 0x3308, - 0x964: 0x3308, 0x965: 0x3308, 0x966: 0x3308, 0x967: 0x0d81, 0x968: 0x3308, 0x969: 0x3308, - 0x96a: 0x3308, 0x96b: 0x3308, 0x96c: 0x0db9, 0x96d: 0x3308, 0x96e: 0x3308, 0x96f: 0x3308, - 0x970: 0x3308, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x3308, 0x974: 0x3308, 0x975: 0x3308, - 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x0df1, 0x97a: 0x3308, 0x97b: 0x3308, - 0x97c: 0x3308, 0x97d: 0x0040, 0x97e: 0x0018, 0x97f: 0x0018, - // Block 0x26, offset 0x980 - 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0008, 0x984: 0x0008, 0x985: 0x0008, - 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, - 0x98c: 0x0008, 0x98d: 0x0008, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008, - 0x992: 0x0008, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0008, - 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0008, 0x99d: 0x0008, - 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008, - 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0008, - 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0039, 0x9ad: 0x0ed1, 0x9ae: 0x0ee9, 0x9af: 0x0008, - 0x9b0: 0x0ef9, 0x9b1: 0x0f09, 0x9b2: 0x0f19, 0x9b3: 0x0f31, 0x9b4: 0x0249, 0x9b5: 0x0f41, - 0x9b6: 0x0259, 0x9b7: 0x0f51, 0x9b8: 0x0359, 0x9b9: 0x0f61, 0x9ba: 0x0f71, 0x9bb: 0x0008, - 0x9bc: 0x00d9, 0x9bd: 0x0f81, 0x9be: 0x0f99, 0x9bf: 0x0269, - // Block 0x27, offset 0x9c0 - 0x9c0: 0x0fa9, 0x9c1: 0x0fb9, 0x9c2: 0x0279, 0x9c3: 0x0039, 0x9c4: 0x0fc9, 0x9c5: 0x0fe1, - 0x9c6: 0x059d, 0x9c7: 0x0ee9, 0x9c8: 0x0ef9, 0x9c9: 0x0f09, 0x9ca: 0x0ff9, 0x9cb: 0x1011, - 0x9cc: 0x1029, 0x9cd: 0x0f31, 0x9ce: 0x0008, 0x9cf: 0x0f51, 0x9d0: 0x0f61, 0x9d1: 0x1041, - 0x9d2: 0x00d9, 0x9d3: 0x1059, 0x9d4: 0x05b5, 0x9d5: 0x05b5, 0x9d6: 0x0f99, 0x9d7: 0x0fa9, - 0x9d8: 0x0fb9, 0x9d9: 0x059d, 0x9da: 0x1071, 0x9db: 0x1089, 0x9dc: 0x05cd, 0x9dd: 0x1099, - 0x9de: 0x10b1, 0x9df: 0x10c9, 0x9e0: 0x10e1, 0x9e1: 0x10f9, 0x9e2: 0x0f41, 0x9e3: 0x0269, - 0x9e4: 0x0fb9, 0x9e5: 0x1089, 0x9e6: 0x1099, 0x9e7: 0x10b1, 0x9e8: 0x1111, 0x9e9: 0x10e1, - 0x9ea: 0x10f9, 0x9eb: 0x0008, 0x9ec: 0x0008, 0x9ed: 0x0008, 0x9ee: 0x0008, 0x9ef: 
0x0008, - 0x9f0: 0x0008, 0x9f1: 0x0008, 0x9f2: 0x0008, 0x9f3: 0x0008, 0x9f4: 0x0008, 0x9f5: 0x0008, - 0x9f6: 0x0008, 0x9f7: 0x0008, 0x9f8: 0x1129, 0x9f9: 0x0008, 0x9fa: 0x0008, 0x9fb: 0x0008, - 0x9fc: 0x0008, 0x9fd: 0x0008, 0x9fe: 0x0008, 0x9ff: 0x0008, - // Block 0x28, offset 0xa00 - 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008, - 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008, - 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008, - 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008, - 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x1141, 0xa1c: 0x1159, 0xa1d: 0x1169, - 0xa1e: 0x1181, 0xa1f: 0x1029, 0xa20: 0x1199, 0xa21: 0x11a9, 0xa22: 0x11c1, 0xa23: 0x11d9, - 0xa24: 0x11f1, 0xa25: 0x1209, 0xa26: 0x1221, 0xa27: 0x05e5, 0xa28: 0x1239, 0xa29: 0x1251, - 0xa2a: 0xe17d, 0xa2b: 0x1269, 0xa2c: 0x1281, 0xa2d: 0x1299, 0xa2e: 0x12b1, 0xa2f: 0x12c9, - 0xa30: 0x12e1, 0xa31: 0x12f9, 0xa32: 0x1311, 0xa33: 0x1329, 0xa34: 0x1341, 0xa35: 0x1359, - 0xa36: 0x1371, 0xa37: 0x1389, 0xa38: 0x05fd, 0xa39: 0x13a1, 0xa3a: 0x13b9, 0xa3b: 0x13d1, - 0xa3c: 0x13e1, 0xa3d: 0x13f9, 0xa3e: 0x1411, 0xa3f: 0x1429, - // Block 0x29, offset 0xa40 - 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, - 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, - 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, - 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0xe00d, 0xa57: 0x0008, - 0xa58: 0xe00d, 0xa59: 0x0008, 0xa5a: 0xe00d, 0xa5b: 0x0008, 0xa5c: 0xe00d, 0xa5d: 0x0008, - 0xa5e: 0xe00d, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, - 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, - 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, - 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, - 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, - 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, - // Block 0x2a, offset 0xa80 - 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, - 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, - 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, - 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, - 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0615, 0xa9b: 0x0635, 0xa9c: 0x0008, 0xa9d: 0x0008, - 0xa9e: 0x1441, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, - 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, - 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, - 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, - 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, - 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, - // Block 0x2b, offset 0xac0 - 0xac0: 0x0008, 0xac1: 0x0008, 0xac2: 0x0008, 0xac3: 0x0008, 0xac4: 0x0008, 0xac5: 0x0008, - 0xac6: 0x0040, 0xac7: 0x0040, 0xac8: 0xe045, 0xac9: 0xe045, 0xaca: 0xe045, 0xacb: 0xe045, - 0xacc: 0xe045, 0xacd: 0xe045, 0xace: 0x0040, 
0xacf: 0x0040, 0xad0: 0x0008, 0xad1: 0x0008, - 0xad2: 0x0008, 0xad3: 0x0008, 0xad4: 0x0008, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, - 0xad8: 0x0040, 0xad9: 0xe045, 0xada: 0x0040, 0xadb: 0xe045, 0xadc: 0x0040, 0xadd: 0xe045, - 0xade: 0x0040, 0xadf: 0xe045, 0xae0: 0x0008, 0xae1: 0x0008, 0xae2: 0x0008, 0xae3: 0x0008, - 0xae4: 0x0008, 0xae5: 0x0008, 0xae6: 0x0008, 0xae7: 0x0008, 0xae8: 0xe045, 0xae9: 0xe045, - 0xaea: 0xe045, 0xaeb: 0xe045, 0xaec: 0xe045, 0xaed: 0xe045, 0xaee: 0xe045, 0xaef: 0xe045, - 0xaf0: 0x0008, 0xaf1: 0x1459, 0xaf2: 0x0008, 0xaf3: 0x1471, 0xaf4: 0x0008, 0xaf5: 0x1489, - 0xaf6: 0x0008, 0xaf7: 0x14a1, 0xaf8: 0x0008, 0xaf9: 0x14b9, 0xafa: 0x0008, 0xafb: 0x14d1, - 0xafc: 0x0008, 0xafd: 0x14e9, 0xafe: 0x0040, 0xaff: 0x0040, - // Block 0x2c, offset 0xb00 - 0xb00: 0x1501, 0xb01: 0x1531, 0xb02: 0x1561, 0xb03: 0x1591, 0xb04: 0x15c1, 0xb05: 0x15f1, - 0xb06: 0x1621, 0xb07: 0x1651, 0xb08: 0x1501, 0xb09: 0x1531, 0xb0a: 0x1561, 0xb0b: 0x1591, - 0xb0c: 0x15c1, 0xb0d: 0x15f1, 0xb0e: 0x1621, 0xb0f: 0x1651, 0xb10: 0x1681, 0xb11: 0x16b1, - 0xb12: 0x16e1, 0xb13: 0x1711, 0xb14: 0x1741, 0xb15: 0x1771, 0xb16: 0x17a1, 0xb17: 0x17d1, - 0xb18: 0x1681, 0xb19: 0x16b1, 0xb1a: 0x16e1, 0xb1b: 0x1711, 0xb1c: 0x1741, 0xb1d: 0x1771, - 0xb1e: 0x17a1, 0xb1f: 0x17d1, 0xb20: 0x1801, 0xb21: 0x1831, 0xb22: 0x1861, 0xb23: 0x1891, - 0xb24: 0x18c1, 0xb25: 0x18f1, 0xb26: 0x1921, 0xb27: 0x1951, 0xb28: 0x1801, 0xb29: 0x1831, - 0xb2a: 0x1861, 0xb2b: 0x1891, 0xb2c: 0x18c1, 0xb2d: 0x18f1, 0xb2e: 0x1921, 0xb2f: 0x1951, - 0xb30: 0x0008, 0xb31: 0x0008, 0xb32: 0x1981, 0xb33: 0x19b1, 0xb34: 0x19d9, 0xb35: 0x0040, - 0xb36: 0x0008, 0xb37: 0x1a01, 0xb38: 0xe045, 0xb39: 0xe045, 0xb3a: 0x064d, 0xb3b: 0x1459, - 0xb3c: 0x19b1, 0xb3d: 0x0666, 0xb3e: 0x1a31, 0xb3f: 0x0686, - // Block 0x2d, offset 0xb40 - 0xb40: 0x06a6, 0xb41: 0x1a4a, 0xb42: 0x1a79, 0xb43: 0x1aa9, 0xb44: 0x1ad1, 0xb45: 0x0040, - 0xb46: 0x0008, 0xb47: 0x1af9, 0xb48: 0x06c5, 0xb49: 0x1471, 0xb4a: 0x06dd, 0xb4b: 0x1489, - 0xb4c: 0x1aa9, 0xb4d: 0x1b2a, 0xb4e: 0x1b5a, 0xb4f: 0x1b8a, 0xb50: 0x0008, 0xb51: 0x0008, - 0xb52: 0x0008, 0xb53: 0x1bb9, 0xb54: 0x0040, 0xb55: 0x0040, 0xb56: 0x0008, 0xb57: 0x0008, - 0xb58: 0xe045, 0xb59: 0xe045, 0xb5a: 0x06f5, 0xb5b: 0x14a1, 0xb5c: 0x0040, 0xb5d: 0x1bd2, - 0xb5e: 0x1c02, 0xb5f: 0x1c32, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x1c61, - 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045, - 0xb6a: 0x070d, 0xb6b: 0x14d1, 0xb6c: 0xe04d, 0xb6d: 0x1c7a, 0xb6e: 0x03d2, 0xb6f: 0x1caa, - 0xb70: 0x0040, 0xb71: 0x0040, 0xb72: 0x1cb9, 0xb73: 0x1ce9, 0xb74: 0x1d11, 0xb75: 0x0040, - 0xb76: 0x0008, 0xb77: 0x1d39, 0xb78: 0x0725, 0xb79: 0x14b9, 0xb7a: 0x0515, 0xb7b: 0x14e9, - 0xb7c: 0x1ce9, 0xb7d: 0x073e, 0xb7e: 0x075e, 0xb7f: 0x0040, - // Block 0x2e, offset 0xb80 - 0xb80: 0x000a, 0xb81: 0x000a, 0xb82: 0x000a, 0xb83: 0x000a, 0xb84: 0x000a, 0xb85: 0x000a, - 0xb86: 0x000a, 0xb87: 0x000a, 0xb88: 0x000a, 0xb89: 0x000a, 0xb8a: 0x000a, 0xb8b: 0x03c0, - 0xb8c: 0x0003, 0xb8d: 0x0003, 0xb8e: 0x0340, 0xb8f: 0x0b40, 0xb90: 0x0018, 0xb91: 0xe00d, - 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x077e, - 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, - 0xb9e: 0x0018, 0xb9f: 0x0018, 0xba0: 0x0018, 0xba1: 0x0018, 0xba2: 0x0018, 0xba3: 0x0018, - 0xba4: 0x0040, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0018, 0xba8: 0x0040, 0xba9: 0x0040, - 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x000a, - 0xbb0: 
0x0018, 0xbb1: 0x0018, 0xbb2: 0x0018, 0xbb3: 0x1d69, 0xbb4: 0x1da1, 0xbb5: 0x0018, - 0xbb6: 0x1df1, 0xbb7: 0x1e29, 0xbb8: 0x0018, 0xbb9: 0x0018, 0xbba: 0x0018, 0xbbb: 0x0018, - 0xbbc: 0x1e7a, 0xbbd: 0x0018, 0xbbe: 0x079e, 0xbbf: 0x0018, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x0018, 0xbc1: 0x0018, 0xbc2: 0x0018, 0xbc3: 0x0018, 0xbc4: 0x0018, 0xbc5: 0x0018, - 0xbc6: 0x0018, 0xbc7: 0x1e92, 0xbc8: 0x1eaa, 0xbc9: 0x1ec2, 0xbca: 0x0018, 0xbcb: 0x0018, - 0xbcc: 0x0018, 0xbcd: 0x0018, 0xbce: 0x0018, 0xbcf: 0x0018, 0xbd0: 0x0018, 0xbd1: 0x0018, - 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x1ed9, - 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, - 0xbde: 0x0018, 0xbdf: 0x000a, 0xbe0: 0x03c0, 0xbe1: 0x0340, 0xbe2: 0x0340, 0xbe3: 0x0340, - 0xbe4: 0x03c0, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0040, 0xbe8: 0x0040, 0xbe9: 0x0040, - 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x0340, - 0xbf0: 0x1f41, 0xbf1: 0x0f41, 0xbf2: 0x0040, 0xbf3: 0x0040, 0xbf4: 0x1f51, 0xbf5: 0x1f61, - 0xbf6: 0x1f71, 0xbf7: 0x1f81, 0xbf8: 0x1f91, 0xbf9: 0x1fa1, 0xbfa: 0x1fb2, 0xbfb: 0x07bd, - 0xbfc: 0x1fc2, 0xbfd: 0x1fd2, 0xbfe: 0x1fe2, 0xbff: 0x0f71, - // Block 0x30, offset 0xc00 - 0xc00: 0x1f41, 0xc01: 0x00c9, 0xc02: 0x0069, 0xc03: 0x0079, 0xc04: 0x1f51, 0xc05: 0x1f61, - 0xc06: 0x1f71, 0xc07: 0x1f81, 0xc08: 0x1f91, 0xc09: 0x1fa1, 0xc0a: 0x1fb2, 0xc0b: 0x07d5, - 0xc0c: 0x1fc2, 0xc0d: 0x1fd2, 0xc0e: 0x1fe2, 0xc0f: 0x0040, 0xc10: 0x0039, 0xc11: 0x0f09, - 0xc12: 0x00d9, 0xc13: 0x0369, 0xc14: 0x0ff9, 0xc15: 0x0249, 0xc16: 0x0f51, 0xc17: 0x0359, - 0xc18: 0x0f61, 0xc19: 0x0f71, 0xc1a: 0x0f99, 0xc1b: 0x01d9, 0xc1c: 0x0fa9, 0xc1d: 0x0040, - 0xc1e: 0x0040, 0xc1f: 0x0040, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018, - 0xc24: 0x0018, 0xc25: 0x0018, 0xc26: 0x0018, 0xc27: 0x0018, 0xc28: 0x1ff1, 0xc29: 0x0018, - 0xc2a: 0x0018, 0xc2b: 0x0018, 0xc2c: 0x0018, 0xc2d: 0x0018, 0xc2e: 0x0018, 0xc2f: 0x0018, - 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0018, 0xc34: 0x0018, 0xc35: 0x0018, - 0xc36: 0x0018, 0xc37: 0x0018, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018, - 0xc3c: 0x0018, 0xc3d: 0x0018, 0xc3e: 0x0018, 0xc3f: 0x0040, - // Block 0x31, offset 0xc40 - 0xc40: 0x07ee, 0xc41: 0x080e, 0xc42: 0x1159, 0xc43: 0x082d, 0xc44: 0x0018, 0xc45: 0x084e, - 0xc46: 0x086e, 0xc47: 0x1011, 0xc48: 0x0018, 0xc49: 0x088d, 0xc4a: 0x0f31, 0xc4b: 0x0249, - 0xc4c: 0x0249, 0xc4d: 0x0249, 0xc4e: 0x0249, 0xc4f: 0x2009, 0xc50: 0x0f41, 0xc51: 0x0f41, - 0xc52: 0x0359, 0xc53: 0x0359, 0xc54: 0x0018, 0xc55: 0x0f71, 0xc56: 0x2021, 0xc57: 0x0018, - 0xc58: 0x0018, 0xc59: 0x0f99, 0xc5a: 0x2039, 0xc5b: 0x0269, 0xc5c: 0x0269, 0xc5d: 0x0269, - 0xc5e: 0x0018, 0xc5f: 0x0018, 0xc60: 0x2049, 0xc61: 0x08ad, 0xc62: 0x2061, 0xc63: 0x0018, - 0xc64: 0x13d1, 0xc65: 0x0018, 0xc66: 0x2079, 0xc67: 0x0018, 0xc68: 0x13d1, 0xc69: 0x0018, - 0xc6a: 0x0f51, 0xc6b: 0x2091, 0xc6c: 0x0ee9, 0xc6d: 0x1159, 0xc6e: 0x0018, 0xc6f: 0x0f09, - 0xc70: 0x0f09, 0xc71: 0x1199, 0xc72: 0x0040, 0xc73: 0x0f61, 0xc74: 0x00d9, 0xc75: 0x20a9, - 0xc76: 0x20c1, 0xc77: 0x20d9, 0xc78: 0x20f1, 0xc79: 0x0f41, 0xc7a: 0x0018, 0xc7b: 0x08cd, - 0xc7c: 0x2109, 0xc7d: 0x10b1, 0xc7e: 0x10b1, 0xc7f: 0x2109, - // Block 0x32, offset 0xc80 - 0xc80: 0x08ed, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0ef9, - 0xc86: 0x0ef9, 0xc87: 0x0f09, 0xc88: 0x0f41, 0xc89: 0x0259, 0xc8a: 0x0018, 0xc8b: 0x0018, - 0xc8c: 0x0018, 0xc8d: 0x0018, 0xc8e: 0x0008, 0xc8f: 0x0018, 
0xc90: 0x2121, 0xc91: 0x2151, - 0xc92: 0x2181, 0xc93: 0x21b9, 0xc94: 0x21e9, 0xc95: 0x2219, 0xc96: 0x2249, 0xc97: 0x2279, - 0xc98: 0x22a9, 0xc99: 0x22d9, 0xc9a: 0x2309, 0xc9b: 0x2339, 0xc9c: 0x2369, 0xc9d: 0x2399, - 0xc9e: 0x23c9, 0xc9f: 0x23f9, 0xca0: 0x0f41, 0xca1: 0x2421, 0xca2: 0x0905, 0xca3: 0x2439, - 0xca4: 0x1089, 0xca5: 0x2451, 0xca6: 0x0925, 0xca7: 0x2469, 0xca8: 0x2491, 0xca9: 0x0369, - 0xcaa: 0x24a9, 0xcab: 0x0945, 0xcac: 0x0359, 0xcad: 0x1159, 0xcae: 0x0ef9, 0xcaf: 0x0f61, - 0xcb0: 0x0f41, 0xcb1: 0x2421, 0xcb2: 0x0965, 0xcb3: 0x2439, 0xcb4: 0x1089, 0xcb5: 0x2451, - 0xcb6: 0x0985, 0xcb7: 0x2469, 0xcb8: 0x2491, 0xcb9: 0x0369, 0xcba: 0x24a9, 0xcbb: 0x09a5, - 0xcbc: 0x0359, 0xcbd: 0x1159, 0xcbe: 0x0ef9, 0xcbf: 0x0f61, - // Block 0x33, offset 0xcc0 - 0xcc0: 0x0018, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0018, - 0xcc6: 0x0018, 0xcc7: 0x0018, 0xcc8: 0x0018, 0xcc9: 0x0018, 0xcca: 0x0018, 0xccb: 0x0040, - 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, - 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, - 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0040, 0xcdd: 0x0040, - 0xcde: 0x0040, 0xcdf: 0x0040, 0xce0: 0x00c9, 0xce1: 0x0069, 0xce2: 0x0079, 0xce3: 0x1f51, - 0xce4: 0x1f61, 0xce5: 0x1f71, 0xce6: 0x1f81, 0xce7: 0x1f91, 0xce8: 0x1fa1, 0xce9: 0x2601, - 0xcea: 0x2619, 0xceb: 0x2631, 0xcec: 0x2649, 0xced: 0x2661, 0xcee: 0x2679, 0xcef: 0x2691, - 0xcf0: 0x26a9, 0xcf1: 0x26c1, 0xcf2: 0x26d9, 0xcf3: 0x26f1, 0xcf4: 0x0a06, 0xcf5: 0x0a26, - 0xcf6: 0x0a46, 0xcf7: 0x0a66, 0xcf8: 0x0a86, 0xcf9: 0x0aa6, 0xcfa: 0x0ac6, 0xcfb: 0x0ae6, - 0xcfc: 0x0b06, 0xcfd: 0x270a, 0xcfe: 0x2732, 0xcff: 0x275a, - // Block 0x34, offset 0xd00 - 0xd00: 0x2782, 0xd01: 0x27aa, 0xd02: 0x27d2, 0xd03: 0x27fa, 0xd04: 0x2822, 0xd05: 0x284a, - 0xd06: 0x2872, 0xd07: 0x289a, 0xd08: 0x0040, 0xd09: 0x0040, 0xd0a: 0x0040, 0xd0b: 0x0040, - 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, - 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, - 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0b26, 0xd1d: 0x0b46, - 0xd1e: 0x0b66, 0xd1f: 0x0b86, 0xd20: 0x0ba6, 0xd21: 0x0bc6, 0xd22: 0x0be6, 0xd23: 0x0c06, - 0xd24: 0x0c26, 0xd25: 0x0c46, 0xd26: 0x0c66, 0xd27: 0x0c86, 0xd28: 0x0ca6, 0xd29: 0x0cc6, - 0xd2a: 0x0ce6, 0xd2b: 0x0d06, 0xd2c: 0x0d26, 0xd2d: 0x0d46, 0xd2e: 0x0d66, 0xd2f: 0x0d86, - 0xd30: 0x0da6, 0xd31: 0x0dc6, 0xd32: 0x0de6, 0xd33: 0x0e06, 0xd34: 0x0e26, 0xd35: 0x0e46, - 0xd36: 0x0039, 0xd37: 0x0ee9, 0xd38: 0x1159, 0xd39: 0x0ef9, 0xd3a: 0x0f09, 0xd3b: 0x1199, - 0xd3c: 0x0f31, 0xd3d: 0x0249, 0xd3e: 0x0f41, 0xd3f: 0x0259, - // Block 0x35, offset 0xd40 - 0xd40: 0x0f51, 0xd41: 0x0359, 0xd42: 0x0f61, 0xd43: 0x0f71, 0xd44: 0x00d9, 0xd45: 0x0f99, - 0xd46: 0x2039, 0xd47: 0x0269, 0xd48: 0x01d9, 0xd49: 0x0fa9, 0xd4a: 0x0fb9, 0xd4b: 0x1089, - 0xd4c: 0x0279, 0xd4d: 0x0369, 0xd4e: 0x0289, 0xd4f: 0x13d1, 0xd50: 0x0039, 0xd51: 0x0ee9, - 0xd52: 0x1159, 0xd53: 0x0ef9, 0xd54: 0x0f09, 0xd55: 0x1199, 0xd56: 0x0f31, 0xd57: 0x0249, - 0xd58: 0x0f41, 0xd59: 0x0259, 0xd5a: 0x0f51, 0xd5b: 0x0359, 0xd5c: 0x0f61, 0xd5d: 0x0f71, - 0xd5e: 0x00d9, 0xd5f: 0x0f99, 0xd60: 0x2039, 0xd61: 0x0269, 0xd62: 0x01d9, 0xd63: 0x0fa9, - 0xd64: 0x0fb9, 0xd65: 0x1089, 0xd66: 0x0279, 0xd67: 0x0369, 0xd68: 0x0289, 0xd69: 0x13d1, - 0xd6a: 0x1f41, 0xd6b: 0x0018, 0xd6c: 0x0018, 0xd6d: 0x0018, 0xd6e: 0x0018, 0xd6f: 0x0018, - 0xd70: 0x0018, 0xd71: 
0x0018, 0xd72: 0x0018, 0xd73: 0x0018, 0xd74: 0x0018, 0xd75: 0x0018, - 0xd76: 0x0018, 0xd77: 0x0018, 0xd78: 0x0018, 0xd79: 0x0018, 0xd7a: 0x0018, 0xd7b: 0x0018, - 0xd7c: 0x0018, 0xd7d: 0x0018, 0xd7e: 0x0018, 0xd7f: 0x0018, - // Block 0x36, offset 0xd80 - 0xd80: 0x0008, 0xd81: 0x0008, 0xd82: 0x0008, 0xd83: 0x0008, 0xd84: 0x0008, 0xd85: 0x0008, - 0xd86: 0x0008, 0xd87: 0x0008, 0xd88: 0x0008, 0xd89: 0x0008, 0xd8a: 0x0008, 0xd8b: 0x0008, - 0xd8c: 0x0008, 0xd8d: 0x0008, 0xd8e: 0x0008, 0xd8f: 0x0008, 0xd90: 0x0008, 0xd91: 0x0008, - 0xd92: 0x0008, 0xd93: 0x0008, 0xd94: 0x0008, 0xd95: 0x0008, 0xd96: 0x0008, 0xd97: 0x0008, - 0xd98: 0x0008, 0xd99: 0x0008, 0xd9a: 0x0008, 0xd9b: 0x0008, 0xd9c: 0x0008, 0xd9d: 0x0008, - 0xd9e: 0x0008, 0xd9f: 0x0040, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0x2971, 0xda3: 0x0ebd, - 0xda4: 0x2989, 0xda5: 0x0008, 0xda6: 0x0008, 0xda7: 0xe07d, 0xda8: 0x0008, 0xda9: 0xe01d, - 0xdaa: 0x0008, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0x0fe1, 0xdae: 0x1281, 0xdaf: 0x0fc9, - 0xdb0: 0x1141, 0xdb1: 0x0008, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0008, 0xdb5: 0xe01d, - 0xdb6: 0x0008, 0xdb7: 0x0008, 0xdb8: 0x0008, 0xdb9: 0x0008, 0xdba: 0x0008, 0xdbb: 0x0008, - 0xdbc: 0x0259, 0xdbd: 0x1089, 0xdbe: 0x29a1, 0xdbf: 0x29b9, - // Block 0x37, offset 0xdc0 - 0xdc0: 0xe00d, 0xdc1: 0x0008, 0xdc2: 0xe00d, 0xdc3: 0x0008, 0xdc4: 0xe00d, 0xdc5: 0x0008, - 0xdc6: 0xe00d, 0xdc7: 0x0008, 0xdc8: 0xe00d, 0xdc9: 0x0008, 0xdca: 0xe00d, 0xdcb: 0x0008, - 0xdcc: 0xe00d, 0xdcd: 0x0008, 0xdce: 0xe00d, 0xdcf: 0x0008, 0xdd0: 0xe00d, 0xdd1: 0x0008, - 0xdd2: 0xe00d, 0xdd3: 0x0008, 0xdd4: 0xe00d, 0xdd5: 0x0008, 0xdd6: 0xe00d, 0xdd7: 0x0008, - 0xdd8: 0xe00d, 0xdd9: 0x0008, 0xdda: 0xe00d, 0xddb: 0x0008, 0xddc: 0xe00d, 0xddd: 0x0008, - 0xdde: 0xe00d, 0xddf: 0x0008, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0xe00d, 0xde3: 0x0008, - 0xde4: 0x0008, 0xde5: 0x0018, 0xde6: 0x0018, 0xde7: 0x0018, 0xde8: 0x0018, 0xde9: 0x0018, - 0xdea: 0x0018, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0xe01d, 0xdee: 0x0008, 0xdef: 0x3308, - 0xdf0: 0x3308, 0xdf1: 0x3308, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0040, 0xdf5: 0x0040, - 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018, - 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018, - // Block 0x38, offset 0xe00 - 0xe00: 0x26fd, 0xe01: 0x271d, 0xe02: 0x273d, 0xe03: 0x275d, 0xe04: 0x277d, 0xe05: 0x279d, - 0xe06: 0x27bd, 0xe07: 0x27dd, 0xe08: 0x27fd, 0xe09: 0x281d, 0xe0a: 0x283d, 0xe0b: 0x285d, - 0xe0c: 0x287d, 0xe0d: 0x289d, 0xe0e: 0x28bd, 0xe0f: 0x28dd, 0xe10: 0x28fd, 0xe11: 0x291d, - 0xe12: 0x293d, 0xe13: 0x295d, 0xe14: 0x297d, 0xe15: 0x299d, 0xe16: 0x0040, 0xe17: 0x0040, - 0xe18: 0x0040, 0xe19: 0x0040, 0xe1a: 0x0040, 0xe1b: 0x0040, 0xe1c: 0x0040, 0xe1d: 0x0040, - 0xe1e: 0x0040, 0xe1f: 0x0040, 0xe20: 0x0040, 0xe21: 0x0040, 0xe22: 0x0040, 0xe23: 0x0040, - 0xe24: 0x0040, 0xe25: 0x0040, 0xe26: 0x0040, 0xe27: 0x0040, 0xe28: 0x0040, 0xe29: 0x0040, - 0xe2a: 0x0040, 0xe2b: 0x0040, 0xe2c: 0x0040, 0xe2d: 0x0040, 0xe2e: 0x0040, 0xe2f: 0x0040, - 0xe30: 0x0040, 0xe31: 0x0040, 0xe32: 0x0040, 0xe33: 0x0040, 0xe34: 0x0040, 0xe35: 0x0040, - 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0040, 0xe3a: 0x0040, 0xe3b: 0x0040, - 0xe3c: 0x0040, 0xe3d: 0x0040, 0xe3e: 0x0040, 0xe3f: 0x0040, - // Block 0x39, offset 0xe40 - 0xe40: 0x000a, 0xe41: 0x0018, 0xe42: 0x29d1, 0xe43: 0x0018, 0xe44: 0x0018, 0xe45: 0x0008, - 0xe46: 0x0008, 0xe47: 0x0008, 0xe48: 0x0018, 0xe49: 0x0018, 0xe4a: 0x0018, 0xe4b: 0x0018, - 0xe4c: 0x0018, 0xe4d: 0x0018, 0xe4e: 0x0018, 0xe4f: 0x0018, 0xe50: 0x0018, 
0xe51: 0x0018, - 0xe52: 0x0018, 0xe53: 0x0018, 0xe54: 0x0018, 0xe55: 0x0018, 0xe56: 0x0018, 0xe57: 0x0018, - 0xe58: 0x0018, 0xe59: 0x0018, 0xe5a: 0x0018, 0xe5b: 0x0018, 0xe5c: 0x0018, 0xe5d: 0x0018, - 0xe5e: 0x0018, 0xe5f: 0x0018, 0xe60: 0x0018, 0xe61: 0x0018, 0xe62: 0x0018, 0xe63: 0x0018, - 0xe64: 0x0018, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018, - 0xe6a: 0x3308, 0xe6b: 0x3308, 0xe6c: 0x3308, 0xe6d: 0x3308, 0xe6e: 0x3018, 0xe6f: 0x3018, - 0xe70: 0x0018, 0xe71: 0x0018, 0xe72: 0x0018, 0xe73: 0x0018, 0xe74: 0x0018, 0xe75: 0x0018, - 0xe76: 0xe125, 0xe77: 0x0018, 0xe78: 0x29bd, 0xe79: 0x29dd, 0xe7a: 0x29fd, 0xe7b: 0x0018, - 0xe7c: 0x0008, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018, - // Block 0x3a, offset 0xe80 - 0xe80: 0x2b3d, 0xe81: 0x2b5d, 0xe82: 0x2b7d, 0xe83: 0x2b9d, 0xe84: 0x2bbd, 0xe85: 0x2bdd, - 0xe86: 0x2bdd, 0xe87: 0x2bdd, 0xe88: 0x2bfd, 0xe89: 0x2bfd, 0xe8a: 0x2bfd, 0xe8b: 0x2bfd, - 0xe8c: 0x2c1d, 0xe8d: 0x2c1d, 0xe8e: 0x2c1d, 0xe8f: 0x2c3d, 0xe90: 0x2c5d, 0xe91: 0x2c5d, - 0xe92: 0x2a7d, 0xe93: 0x2a7d, 0xe94: 0x2c5d, 0xe95: 0x2c5d, 0xe96: 0x2c7d, 0xe97: 0x2c7d, - 0xe98: 0x2c5d, 0xe99: 0x2c5d, 0xe9a: 0x2a7d, 0xe9b: 0x2a7d, 0xe9c: 0x2c5d, 0xe9d: 0x2c5d, - 0xe9e: 0x2c3d, 0xe9f: 0x2c3d, 0xea0: 0x2c9d, 0xea1: 0x2c9d, 0xea2: 0x2cbd, 0xea3: 0x2cbd, - 0xea4: 0x0040, 0xea5: 0x2cdd, 0xea6: 0x2cfd, 0xea7: 0x2d1d, 0xea8: 0x2d1d, 0xea9: 0x2d3d, - 0xeaa: 0x2d5d, 0xeab: 0x2d7d, 0xeac: 0x2d9d, 0xead: 0x2dbd, 0xeae: 0x2ddd, 0xeaf: 0x2dfd, - 0xeb0: 0x2e1d, 0xeb1: 0x2e3d, 0xeb2: 0x2e3d, 0xeb3: 0x2e5d, 0xeb4: 0x2e7d, 0xeb5: 0x2e7d, - 0xeb6: 0x2e9d, 0xeb7: 0x2ebd, 0xeb8: 0x2e5d, 0xeb9: 0x2edd, 0xeba: 0x2efd, 0xebb: 0x2edd, - 0xebc: 0x2e5d, 0xebd: 0x2f1d, 0xebe: 0x2f3d, 0xebf: 0x2f5d, - // Block 0x3b, offset 0xec0 - 0xec0: 0x2f7d, 0xec1: 0x2f9d, 0xec2: 0x2cfd, 0xec3: 0x2cdd, 0xec4: 0x2fbd, 0xec5: 0x2fdd, - 0xec6: 0x2ffd, 0xec7: 0x301d, 0xec8: 0x303d, 0xec9: 0x305d, 0xeca: 0x307d, 0xecb: 0x309d, - 0xecc: 0x30bd, 0xecd: 0x30dd, 0xece: 0x30fd, 0xecf: 0x0040, 0xed0: 0x0018, 0xed1: 0x0018, - 0xed2: 0x311d, 0xed3: 0x313d, 0xed4: 0x315d, 0xed5: 0x317d, 0xed6: 0x319d, 0xed7: 0x31bd, - 0xed8: 0x31dd, 0xed9: 0x31fd, 0xeda: 0x321d, 0xedb: 0x323d, 0xedc: 0x315d, 0xedd: 0x325d, - 0xede: 0x327d, 0xedf: 0x329d, 0xee0: 0x0008, 0xee1: 0x0008, 0xee2: 0x0008, 0xee3: 0x0008, - 0xee4: 0x0008, 0xee5: 0x0008, 0xee6: 0x0008, 0xee7: 0x0008, 0xee8: 0x0008, 0xee9: 0x0008, - 0xeea: 0x0008, 0xeeb: 0x0008, 0xeec: 0x0008, 0xeed: 0x0008, 0xeee: 0x0008, 0xeef: 0x0008, - 0xef0: 0x0008, 0xef1: 0x0008, 0xef2: 0x0008, 0xef3: 0x0008, 0xef4: 0x0008, 0xef5: 0x0008, - 0xef6: 0x0008, 0xef7: 0x0008, 0xef8: 0x0008, 0xef9: 0x0008, 0xefa: 0x0008, 0xefb: 0x0040, - 0xefc: 0x0040, 0xefd: 0x0040, 0xefe: 0x0040, 0xeff: 0x0040, - // Block 0x3c, offset 0xf00 - 0xf00: 0x36a2, 0xf01: 0x36d2, 0xf02: 0x3702, 0xf03: 0x3732, 0xf04: 0x32bd, 0xf05: 0x32dd, - 0xf06: 0x32fd, 0xf07: 0x331d, 0xf08: 0x0018, 0xf09: 0x0018, 0xf0a: 0x0018, 0xf0b: 0x0018, - 0xf0c: 0x0018, 0xf0d: 0x0018, 0xf0e: 0x0018, 0xf0f: 0x0018, 0xf10: 0x333d, 0xf11: 0x3761, - 0xf12: 0x3779, 0xf13: 0x3791, 0xf14: 0x37a9, 0xf15: 0x37c1, 0xf16: 0x37d9, 0xf17: 0x37f1, - 0xf18: 0x3809, 0xf19: 0x3821, 0xf1a: 0x3839, 0xf1b: 0x3851, 0xf1c: 0x3869, 0xf1d: 0x3881, - 0xf1e: 0x3899, 0xf1f: 0x38b1, 0xf20: 0x335d, 0xf21: 0x337d, 0xf22: 0x339d, 0xf23: 0x33bd, - 0xf24: 0x33dd, 0xf25: 0x33dd, 0xf26: 0x33fd, 0xf27: 0x341d, 0xf28: 0x343d, 0xf29: 0x345d, - 0xf2a: 0x347d, 0xf2b: 0x349d, 0xf2c: 0x34bd, 0xf2d: 0x34dd, 0xf2e: 0x34fd, 0xf2f: 0x351d, - 0xf30: 0x353d, 0xf31: 0x355d, 0xf32: 
0x357d, 0xf33: 0x359d, 0xf34: 0x35bd, 0xf35: 0x35dd, - 0xf36: 0x35fd, 0xf37: 0x361d, 0xf38: 0x363d, 0xf39: 0x365d, 0xf3a: 0x367d, 0xf3b: 0x369d, - 0xf3c: 0x38c9, 0xf3d: 0x3901, 0xf3e: 0x36bd, 0xf3f: 0x0018, - // Block 0x3d, offset 0xf40 - 0xf40: 0x36dd, 0xf41: 0x36fd, 0xf42: 0x371d, 0xf43: 0x373d, 0xf44: 0x375d, 0xf45: 0x377d, - 0xf46: 0x379d, 0xf47: 0x37bd, 0xf48: 0x37dd, 0xf49: 0x37fd, 0xf4a: 0x381d, 0xf4b: 0x383d, - 0xf4c: 0x385d, 0xf4d: 0x387d, 0xf4e: 0x389d, 0xf4f: 0x38bd, 0xf50: 0x38dd, 0xf51: 0x38fd, - 0xf52: 0x391d, 0xf53: 0x393d, 0xf54: 0x395d, 0xf55: 0x397d, 0xf56: 0x399d, 0xf57: 0x39bd, - 0xf58: 0x39dd, 0xf59: 0x39fd, 0xf5a: 0x3a1d, 0xf5b: 0x3a3d, 0xf5c: 0x3a5d, 0xf5d: 0x3a7d, - 0xf5e: 0x3a9d, 0xf5f: 0x3abd, 0xf60: 0x3add, 0xf61: 0x3afd, 0xf62: 0x3b1d, 0xf63: 0x3b3d, - 0xf64: 0x3b5d, 0xf65: 0x3b7d, 0xf66: 0x127d, 0xf67: 0x3b9d, 0xf68: 0x3bbd, 0xf69: 0x3bdd, - 0xf6a: 0x3bfd, 0xf6b: 0x3c1d, 0xf6c: 0x3c3d, 0xf6d: 0x3c5d, 0xf6e: 0x239d, 0xf6f: 0x3c7d, - 0xf70: 0x3c9d, 0xf71: 0x3939, 0xf72: 0x3951, 0xf73: 0x3969, 0xf74: 0x3981, 0xf75: 0x3999, - 0xf76: 0x39b1, 0xf77: 0x39c9, 0xf78: 0x39e1, 0xf79: 0x39f9, 0xf7a: 0x3a11, 0xf7b: 0x3a29, - 0xf7c: 0x3a41, 0xf7d: 0x3a59, 0xf7e: 0x3a71, 0xf7f: 0x3a89, - // Block 0x3e, offset 0xf80 - 0xf80: 0x3aa1, 0xf81: 0x3ac9, 0xf82: 0x3af1, 0xf83: 0x3b19, 0xf84: 0x3b41, 0xf85: 0x3b69, - 0xf86: 0x3b91, 0xf87: 0x3bb9, 0xf88: 0x3be1, 0xf89: 0x3c09, 0xf8a: 0x3c39, 0xf8b: 0x3c69, - 0xf8c: 0x3c99, 0xf8d: 0x3cbd, 0xf8e: 0x3cb1, 0xf8f: 0x3cdd, 0xf90: 0x3cfd, 0xf91: 0x3d15, - 0xf92: 0x3d2d, 0xf93: 0x3d45, 0xf94: 0x3d5d, 0xf95: 0x3d5d, 0xf96: 0x3d45, 0xf97: 0x3d75, - 0xf98: 0x07bd, 0xf99: 0x3d8d, 0xf9a: 0x3da5, 0xf9b: 0x3dbd, 0xf9c: 0x3dd5, 0xf9d: 0x3ded, - 0xf9e: 0x3e05, 0xf9f: 0x3e1d, 0xfa0: 0x3e35, 0xfa1: 0x3e4d, 0xfa2: 0x3e65, 0xfa3: 0x3e7d, - 0xfa4: 0x3e95, 0xfa5: 0x3e95, 0xfa6: 0x3ead, 0xfa7: 0x3ead, 0xfa8: 0x3ec5, 0xfa9: 0x3ec5, - 0xfaa: 0x3edd, 0xfab: 0x3ef5, 0xfac: 0x3f0d, 0xfad: 0x3f25, 0xfae: 0x3f3d, 0xfaf: 0x3f3d, - 0xfb0: 0x3f55, 0xfb1: 0x3f55, 0xfb2: 0x3f55, 0xfb3: 0x3f6d, 0xfb4: 0x3f85, 0xfb5: 0x3f9d, - 0xfb6: 0x3fb5, 0xfb7: 0x3f9d, 0xfb8: 0x3fcd, 0xfb9: 0x3fe5, 0xfba: 0x3f6d, 0xfbb: 0x3ffd, - 0xfbc: 0x4015, 0xfbd: 0x4015, 0xfbe: 0x4015, 0xfbf: 0x0040, - // Block 0x3f, offset 0xfc0 - 0xfc0: 0x3cc9, 0xfc1: 0x3d31, 0xfc2: 0x3d99, 0xfc3: 0x3e01, 0xfc4: 0x3e51, 0xfc5: 0x3eb9, - 0xfc6: 0x3f09, 0xfc7: 0x3f59, 0xfc8: 0x3fd9, 0xfc9: 0x4041, 0xfca: 0x4091, 0xfcb: 0x40e1, - 0xfcc: 0x4131, 0xfcd: 0x4199, 0xfce: 0x4201, 0xfcf: 0x4251, 0xfd0: 0x42a1, 0xfd1: 0x42d9, - 0xfd2: 0x4329, 0xfd3: 0x4391, 0xfd4: 0x43f9, 0xfd5: 0x4431, 0xfd6: 0x44b1, 0xfd7: 0x4549, - 0xfd8: 0x45c9, 0xfd9: 0x4619, 0xfda: 0x4699, 0xfdb: 0x4719, 0xfdc: 0x4781, 0xfdd: 0x47d1, - 0xfde: 0x4821, 0xfdf: 0x4871, 0xfe0: 0x48d9, 0xfe1: 0x4959, 0xfe2: 0x49c1, 0xfe3: 0x4a11, - 0xfe4: 0x4a61, 0xfe5: 0x4ab1, 0xfe6: 0x4ae9, 0xfe7: 0x4b21, 0xfe8: 0x4b59, 0xfe9: 0x4b91, - 0xfea: 0x4be1, 0xfeb: 0x4c31, 0xfec: 0x4cb1, 0xfed: 0x4d01, 0xfee: 0x4d69, 0xfef: 0x4de9, - 0xff0: 0x4e39, 0xff1: 0x4e71, 0xff2: 0x4ea9, 0xff3: 0x4f29, 0xff4: 0x4f91, 0xff5: 0x5011, - 0xff6: 0x5061, 0xff7: 0x50e1, 0xff8: 0x5119, 0xff9: 0x5169, 0xffa: 0x51b9, 0xffb: 0x5209, - 0xffc: 0x5259, 0xffd: 0x52a9, 0xffe: 0x5311, 0xfff: 0x5361, - // Block 0x40, offset 0x1000 - 0x1000: 0x5399, 0x1001: 0x53e9, 0x1002: 0x5439, 0x1003: 0x5489, 0x1004: 0x54f1, 0x1005: 0x5541, - 0x1006: 0x5591, 0x1007: 0x55e1, 0x1008: 0x5661, 0x1009: 0x56c9, 0x100a: 0x5701, 0x100b: 0x5781, - 0x100c: 0x57b9, 0x100d: 0x5821, 0x100e: 0x5889, 0x100f: 0x58d9, 0x1010: 0x5929, 
0x1011: 0x5979, - 0x1012: 0x59e1, 0x1013: 0x5a19, 0x1014: 0x5a69, 0x1015: 0x5ad1, 0x1016: 0x5b09, 0x1017: 0x5b89, - 0x1018: 0x5bd9, 0x1019: 0x5c01, 0x101a: 0x5c29, 0x101b: 0x5c51, 0x101c: 0x5c79, 0x101d: 0x5ca1, - 0x101e: 0x5cc9, 0x101f: 0x5cf1, 0x1020: 0x5d19, 0x1021: 0x5d41, 0x1022: 0x5d69, 0x1023: 0x5d99, - 0x1024: 0x5dc9, 0x1025: 0x5df9, 0x1026: 0x5e29, 0x1027: 0x5e59, 0x1028: 0x5e89, 0x1029: 0x5eb9, - 0x102a: 0x5ee9, 0x102b: 0x5f19, 0x102c: 0x5f49, 0x102d: 0x5f79, 0x102e: 0x5fa9, 0x102f: 0x5fd9, - 0x1030: 0x6009, 0x1031: 0x402d, 0x1032: 0x6039, 0x1033: 0x6051, 0x1034: 0x404d, 0x1035: 0x6069, - 0x1036: 0x6081, 0x1037: 0x6099, 0x1038: 0x406d, 0x1039: 0x406d, 0x103a: 0x60b1, 0x103b: 0x60c9, - 0x103c: 0x6101, 0x103d: 0x6139, 0x103e: 0x6171, 0x103f: 0x61a9, - // Block 0x41, offset 0x1040 - 0x1040: 0x6211, 0x1041: 0x6229, 0x1042: 0x408d, 0x1043: 0x6241, 0x1044: 0x6259, 0x1045: 0x6271, - 0x1046: 0x6289, 0x1047: 0x62a1, 0x1048: 0x40ad, 0x1049: 0x62b9, 0x104a: 0x62e1, 0x104b: 0x62f9, - 0x104c: 0x40cd, 0x104d: 0x40cd, 0x104e: 0x6311, 0x104f: 0x6329, 0x1050: 0x6341, 0x1051: 0x40ed, - 0x1052: 0x410d, 0x1053: 0x412d, 0x1054: 0x414d, 0x1055: 0x416d, 0x1056: 0x6359, 0x1057: 0x6371, - 0x1058: 0x6389, 0x1059: 0x63a1, 0x105a: 0x63b9, 0x105b: 0x418d, 0x105c: 0x63d1, 0x105d: 0x63e9, - 0x105e: 0x6401, 0x105f: 0x41ad, 0x1060: 0x41cd, 0x1061: 0x6419, 0x1062: 0x41ed, 0x1063: 0x420d, - 0x1064: 0x422d, 0x1065: 0x6431, 0x1066: 0x424d, 0x1067: 0x6449, 0x1068: 0x6479, 0x1069: 0x6211, - 0x106a: 0x426d, 0x106b: 0x428d, 0x106c: 0x42ad, 0x106d: 0x42cd, 0x106e: 0x64b1, 0x106f: 0x64f1, - 0x1070: 0x6539, 0x1071: 0x6551, 0x1072: 0x42ed, 0x1073: 0x6569, 0x1074: 0x6581, 0x1075: 0x6599, - 0x1076: 0x430d, 0x1077: 0x65b1, 0x1078: 0x65c9, 0x1079: 0x65b1, 0x107a: 0x65e1, 0x107b: 0x65f9, - 0x107c: 0x432d, 0x107d: 0x6611, 0x107e: 0x6629, 0x107f: 0x6611, - // Block 0x42, offset 0x1080 - 0x1080: 0x434d, 0x1081: 0x436d, 0x1082: 0x0040, 0x1083: 0x6641, 0x1084: 0x6659, 0x1085: 0x6671, - 0x1086: 0x6689, 0x1087: 0x0040, 0x1088: 0x66c1, 0x1089: 0x66d9, 0x108a: 0x66f1, 0x108b: 0x6709, - 0x108c: 0x6721, 0x108d: 0x6739, 0x108e: 0x6401, 0x108f: 0x6751, 0x1090: 0x6769, 0x1091: 0x6781, - 0x1092: 0x438d, 0x1093: 0x6799, 0x1094: 0x6289, 0x1095: 0x43ad, 0x1096: 0x43cd, 0x1097: 0x67b1, - 0x1098: 0x0040, 0x1099: 0x43ed, 0x109a: 0x67c9, 0x109b: 0x67e1, 0x109c: 0x67f9, 0x109d: 0x6811, - 0x109e: 0x6829, 0x109f: 0x6859, 0x10a0: 0x6889, 0x10a1: 0x68b1, 0x10a2: 0x68d9, 0x10a3: 0x6901, - 0x10a4: 0x6929, 0x10a5: 0x6951, 0x10a6: 0x6979, 0x10a7: 0x69a1, 0x10a8: 0x69c9, 0x10a9: 0x69f1, - 0x10aa: 0x6a21, 0x10ab: 0x6a51, 0x10ac: 0x6a81, 0x10ad: 0x6ab1, 0x10ae: 0x6ae1, 0x10af: 0x6b11, - 0x10b0: 0x6b41, 0x10b1: 0x6b71, 0x10b2: 0x6ba1, 0x10b3: 0x6bd1, 0x10b4: 0x6c01, 0x10b5: 0x6c31, - 0x10b6: 0x6c61, 0x10b7: 0x6c91, 0x10b8: 0x6cc1, 0x10b9: 0x6cf1, 0x10ba: 0x6d21, 0x10bb: 0x6d51, - 0x10bc: 0x6d81, 0x10bd: 0x6db1, 0x10be: 0x6de1, 0x10bf: 0x440d, - // Block 0x43, offset 0x10c0 - 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, - 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, - 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, - 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, - 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008, - 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008, - 0x10e4: 
0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008, - 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308, - 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308, - 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308, - 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008, - // Block 0x44, offset 0x1100 - 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, - 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, - 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, - 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, - 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x0ea1, 0x111d: 0x6e11, - 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008, - 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008, - 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008, - 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008, - 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008, - 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008, - // Block 0x45, offset 0x1140 - 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018, - 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018, - 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018, - 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008, - 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008, - 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008, - 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, - 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, - 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008, - 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008, - 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008, - // Block 0x46, offset 0x1180 - 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, - 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008, - 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, - 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, - 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, - 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, - 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, - 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, - 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008, - 0x11b6: 0x0008, 0x11b7: 0x0008, 
0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d, - 0x11bc: 0x0008, 0x11bd: 0x442d, 0x11be: 0xe00d, 0x11bf: 0x0008, - // Block 0x47, offset 0x11c0 - 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, - 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d, - 0x11cc: 0x0008, 0x11cd: 0x11d9, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, - 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, - 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, - 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, - 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, - 0x11ea: 0x6e29, 0x11eb: 0x1029, 0x11ec: 0x11c1, 0x11ed: 0x6e41, 0x11ee: 0x1221, 0x11ef: 0x0040, - 0x11f0: 0x6e59, 0x11f1: 0x6e71, 0x11f2: 0x1239, 0x11f3: 0x444d, 0x11f4: 0xe00d, 0x11f5: 0x0008, - 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0x0040, 0x11f9: 0x0040, 0x11fa: 0x0040, 0x11fb: 0x0040, - 0x11fc: 0x0040, 0x11fd: 0x0040, 0x11fe: 0x0040, 0x11ff: 0x0040, - // Block 0x48, offset 0x1200 - 0x1200: 0x64d5, 0x1201: 0x64f5, 0x1202: 0x6515, 0x1203: 0x6535, 0x1204: 0x6555, 0x1205: 0x6575, - 0x1206: 0x6595, 0x1207: 0x65b5, 0x1208: 0x65d5, 0x1209: 0x65f5, 0x120a: 0x6615, 0x120b: 0x6635, - 0x120c: 0x6655, 0x120d: 0x6675, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0x6695, 0x1211: 0x0008, - 0x1212: 0x66b5, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x66d5, 0x1216: 0x66f5, 0x1217: 0x6715, - 0x1218: 0x6735, 0x1219: 0x6755, 0x121a: 0x6775, 0x121b: 0x6795, 0x121c: 0x67b5, 0x121d: 0x67d5, - 0x121e: 0x67f5, 0x121f: 0x0008, 0x1220: 0x6815, 0x1221: 0x0008, 0x1222: 0x6835, 0x1223: 0x0008, - 0x1224: 0x0008, 0x1225: 0x6855, 0x1226: 0x6875, 0x1227: 0x0008, 0x1228: 0x0008, 0x1229: 0x0008, - 0x122a: 0x6895, 0x122b: 0x68b5, 0x122c: 0x68d5, 0x122d: 0x68f5, 0x122e: 0x6915, 0x122f: 0x6935, - 0x1230: 0x6955, 0x1231: 0x6975, 0x1232: 0x6995, 0x1233: 0x69b5, 0x1234: 0x69d5, 0x1235: 0x69f5, - 0x1236: 0x6a15, 0x1237: 0x6a35, 0x1238: 0x6a55, 0x1239: 0x6a75, 0x123a: 0x6a95, 0x123b: 0x6ab5, - 0x123c: 0x6ad5, 0x123d: 0x6af5, 0x123e: 0x6b15, 0x123f: 0x6b35, - // Block 0x49, offset 0x1240 - 0x1240: 0x7a95, 0x1241: 0x7ab5, 0x1242: 0x7ad5, 0x1243: 0x7af5, 0x1244: 0x7b15, 0x1245: 0x7b35, - 0x1246: 0x7b55, 0x1247: 0x7b75, 0x1248: 0x7b95, 0x1249: 0x7bb5, 0x124a: 0x7bd5, 0x124b: 0x7bf5, - 0x124c: 0x7c15, 0x124d: 0x7c35, 0x124e: 0x7c55, 0x124f: 0x6ec9, 0x1250: 0x6ef1, 0x1251: 0x6f19, - 0x1252: 0x7c75, 0x1253: 0x7c95, 0x1254: 0x7cb5, 0x1255: 0x6f41, 0x1256: 0x6f69, 0x1257: 0x6f91, - 0x1258: 0x7cd5, 0x1259: 0x7cf5, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x0040, - 0x125e: 0x0040, 0x125f: 0x0040, 0x1260: 0x0040, 0x1261: 0x0040, 0x1262: 0x0040, 0x1263: 0x0040, - 0x1264: 0x0040, 0x1265: 0x0040, 0x1266: 0x0040, 0x1267: 0x0040, 0x1268: 0x0040, 0x1269: 0x0040, - 0x126a: 0x0040, 0x126b: 0x0040, 0x126c: 0x0040, 0x126d: 0x0040, 0x126e: 0x0040, 0x126f: 0x0040, - 0x1270: 0x0040, 0x1271: 0x0040, 0x1272: 0x0040, 0x1273: 0x0040, 0x1274: 0x0040, 0x1275: 0x0040, - 0x1276: 0x0040, 0x1277: 0x0040, 0x1278: 0x0040, 0x1279: 0x0040, 0x127a: 0x0040, 0x127b: 0x0040, - 0x127c: 0x0040, 0x127d: 0x0040, 0x127e: 0x0040, 0x127f: 0x0040, - // Block 0x4a, offset 0x1280 - 0x1280: 0x6fb9, 0x1281: 0x6fd1, 0x1282: 0x6fe9, 0x1283: 0x7d15, 0x1284: 0x7d35, 0x1285: 0x7001, - 0x1286: 0x7001, 0x1287: 0x0040, 0x1288: 0x0040, 0x1289: 
0x0040, 0x128a: 0x0040, 0x128b: 0x0040, - 0x128c: 0x0040, 0x128d: 0x0040, 0x128e: 0x0040, 0x128f: 0x0040, 0x1290: 0x0040, 0x1291: 0x0040, - 0x1292: 0x0040, 0x1293: 0x7019, 0x1294: 0x7041, 0x1295: 0x7069, 0x1296: 0x7091, 0x1297: 0x70b9, - 0x1298: 0x0040, 0x1299: 0x0040, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x70e1, - 0x129e: 0x3308, 0x129f: 0x7109, 0x12a0: 0x7131, 0x12a1: 0x20a9, 0x12a2: 0x20f1, 0x12a3: 0x7149, - 0x12a4: 0x7161, 0x12a5: 0x7179, 0x12a6: 0x7191, 0x12a7: 0x71a9, 0x12a8: 0x71c1, 0x12a9: 0x1fb2, - 0x12aa: 0x71d9, 0x12ab: 0x7201, 0x12ac: 0x7229, 0x12ad: 0x7261, 0x12ae: 0x7299, 0x12af: 0x72c1, - 0x12b0: 0x72e9, 0x12b1: 0x7311, 0x12b2: 0x7339, 0x12b3: 0x7361, 0x12b4: 0x7389, 0x12b5: 0x73b1, - 0x12b6: 0x73d9, 0x12b7: 0x0040, 0x12b8: 0x7401, 0x12b9: 0x7429, 0x12ba: 0x7451, 0x12bb: 0x7479, - 0x12bc: 0x74a1, 0x12bd: 0x0040, 0x12be: 0x74c9, 0x12bf: 0x0040, - // Block 0x4b, offset 0x12c0 - 0x12c0: 0x74f1, 0x12c1: 0x7519, 0x12c2: 0x0040, 0x12c3: 0x7541, 0x12c4: 0x7569, 0x12c5: 0x0040, - 0x12c6: 0x7591, 0x12c7: 0x75b9, 0x12c8: 0x75e1, 0x12c9: 0x7609, 0x12ca: 0x7631, 0x12cb: 0x7659, - 0x12cc: 0x7681, 0x12cd: 0x76a9, 0x12ce: 0x76d1, 0x12cf: 0x76f9, 0x12d0: 0x7721, 0x12d1: 0x7721, - 0x12d2: 0x7739, 0x12d3: 0x7739, 0x12d4: 0x7739, 0x12d5: 0x7739, 0x12d6: 0x7751, 0x12d7: 0x7751, - 0x12d8: 0x7751, 0x12d9: 0x7751, 0x12da: 0x7769, 0x12db: 0x7769, 0x12dc: 0x7769, 0x12dd: 0x7769, - 0x12de: 0x7781, 0x12df: 0x7781, 0x12e0: 0x7781, 0x12e1: 0x7781, 0x12e2: 0x7799, 0x12e3: 0x7799, - 0x12e4: 0x7799, 0x12e5: 0x7799, 0x12e6: 0x77b1, 0x12e7: 0x77b1, 0x12e8: 0x77b1, 0x12e9: 0x77b1, - 0x12ea: 0x77c9, 0x12eb: 0x77c9, 0x12ec: 0x77c9, 0x12ed: 0x77c9, 0x12ee: 0x77e1, 0x12ef: 0x77e1, - 0x12f0: 0x77e1, 0x12f1: 0x77e1, 0x12f2: 0x77f9, 0x12f3: 0x77f9, 0x12f4: 0x77f9, 0x12f5: 0x77f9, - 0x12f6: 0x7811, 0x12f7: 0x7811, 0x12f8: 0x7811, 0x12f9: 0x7811, 0x12fa: 0x7829, 0x12fb: 0x7829, - 0x12fc: 0x7829, 0x12fd: 0x7829, 0x12fe: 0x7841, 0x12ff: 0x7841, - // Block 0x4c, offset 0x1300 - 0x1300: 0x7841, 0x1301: 0x7841, 0x1302: 0x7859, 0x1303: 0x7859, 0x1304: 0x7871, 0x1305: 0x7871, - 0x1306: 0x7889, 0x1307: 0x7889, 0x1308: 0x78a1, 0x1309: 0x78a1, 0x130a: 0x78b9, 0x130b: 0x78b9, - 0x130c: 0x78d1, 0x130d: 0x78d1, 0x130e: 0x78e9, 0x130f: 0x78e9, 0x1310: 0x78e9, 0x1311: 0x78e9, - 0x1312: 0x7901, 0x1313: 0x7901, 0x1314: 0x7901, 0x1315: 0x7901, 0x1316: 0x7919, 0x1317: 0x7919, - 0x1318: 0x7919, 0x1319: 0x7919, 0x131a: 0x7931, 0x131b: 0x7931, 0x131c: 0x7931, 0x131d: 0x7931, - 0x131e: 0x7949, 0x131f: 0x7949, 0x1320: 0x7961, 0x1321: 0x7961, 0x1322: 0x7961, 0x1323: 0x7961, - 0x1324: 0x7979, 0x1325: 0x7979, 0x1326: 0x7991, 0x1327: 0x7991, 0x1328: 0x7991, 0x1329: 0x7991, - 0x132a: 0x79a9, 0x132b: 0x79a9, 0x132c: 0x79a9, 0x132d: 0x79a9, 0x132e: 0x79c1, 0x132f: 0x79c1, - 0x1330: 0x79d9, 0x1331: 0x79d9, 0x1332: 0x0818, 0x1333: 0x0818, 0x1334: 0x0818, 0x1335: 0x0818, - 0x1336: 0x0818, 0x1337: 0x0818, 0x1338: 0x0818, 0x1339: 0x0818, 0x133a: 0x0818, 0x133b: 0x0818, - 0x133c: 0x0818, 0x133d: 0x0818, 0x133e: 0x0818, 0x133f: 0x0818, - // Block 0x4d, offset 0x1340 - 0x1340: 0x0818, 0x1341: 0x0818, 0x1342: 0x0040, 0x1343: 0x0040, 0x1344: 0x0040, 0x1345: 0x0040, - 0x1346: 0x0040, 0x1347: 0x0040, 0x1348: 0x0040, 0x1349: 0x0040, 0x134a: 0x0040, 0x134b: 0x0040, - 0x134c: 0x0040, 0x134d: 0x0040, 0x134e: 0x0040, 0x134f: 0x0040, 0x1350: 0x0040, 0x1351: 0x0040, - 0x1352: 0x0040, 0x1353: 0x79f1, 0x1354: 0x79f1, 0x1355: 0x79f1, 0x1356: 0x79f1, 0x1357: 0x7a09, - 0x1358: 0x7a09, 0x1359: 0x7a21, 0x135a: 0x7a21, 0x135b: 0x7a39, 0x135c: 0x7a39, 
0x135d: 0x0479, - 0x135e: 0x7a51, 0x135f: 0x7a51, 0x1360: 0x7a69, 0x1361: 0x7a69, 0x1362: 0x7a81, 0x1363: 0x7a81, - 0x1364: 0x7a99, 0x1365: 0x7a99, 0x1366: 0x7a99, 0x1367: 0x7a99, 0x1368: 0x7ab1, 0x1369: 0x7ab1, - 0x136a: 0x7ac9, 0x136b: 0x7ac9, 0x136c: 0x7af1, 0x136d: 0x7af1, 0x136e: 0x7b19, 0x136f: 0x7b19, - 0x1370: 0x7b41, 0x1371: 0x7b41, 0x1372: 0x7b69, 0x1373: 0x7b69, 0x1374: 0x7b91, 0x1375: 0x7b91, - 0x1376: 0x7bb9, 0x1377: 0x7bb9, 0x1378: 0x7bb9, 0x1379: 0x7be1, 0x137a: 0x7be1, 0x137b: 0x7be1, - 0x137c: 0x7c09, 0x137d: 0x7c09, 0x137e: 0x7c09, 0x137f: 0x7c09, - // Block 0x4e, offset 0x1380 - 0x1380: 0x85f9, 0x1381: 0x8621, 0x1382: 0x8649, 0x1383: 0x8671, 0x1384: 0x8699, 0x1385: 0x86c1, - 0x1386: 0x86e9, 0x1387: 0x8711, 0x1388: 0x8739, 0x1389: 0x8761, 0x138a: 0x8789, 0x138b: 0x87b1, - 0x138c: 0x87d9, 0x138d: 0x8801, 0x138e: 0x8829, 0x138f: 0x8851, 0x1390: 0x8879, 0x1391: 0x88a1, - 0x1392: 0x88c9, 0x1393: 0x88f1, 0x1394: 0x8919, 0x1395: 0x8941, 0x1396: 0x8969, 0x1397: 0x8991, - 0x1398: 0x89b9, 0x1399: 0x89e1, 0x139a: 0x8a09, 0x139b: 0x8a31, 0x139c: 0x8a59, 0x139d: 0x8a81, - 0x139e: 0x8aaa, 0x139f: 0x8ada, 0x13a0: 0x8b0a, 0x13a1: 0x8b3a, 0x13a2: 0x8b6a, 0x13a3: 0x8b9a, - 0x13a4: 0x8bc9, 0x13a5: 0x8bf1, 0x13a6: 0x7c71, 0x13a7: 0x8c19, 0x13a8: 0x7be1, 0x13a9: 0x7c99, - 0x13aa: 0x8c41, 0x13ab: 0x8c69, 0x13ac: 0x7d39, 0x13ad: 0x8c91, 0x13ae: 0x7d61, 0x13af: 0x7d89, - 0x13b0: 0x8cb9, 0x13b1: 0x8ce1, 0x13b2: 0x7e29, 0x13b3: 0x8d09, 0x13b4: 0x7e51, 0x13b5: 0x7e79, - 0x13b6: 0x8d31, 0x13b7: 0x8d59, 0x13b8: 0x7ec9, 0x13b9: 0x8d81, 0x13ba: 0x7ef1, 0x13bb: 0x7f19, - 0x13bc: 0x83a1, 0x13bd: 0x83c9, 0x13be: 0x8441, 0x13bf: 0x8469, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x8491, 0x13c1: 0x8531, 0x13c2: 0x8559, 0x13c3: 0x8581, 0x13c4: 0x85a9, 0x13c5: 0x8649, - 0x13c6: 0x8671, 0x13c7: 0x8699, 0x13c8: 0x8da9, 0x13c9: 0x8739, 0x13ca: 0x8dd1, 0x13cb: 0x8df9, - 0x13cc: 0x8829, 0x13cd: 0x8e21, 0x13ce: 0x8851, 0x13cf: 0x8879, 0x13d0: 0x8a81, 0x13d1: 0x8e49, - 0x13d2: 0x8e71, 0x13d3: 0x89b9, 0x13d4: 0x8e99, 0x13d5: 0x89e1, 0x13d6: 0x8a09, 0x13d7: 0x7c21, - 0x13d8: 0x7c49, 0x13d9: 0x8ec1, 0x13da: 0x7c71, 0x13db: 0x8ee9, 0x13dc: 0x7cc1, 0x13dd: 0x7ce9, - 0x13de: 0x7d11, 0x13df: 0x7d39, 0x13e0: 0x8f11, 0x13e1: 0x7db1, 0x13e2: 0x7dd9, 0x13e3: 0x7e01, - 0x13e4: 0x7e29, 0x13e5: 0x8f39, 0x13e6: 0x7ec9, 0x13e7: 0x7f41, 0x13e8: 0x7f69, 0x13e9: 0x7f91, - 0x13ea: 0x7fb9, 0x13eb: 0x7fe1, 0x13ec: 0x8031, 0x13ed: 0x8059, 0x13ee: 0x8081, 0x13ef: 0x80a9, - 0x13f0: 0x80d1, 0x13f1: 0x80f9, 0x13f2: 0x8f61, 0x13f3: 0x8121, 0x13f4: 0x8149, 0x13f5: 0x8171, - 0x13f6: 0x8199, 0x13f7: 0x81c1, 0x13f8: 0x81e9, 0x13f9: 0x8239, 0x13fa: 0x8261, 0x13fb: 0x8289, - 0x13fc: 0x82b1, 0x13fd: 0x82d9, 0x13fe: 0x8301, 0x13ff: 0x8329, - // Block 0x50, offset 0x1400 - 0x1400: 0x8351, 0x1401: 0x8379, 0x1402: 0x83f1, 0x1403: 0x8419, 0x1404: 0x84b9, 0x1405: 0x84e1, - 0x1406: 0x8509, 0x1407: 0x8531, 0x1408: 0x8559, 0x1409: 0x85d1, 0x140a: 0x85f9, 0x140b: 0x8621, - 0x140c: 0x8649, 0x140d: 0x8f89, 0x140e: 0x86c1, 0x140f: 0x86e9, 0x1410: 0x8711, 0x1411: 0x8739, - 0x1412: 0x87b1, 0x1413: 0x87d9, 0x1414: 0x8801, 0x1415: 0x8829, 0x1416: 0x8fb1, 0x1417: 0x88a1, - 0x1418: 0x88c9, 0x1419: 0x8fd9, 0x141a: 0x8941, 0x141b: 0x8969, 0x141c: 0x8991, 0x141d: 0x89b9, - 0x141e: 0x9001, 0x141f: 0x7c71, 0x1420: 0x8ee9, 0x1421: 0x7d39, 0x1422: 0x8f11, 0x1423: 0x7e29, - 0x1424: 0x8f39, 0x1425: 0x7ec9, 0x1426: 0x9029, 0x1427: 0x80d1, 0x1428: 0x9051, 0x1429: 0x9079, - 0x142a: 0x90a1, 0x142b: 0x8531, 0x142c: 0x8559, 0x142d: 0x8649, 0x142e: 0x8829, 0x142f: 0x8fb1, - 0x1430: 
0x89b9, 0x1431: 0x9001, 0x1432: 0x90c9, 0x1433: 0x9101, 0x1434: 0x9139, 0x1435: 0x9171, - 0x1436: 0x9199, 0x1437: 0x91c1, 0x1438: 0x91e9, 0x1439: 0x9211, 0x143a: 0x9239, 0x143b: 0x9261, - 0x143c: 0x9289, 0x143d: 0x92b1, 0x143e: 0x92d9, 0x143f: 0x9301, - // Block 0x51, offset 0x1440 - 0x1440: 0x9329, 0x1441: 0x9351, 0x1442: 0x9379, 0x1443: 0x93a1, 0x1444: 0x93c9, 0x1445: 0x93f1, - 0x1446: 0x9419, 0x1447: 0x9441, 0x1448: 0x9469, 0x1449: 0x9491, 0x144a: 0x94b9, 0x144b: 0x94e1, - 0x144c: 0x9079, 0x144d: 0x9509, 0x144e: 0x9531, 0x144f: 0x9559, 0x1450: 0x9581, 0x1451: 0x9171, - 0x1452: 0x9199, 0x1453: 0x91c1, 0x1454: 0x91e9, 0x1455: 0x9211, 0x1456: 0x9239, 0x1457: 0x9261, - 0x1458: 0x9289, 0x1459: 0x92b1, 0x145a: 0x92d9, 0x145b: 0x9301, 0x145c: 0x9329, 0x145d: 0x9351, - 0x145e: 0x9379, 0x145f: 0x93a1, 0x1460: 0x93c9, 0x1461: 0x93f1, 0x1462: 0x9419, 0x1463: 0x9441, - 0x1464: 0x9469, 0x1465: 0x9491, 0x1466: 0x94b9, 0x1467: 0x94e1, 0x1468: 0x9079, 0x1469: 0x9509, - 0x146a: 0x9531, 0x146b: 0x9559, 0x146c: 0x9581, 0x146d: 0x9491, 0x146e: 0x94b9, 0x146f: 0x94e1, - 0x1470: 0x9079, 0x1471: 0x9051, 0x1472: 0x90a1, 0x1473: 0x8211, 0x1474: 0x8059, 0x1475: 0x8081, - 0x1476: 0x80a9, 0x1477: 0x9491, 0x1478: 0x94b9, 0x1479: 0x94e1, 0x147a: 0x8211, 0x147b: 0x8239, - 0x147c: 0x95a9, 0x147d: 0x95a9, 0x147e: 0x0018, 0x147f: 0x0018, - // Block 0x52, offset 0x1480 - 0x1480: 0x0040, 0x1481: 0x0040, 0x1482: 0x0040, 0x1483: 0x0040, 0x1484: 0x0040, 0x1485: 0x0040, - 0x1486: 0x0040, 0x1487: 0x0040, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, - 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x95d1, 0x1491: 0x9609, - 0x1492: 0x9609, 0x1493: 0x9641, 0x1494: 0x9679, 0x1495: 0x96b1, 0x1496: 0x96e9, 0x1497: 0x9721, - 0x1498: 0x9759, 0x1499: 0x9759, 0x149a: 0x9791, 0x149b: 0x97c9, 0x149c: 0x9801, 0x149d: 0x9839, - 0x149e: 0x9871, 0x149f: 0x98a9, 0x14a0: 0x98a9, 0x14a1: 0x98e1, 0x14a2: 0x9919, 0x14a3: 0x9919, - 0x14a4: 0x9951, 0x14a5: 0x9951, 0x14a6: 0x9989, 0x14a7: 0x99c1, 0x14a8: 0x99c1, 0x14a9: 0x99f9, - 0x14aa: 0x9a31, 0x14ab: 0x9a31, 0x14ac: 0x9a69, 0x14ad: 0x9a69, 0x14ae: 0x9aa1, 0x14af: 0x9ad9, - 0x14b0: 0x9ad9, 0x14b1: 0x9b11, 0x14b2: 0x9b11, 0x14b3: 0x9b49, 0x14b4: 0x9b81, 0x14b5: 0x9bb9, - 0x14b6: 0x9bf1, 0x14b7: 0x9bf1, 0x14b8: 0x9c29, 0x14b9: 0x9c61, 0x14ba: 0x9c99, 0x14bb: 0x9cd1, - 0x14bc: 0x9d09, 0x14bd: 0x9d09, 0x14be: 0x9d41, 0x14bf: 0x9d79, - // Block 0x53, offset 0x14c0 - 0x14c0: 0xa949, 0x14c1: 0xa981, 0x14c2: 0xa9b9, 0x14c3: 0xa8a1, 0x14c4: 0x9bb9, 0x14c5: 0x9989, - 0x14c6: 0xa9f1, 0x14c7: 0xaa29, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, - 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x0040, 0x14d1: 0x0040, - 0x14d2: 0x0040, 0x14d3: 0x0040, 0x14d4: 0x0040, 0x14d5: 0x0040, 0x14d6: 0x0040, 0x14d7: 0x0040, - 0x14d8: 0x0040, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, - 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x0040, 0x14e1: 0x0040, 0x14e2: 0x0040, 0x14e3: 0x0040, - 0x14e4: 0x0040, 0x14e5: 0x0040, 0x14e6: 0x0040, 0x14e7: 0x0040, 0x14e8: 0x0040, 0x14e9: 0x0040, - 0x14ea: 0x0040, 0x14eb: 0x0040, 0x14ec: 0x0040, 0x14ed: 0x0040, 0x14ee: 0x0040, 0x14ef: 0x0040, - 0x14f0: 0xaa61, 0x14f1: 0xaa99, 0x14f2: 0xaad1, 0x14f3: 0xab19, 0x14f4: 0xab61, 0x14f5: 0xaba9, - 0x14f6: 0xabf1, 0x14f7: 0xac39, 0x14f8: 0xac81, 0x14f9: 0xacc9, 0x14fa: 0xad02, 0x14fb: 0xae12, - 0x14fc: 0xae91, 0x14fd: 0x0018, 0x14fe: 0x0040, 0x14ff: 0x0040, - // Block 0x54, offset 0x1500 - 0x1500: 0x33c0, 0x1501: 0x33c0, 
0x1502: 0x33c0, 0x1503: 0x33c0, 0x1504: 0x33c0, 0x1505: 0x33c0, - 0x1506: 0x33c0, 0x1507: 0x33c0, 0x1508: 0x33c0, 0x1509: 0x33c0, 0x150a: 0x33c0, 0x150b: 0x33c0, - 0x150c: 0x33c0, 0x150d: 0x33c0, 0x150e: 0x33c0, 0x150f: 0x33c0, 0x1510: 0xaeda, 0x1511: 0x7d55, - 0x1512: 0x0040, 0x1513: 0xaeea, 0x1514: 0x03c2, 0x1515: 0xaefa, 0x1516: 0xaf0a, 0x1517: 0x7d75, - 0x1518: 0x7d95, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, - 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x3308, 0x1521: 0x3308, 0x1522: 0x3308, 0x1523: 0x3308, - 0x1524: 0x3308, 0x1525: 0x3308, 0x1526: 0x3308, 0x1527: 0x3308, 0x1528: 0x3308, 0x1529: 0x3308, - 0x152a: 0x3308, 0x152b: 0x3308, 0x152c: 0x3308, 0x152d: 0x3308, 0x152e: 0x3308, 0x152f: 0x3308, - 0x1530: 0x0040, 0x1531: 0x7db5, 0x1532: 0x7dd5, 0x1533: 0xaf1a, 0x1534: 0xaf1a, 0x1535: 0x1fd2, - 0x1536: 0x1fe2, 0x1537: 0xaf2a, 0x1538: 0xaf3a, 0x1539: 0x7df5, 0x153a: 0x7e15, 0x153b: 0x7e35, - 0x153c: 0x7df5, 0x153d: 0x7e55, 0x153e: 0x7e75, 0x153f: 0x7e55, - // Block 0x55, offset 0x1540 - 0x1540: 0x7e95, 0x1541: 0x7eb5, 0x1542: 0x7ed5, 0x1543: 0x7eb5, 0x1544: 0x7ef5, 0x1545: 0x0018, - 0x1546: 0x0018, 0x1547: 0xaf4a, 0x1548: 0xaf5a, 0x1549: 0x7f16, 0x154a: 0x7f36, 0x154b: 0x7f56, - 0x154c: 0x7f76, 0x154d: 0xaf1a, 0x154e: 0xaf1a, 0x154f: 0xaf1a, 0x1550: 0xaeda, 0x1551: 0x7f95, - 0x1552: 0x0040, 0x1553: 0x0040, 0x1554: 0x03c2, 0x1555: 0xaeea, 0x1556: 0xaf0a, 0x1557: 0xaefa, - 0x1558: 0x7fb5, 0x1559: 0x1fd2, 0x155a: 0x1fe2, 0x155b: 0xaf2a, 0x155c: 0xaf3a, 0x155d: 0x7e95, - 0x155e: 0x7ef5, 0x155f: 0xaf6a, 0x1560: 0xaf7a, 0x1561: 0xaf8a, 0x1562: 0x1fb2, 0x1563: 0xaf99, - 0x1564: 0xafaa, 0x1565: 0xafba, 0x1566: 0x1fc2, 0x1567: 0x0040, 0x1568: 0xafca, 0x1569: 0xafda, - 0x156a: 0xafea, 0x156b: 0xaffa, 0x156c: 0x0040, 0x156d: 0x0040, 0x156e: 0x0040, 0x156f: 0x0040, - 0x1570: 0x7fd6, 0x1571: 0xb009, 0x1572: 0x7ff6, 0x1573: 0x0808, 0x1574: 0x8016, 0x1575: 0x0040, - 0x1576: 0x8036, 0x1577: 0xb031, 0x1578: 0x8056, 0x1579: 0xb059, 0x157a: 0x8076, 0x157b: 0xb081, - 0x157c: 0x8096, 0x157d: 0xb0a9, 0x157e: 0x80b6, 0x157f: 0xb0d1, - // Block 0x56, offset 0x1580 - 0x1580: 0xb0f9, 0x1581: 0xb111, 0x1582: 0xb111, 0x1583: 0xb129, 0x1584: 0xb129, 0x1585: 0xb141, - 0x1586: 0xb141, 0x1587: 0xb159, 0x1588: 0xb159, 0x1589: 0xb171, 0x158a: 0xb171, 0x158b: 0xb171, - 0x158c: 0xb171, 0x158d: 0xb189, 0x158e: 0xb189, 0x158f: 0xb1a1, 0x1590: 0xb1a1, 0x1591: 0xb1a1, - 0x1592: 0xb1a1, 0x1593: 0xb1b9, 0x1594: 0xb1b9, 0x1595: 0xb1d1, 0x1596: 0xb1d1, 0x1597: 0xb1d1, - 0x1598: 0xb1d1, 0x1599: 0xb1e9, 0x159a: 0xb1e9, 0x159b: 0xb1e9, 0x159c: 0xb1e9, 0x159d: 0xb201, - 0x159e: 0xb201, 0x159f: 0xb201, 0x15a0: 0xb201, 0x15a1: 0xb219, 0x15a2: 0xb219, 0x15a3: 0xb219, - 0x15a4: 0xb219, 0x15a5: 0xb231, 0x15a6: 0xb231, 0x15a7: 0xb231, 0x15a8: 0xb231, 0x15a9: 0xb249, - 0x15aa: 0xb249, 0x15ab: 0xb261, 0x15ac: 0xb261, 0x15ad: 0xb279, 0x15ae: 0xb279, 0x15af: 0xb291, - 0x15b0: 0xb291, 0x15b1: 0xb2a9, 0x15b2: 0xb2a9, 0x15b3: 0xb2a9, 0x15b4: 0xb2a9, 0x15b5: 0xb2c1, - 0x15b6: 0xb2c1, 0x15b7: 0xb2c1, 0x15b8: 0xb2c1, 0x15b9: 0xb2d9, 0x15ba: 0xb2d9, 0x15bb: 0xb2d9, - 0x15bc: 0xb2d9, 0x15bd: 0xb2f1, 0x15be: 0xb2f1, 0x15bf: 0xb2f1, - // Block 0x57, offset 0x15c0 - 0x15c0: 0xb2f1, 0x15c1: 0xb309, 0x15c2: 0xb309, 0x15c3: 0xb309, 0x15c4: 0xb309, 0x15c5: 0xb321, - 0x15c6: 0xb321, 0x15c7: 0xb321, 0x15c8: 0xb321, 0x15c9: 0xb339, 0x15ca: 0xb339, 0x15cb: 0xb339, - 0x15cc: 0xb339, 0x15cd: 0xb351, 0x15ce: 0xb351, 0x15cf: 0xb351, 0x15d0: 0xb351, 0x15d1: 0xb369, - 0x15d2: 0xb369, 0x15d3: 0xb369, 0x15d4: 0xb369, 0x15d5: 
0xb381, 0x15d6: 0xb381, 0x15d7: 0xb381, - 0x15d8: 0xb381, 0x15d9: 0xb399, 0x15da: 0xb399, 0x15db: 0xb399, 0x15dc: 0xb399, 0x15dd: 0xb3b1, - 0x15de: 0xb3b1, 0x15df: 0xb3b1, 0x15e0: 0xb3b1, 0x15e1: 0xb3c9, 0x15e2: 0xb3c9, 0x15e3: 0xb3c9, - 0x15e4: 0xb3c9, 0x15e5: 0xb3e1, 0x15e6: 0xb3e1, 0x15e7: 0xb3e1, 0x15e8: 0xb3e1, 0x15e9: 0xb3f9, - 0x15ea: 0xb3f9, 0x15eb: 0xb3f9, 0x15ec: 0xb3f9, 0x15ed: 0xb411, 0x15ee: 0xb411, 0x15ef: 0x7ab1, - 0x15f0: 0x7ab1, 0x15f1: 0xb429, 0x15f2: 0xb429, 0x15f3: 0xb429, 0x15f4: 0xb429, 0x15f5: 0xb441, - 0x15f6: 0xb441, 0x15f7: 0xb469, 0x15f8: 0xb469, 0x15f9: 0xb491, 0x15fa: 0xb491, 0x15fb: 0xb4b9, - 0x15fc: 0xb4b9, 0x15fd: 0x0040, 0x15fe: 0x0040, 0x15ff: 0x03c0, - // Block 0x58, offset 0x1600 - 0x1600: 0x0040, 0x1601: 0xaefa, 0x1602: 0xb4e2, 0x1603: 0xaf6a, 0x1604: 0xafda, 0x1605: 0xafea, - 0x1606: 0xaf7a, 0x1607: 0xb4f2, 0x1608: 0x1fd2, 0x1609: 0x1fe2, 0x160a: 0xaf8a, 0x160b: 0x1fb2, - 0x160c: 0xaeda, 0x160d: 0xaf99, 0x160e: 0x29d1, 0x160f: 0xb502, 0x1610: 0x1f41, 0x1611: 0x00c9, - 0x1612: 0x0069, 0x1613: 0x0079, 0x1614: 0x1f51, 0x1615: 0x1f61, 0x1616: 0x1f71, 0x1617: 0x1f81, - 0x1618: 0x1f91, 0x1619: 0x1fa1, 0x161a: 0xaeea, 0x161b: 0x03c2, 0x161c: 0xafaa, 0x161d: 0x1fc2, - 0x161e: 0xafba, 0x161f: 0xaf0a, 0x1620: 0xaffa, 0x1621: 0x0039, 0x1622: 0x0ee9, 0x1623: 0x1159, - 0x1624: 0x0ef9, 0x1625: 0x0f09, 0x1626: 0x1199, 0x1627: 0x0f31, 0x1628: 0x0249, 0x1629: 0x0f41, - 0x162a: 0x0259, 0x162b: 0x0f51, 0x162c: 0x0359, 0x162d: 0x0f61, 0x162e: 0x0f71, 0x162f: 0x00d9, - 0x1630: 0x0f99, 0x1631: 0x2039, 0x1632: 0x0269, 0x1633: 0x01d9, 0x1634: 0x0fa9, 0x1635: 0x0fb9, - 0x1636: 0x1089, 0x1637: 0x0279, 0x1638: 0x0369, 0x1639: 0x0289, 0x163a: 0x13d1, 0x163b: 0xaf4a, - 0x163c: 0xafca, 0x163d: 0xaf5a, 0x163e: 0xb512, 0x163f: 0xaf1a, - // Block 0x59, offset 0x1640 - 0x1640: 0x1caa, 0x1641: 0x0039, 0x1642: 0x0ee9, 0x1643: 0x1159, 0x1644: 0x0ef9, 0x1645: 0x0f09, - 0x1646: 0x1199, 0x1647: 0x0f31, 0x1648: 0x0249, 0x1649: 0x0f41, 0x164a: 0x0259, 0x164b: 0x0f51, - 0x164c: 0x0359, 0x164d: 0x0f61, 0x164e: 0x0f71, 0x164f: 0x00d9, 0x1650: 0x0f99, 0x1651: 0x2039, - 0x1652: 0x0269, 0x1653: 0x01d9, 0x1654: 0x0fa9, 0x1655: 0x0fb9, 0x1656: 0x1089, 0x1657: 0x0279, - 0x1658: 0x0369, 0x1659: 0x0289, 0x165a: 0x13d1, 0x165b: 0xaf2a, 0x165c: 0xb522, 0x165d: 0xaf3a, - 0x165e: 0xb532, 0x165f: 0x80d5, 0x1660: 0x80f5, 0x1661: 0x29d1, 0x1662: 0x8115, 0x1663: 0x8115, - 0x1664: 0x8135, 0x1665: 0x8155, 0x1666: 0x8175, 0x1667: 0x8195, 0x1668: 0x81b5, 0x1669: 0x81d5, - 0x166a: 0x81f5, 0x166b: 0x8215, 0x166c: 0x8235, 0x166d: 0x8255, 0x166e: 0x8275, 0x166f: 0x8295, - 0x1670: 0x82b5, 0x1671: 0x82d5, 0x1672: 0x82f5, 0x1673: 0x8315, 0x1674: 0x8335, 0x1675: 0x8355, - 0x1676: 0x8375, 0x1677: 0x8395, 0x1678: 0x83b5, 0x1679: 0x83d5, 0x167a: 0x83f5, 0x167b: 0x8415, - 0x167c: 0x81b5, 0x167d: 0x8435, 0x167e: 0x8455, 0x167f: 0x8215, - // Block 0x5a, offset 0x1680 - 0x1680: 0x8475, 0x1681: 0x8495, 0x1682: 0x84b5, 0x1683: 0x84d5, 0x1684: 0x84f5, 0x1685: 0x8515, - 0x1686: 0x8535, 0x1687: 0x8555, 0x1688: 0x84d5, 0x1689: 0x8575, 0x168a: 0x84d5, 0x168b: 0x8595, - 0x168c: 0x8595, 0x168d: 0x85b5, 0x168e: 0x85b5, 0x168f: 0x85d5, 0x1690: 0x8515, 0x1691: 0x85f5, - 0x1692: 0x8615, 0x1693: 0x85f5, 0x1694: 0x8635, 0x1695: 0x8615, 0x1696: 0x8655, 0x1697: 0x8655, - 0x1698: 0x8675, 0x1699: 0x8675, 0x169a: 0x8695, 0x169b: 0x8695, 0x169c: 0x8615, 0x169d: 0x8115, - 0x169e: 0x86b5, 0x169f: 0x86d5, 0x16a0: 0x0040, 0x16a1: 0x86f5, 0x16a2: 0x8715, 0x16a3: 0x8735, - 0x16a4: 0x8755, 0x16a5: 0x8735, 0x16a6: 0x8775, 0x16a7: 0x8795, 0x16a8: 0x87b5, 
0x16a9: 0x87b5, - 0x16aa: 0x87d5, 0x16ab: 0x87d5, 0x16ac: 0x87f5, 0x16ad: 0x87f5, 0x16ae: 0x87d5, 0x16af: 0x87d5, - 0x16b0: 0x8815, 0x16b1: 0x8835, 0x16b2: 0x8855, 0x16b3: 0x8875, 0x16b4: 0x8895, 0x16b5: 0x88b5, - 0x16b6: 0x88b5, 0x16b7: 0x88b5, 0x16b8: 0x88d5, 0x16b9: 0x88d5, 0x16ba: 0x88d5, 0x16bb: 0x88d5, - 0x16bc: 0x87b5, 0x16bd: 0x87b5, 0x16be: 0x87b5, 0x16bf: 0x0040, - // Block 0x5b, offset 0x16c0 - 0x16c0: 0x0040, 0x16c1: 0x0040, 0x16c2: 0x8715, 0x16c3: 0x86f5, 0x16c4: 0x88f5, 0x16c5: 0x86f5, - 0x16c6: 0x8715, 0x16c7: 0x86f5, 0x16c8: 0x0040, 0x16c9: 0x0040, 0x16ca: 0x8915, 0x16cb: 0x8715, - 0x16cc: 0x8935, 0x16cd: 0x88f5, 0x16ce: 0x8935, 0x16cf: 0x8715, 0x16d0: 0x0040, 0x16d1: 0x0040, - 0x16d2: 0x8955, 0x16d3: 0x8975, 0x16d4: 0x8875, 0x16d5: 0x8935, 0x16d6: 0x88f5, 0x16d7: 0x8935, - 0x16d8: 0x0040, 0x16d9: 0x0040, 0x16da: 0x8995, 0x16db: 0x89b5, 0x16dc: 0x8995, 0x16dd: 0x0040, - 0x16de: 0x0040, 0x16df: 0x0040, 0x16e0: 0xb541, 0x16e1: 0xb559, 0x16e2: 0xb571, 0x16e3: 0x89d6, - 0x16e4: 0xb589, 0x16e5: 0xb5a1, 0x16e6: 0x89f5, 0x16e7: 0x0040, 0x16e8: 0x8a15, 0x16e9: 0x8a35, - 0x16ea: 0x8a55, 0x16eb: 0x8a35, 0x16ec: 0x8a75, 0x16ed: 0x8a95, 0x16ee: 0x8ab5, 0x16ef: 0x0040, - 0x16f0: 0x0040, 0x16f1: 0x0040, 0x16f2: 0x0040, 0x16f3: 0x0040, 0x16f4: 0x0040, 0x16f5: 0x0040, - 0x16f6: 0x0040, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0340, 0x16fa: 0x0340, 0x16fb: 0x0340, - 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, - // Block 0x5c, offset 0x1700 - 0x1700: 0x0a08, 0x1701: 0x0a08, 0x1702: 0x0a08, 0x1703: 0x0a08, 0x1704: 0x0a08, 0x1705: 0x0c08, - 0x1706: 0x0808, 0x1707: 0x0c08, 0x1708: 0x0818, 0x1709: 0x0c08, 0x170a: 0x0c08, 0x170b: 0x0808, - 0x170c: 0x0808, 0x170d: 0x0908, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0c08, 0x1711: 0x0c08, - 0x1712: 0x0c08, 0x1713: 0x0a08, 0x1714: 0x0a08, 0x1715: 0x0a08, 0x1716: 0x0a08, 0x1717: 0x0908, - 0x1718: 0x0a08, 0x1719: 0x0a08, 0x171a: 0x0a08, 0x171b: 0x0a08, 0x171c: 0x0a08, 0x171d: 0x0c08, - 0x171e: 0x0a08, 0x171f: 0x0a08, 0x1720: 0x0a08, 0x1721: 0x0c08, 0x1722: 0x0808, 0x1723: 0x0808, - 0x1724: 0x0c08, 0x1725: 0x3308, 0x1726: 0x3308, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0040, - 0x172a: 0x0040, 0x172b: 0x0a18, 0x172c: 0x0a18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0c18, - 0x1730: 0x0818, 0x1731: 0x0818, 0x1732: 0x0818, 0x1733: 0x0818, 0x1734: 0x0818, 0x1735: 0x0818, - 0x1736: 0x0818, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, - 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, - // Block 0x5d, offset 0x1740 - 0x1740: 0x0a08, 0x1741: 0x0c08, 0x1742: 0x0a08, 0x1743: 0x0c08, 0x1744: 0x0c08, 0x1745: 0x0c08, - 0x1746: 0x0a08, 0x1747: 0x0a08, 0x1748: 0x0a08, 0x1749: 0x0c08, 0x174a: 0x0a08, 0x174b: 0x0a08, - 0x174c: 0x0c08, 0x174d: 0x0a08, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0a08, 0x1751: 0x0c08, - 0x1752: 0x0040, 0x1753: 0x0040, 0x1754: 0x0040, 0x1755: 0x0040, 0x1756: 0x0040, 0x1757: 0x0040, - 0x1758: 0x0040, 0x1759: 0x0818, 0x175a: 0x0818, 0x175b: 0x0818, 0x175c: 0x0818, 0x175d: 0x0040, - 0x175e: 0x0040, 0x175f: 0x0040, 0x1760: 0x0040, 0x1761: 0x0040, 0x1762: 0x0040, 0x1763: 0x0040, - 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0c18, - 0x176a: 0x0c18, 0x176b: 0x0c18, 0x176c: 0x0c18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0818, - 0x1770: 0x0040, 0x1771: 0x0040, 0x1772: 0x0040, 0x1773: 0x0040, 0x1774: 0x0040, 0x1775: 0x0040, - 0x1776: 0x0040, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, - 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, - // Block 0x5e, offset 0x1780 - 0x1780: 0x3308, 0x1781: 0x3308, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x0040, 0x1785: 0x0008, - 0x1786: 0x0008, 0x1787: 0x0008, 0x1788: 0x0008, 0x1789: 0x0008, 0x178a: 0x0008, 0x178b: 0x0008, - 0x178c: 0x0008, 0x178d: 0x0040, 0x178e: 0x0040, 0x178f: 0x0008, 0x1790: 0x0008, 0x1791: 0x0040, - 0x1792: 0x0040, 0x1793: 0x0008, 0x1794: 0x0008, 0x1795: 0x0008, 0x1796: 0x0008, 0x1797: 0x0008, - 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008, - 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0008, 0x17a3: 0x0008, - 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0040, - 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008, - 0x17b0: 0x0008, 0x17b1: 0x0040, 0x17b2: 0x0008, 0x17b3: 0x0008, 0x17b4: 0x0040, 0x17b5: 0x0008, - 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0040, - 0x17bc: 0x3308, 0x17bd: 0x0008, 0x17be: 0x3008, 0x17bf: 0x3008, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x3308, 0x17c1: 0x3008, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x3008, 0x17c5: 0x0040, - 0x17c6: 0x0040, 0x17c7: 0x3008, 0x17c8: 0x3008, 0x17c9: 0x0040, 0x17ca: 0x0040, 0x17cb: 0x3008, - 0x17cc: 0x3008, 0x17cd: 0x3808, 0x17ce: 0x0040, 0x17cf: 0x0040, 0x17d0: 0x0008, 0x17d1: 0x0040, - 0x17d2: 0x0040, 0x17d3: 0x0040, 0x17d4: 0x0040, 0x17d5: 0x0040, 0x17d6: 0x0040, 0x17d7: 0x3008, - 0x17d8: 0x0040, 0x17d9: 0x0040, 0x17da: 0x0040, 0x17db: 0x0040, 0x17dc: 0x0040, 0x17dd: 0x0008, - 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x3008, 0x17e3: 0x3008, - 0x17e4: 0x0040, 0x17e5: 0x0040, 0x17e6: 0x3308, 0x17e7: 0x3308, 0x17e8: 0x3308, 0x17e9: 0x3308, - 0x17ea: 0x3308, 0x17eb: 0x3308, 0x17ec: 0x3308, 0x17ed: 0x0040, 0x17ee: 0x0040, 0x17ef: 0x0040, - 0x17f0: 0x3308, 0x17f1: 0x3308, 0x17f2: 0x3308, 0x17f3: 0x3308, 0x17f4: 0x3308, 0x17f5: 0x0040, - 0x17f6: 0x0040, 0x17f7: 0x0040, 0x17f8: 0x0040, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x0040, - 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040, - // Block 0x60, offset 0x1800 - 0x1800: 0x0039, 0x1801: 0x0ee9, 0x1802: 0x1159, 0x1803: 0x0ef9, 0x1804: 0x0f09, 0x1805: 0x1199, - 0x1806: 0x0f31, 0x1807: 0x0249, 0x1808: 0x0f41, 0x1809: 0x0259, 0x180a: 0x0f51, 0x180b: 0x0359, - 0x180c: 0x0f61, 0x180d: 0x0f71, 0x180e: 0x00d9, 0x180f: 0x0f99, 0x1810: 0x2039, 0x1811: 0x0269, - 0x1812: 0x01d9, 0x1813: 0x0fa9, 0x1814: 0x0fb9, 0x1815: 0x1089, 0x1816: 0x0279, 0x1817: 0x0369, - 0x1818: 0x0289, 0x1819: 0x13d1, 0x181a: 0x0039, 0x181b: 0x0ee9, 0x181c: 0x1159, 0x181d: 0x0ef9, - 0x181e: 0x0f09, 0x181f: 0x1199, 0x1820: 0x0f31, 0x1821: 0x0249, 0x1822: 0x0f41, 0x1823: 0x0259, - 0x1824: 0x0f51, 0x1825: 0x0359, 0x1826: 0x0f61, 0x1827: 0x0f71, 0x1828: 0x00d9, 0x1829: 0x0f99, - 0x182a: 0x2039, 0x182b: 0x0269, 0x182c: 0x01d9, 0x182d: 0x0fa9, 0x182e: 0x0fb9, 0x182f: 0x1089, - 0x1830: 0x0279, 0x1831: 0x0369, 0x1832: 0x0289, 0x1833: 0x13d1, 0x1834: 0x0039, 0x1835: 0x0ee9, - 0x1836: 0x1159, 0x1837: 0x0ef9, 0x1838: 0x0f09, 0x1839: 0x1199, 0x183a: 0x0f31, 0x183b: 0x0249, - 0x183c: 0x0f41, 0x183d: 0x0259, 0x183e: 0x0f51, 0x183f: 0x0359, - // Block 0x61, offset 0x1840 - 0x1840: 0x0f61, 0x1841: 0x0f71, 0x1842: 0x00d9, 0x1843: 0x0f99, 0x1844: 0x2039, 0x1845: 0x0269, - 0x1846: 0x01d9, 0x1847: 0x0fa9, 0x1848: 0x0fb9, 0x1849: 0x1089, 0x184a: 0x0279, 0x184b: 0x0369, - 0x184c: 0x0289, 0x184d: 0x13d1, 
0x184e: 0x0039, 0x184f: 0x0ee9, 0x1850: 0x1159, 0x1851: 0x0ef9, - 0x1852: 0x0f09, 0x1853: 0x1199, 0x1854: 0x0f31, 0x1855: 0x0040, 0x1856: 0x0f41, 0x1857: 0x0259, - 0x1858: 0x0f51, 0x1859: 0x0359, 0x185a: 0x0f61, 0x185b: 0x0f71, 0x185c: 0x00d9, 0x185d: 0x0f99, - 0x185e: 0x2039, 0x185f: 0x0269, 0x1860: 0x01d9, 0x1861: 0x0fa9, 0x1862: 0x0fb9, 0x1863: 0x1089, - 0x1864: 0x0279, 0x1865: 0x0369, 0x1866: 0x0289, 0x1867: 0x13d1, 0x1868: 0x0039, 0x1869: 0x0ee9, - 0x186a: 0x1159, 0x186b: 0x0ef9, 0x186c: 0x0f09, 0x186d: 0x1199, 0x186e: 0x0f31, 0x186f: 0x0249, - 0x1870: 0x0f41, 0x1871: 0x0259, 0x1872: 0x0f51, 0x1873: 0x0359, 0x1874: 0x0f61, 0x1875: 0x0f71, - 0x1876: 0x00d9, 0x1877: 0x0f99, 0x1878: 0x2039, 0x1879: 0x0269, 0x187a: 0x01d9, 0x187b: 0x0fa9, - 0x187c: 0x0fb9, 0x187d: 0x1089, 0x187e: 0x0279, 0x187f: 0x0369, - // Block 0x62, offset 0x1880 - 0x1880: 0x0289, 0x1881: 0x13d1, 0x1882: 0x0039, 0x1883: 0x0ee9, 0x1884: 0x1159, 0x1885: 0x0ef9, - 0x1886: 0x0f09, 0x1887: 0x1199, 0x1888: 0x0f31, 0x1889: 0x0249, 0x188a: 0x0f41, 0x188b: 0x0259, - 0x188c: 0x0f51, 0x188d: 0x0359, 0x188e: 0x0f61, 0x188f: 0x0f71, 0x1890: 0x00d9, 0x1891: 0x0f99, - 0x1892: 0x2039, 0x1893: 0x0269, 0x1894: 0x01d9, 0x1895: 0x0fa9, 0x1896: 0x0fb9, 0x1897: 0x1089, - 0x1898: 0x0279, 0x1899: 0x0369, 0x189a: 0x0289, 0x189b: 0x13d1, 0x189c: 0x0039, 0x189d: 0x0040, - 0x189e: 0x1159, 0x189f: 0x0ef9, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0f31, 0x18a3: 0x0040, - 0x18a4: 0x0040, 0x18a5: 0x0259, 0x18a6: 0x0f51, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0f71, - 0x18aa: 0x00d9, 0x18ab: 0x0f99, 0x18ac: 0x2039, 0x18ad: 0x0040, 0x18ae: 0x01d9, 0x18af: 0x0fa9, - 0x18b0: 0x0fb9, 0x18b1: 0x1089, 0x18b2: 0x0279, 0x18b3: 0x0369, 0x18b4: 0x0289, 0x18b5: 0x13d1, - 0x18b6: 0x0039, 0x18b7: 0x0ee9, 0x18b8: 0x1159, 0x18b9: 0x0ef9, 0x18ba: 0x0040, 0x18bb: 0x1199, - 0x18bc: 0x0040, 0x18bd: 0x0249, 0x18be: 0x0f41, 0x18bf: 0x0259, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x0f51, 0x18c1: 0x0359, 0x18c2: 0x0f61, 0x18c3: 0x0f71, 0x18c4: 0x0040, 0x18c5: 0x0f99, - 0x18c6: 0x2039, 0x18c7: 0x0269, 0x18c8: 0x01d9, 0x18c9: 0x0fa9, 0x18ca: 0x0fb9, 0x18cb: 0x1089, - 0x18cc: 0x0279, 0x18cd: 0x0369, 0x18ce: 0x0289, 0x18cf: 0x13d1, 0x18d0: 0x0039, 0x18d1: 0x0ee9, - 0x18d2: 0x1159, 0x18d3: 0x0ef9, 0x18d4: 0x0f09, 0x18d5: 0x1199, 0x18d6: 0x0f31, 0x18d7: 0x0249, - 0x18d8: 0x0f41, 0x18d9: 0x0259, 0x18da: 0x0f51, 0x18db: 0x0359, 0x18dc: 0x0f61, 0x18dd: 0x0f71, - 0x18de: 0x00d9, 0x18df: 0x0f99, 0x18e0: 0x2039, 0x18e1: 0x0269, 0x18e2: 0x01d9, 0x18e3: 0x0fa9, - 0x18e4: 0x0fb9, 0x18e5: 0x1089, 0x18e6: 0x0279, 0x18e7: 0x0369, 0x18e8: 0x0289, 0x18e9: 0x13d1, - 0x18ea: 0x0039, 0x18eb: 0x0ee9, 0x18ec: 0x1159, 0x18ed: 0x0ef9, 0x18ee: 0x0f09, 0x18ef: 0x1199, - 0x18f0: 0x0f31, 0x18f1: 0x0249, 0x18f2: 0x0f41, 0x18f3: 0x0259, 0x18f4: 0x0f51, 0x18f5: 0x0359, - 0x18f6: 0x0f61, 0x18f7: 0x0f71, 0x18f8: 0x00d9, 0x18f9: 0x0f99, 0x18fa: 0x2039, 0x18fb: 0x0269, - 0x18fc: 0x01d9, 0x18fd: 0x0fa9, 0x18fe: 0x0fb9, 0x18ff: 0x1089, - // Block 0x64, offset 0x1900 - 0x1900: 0x0279, 0x1901: 0x0369, 0x1902: 0x0289, 0x1903: 0x13d1, 0x1904: 0x0039, 0x1905: 0x0ee9, - 0x1906: 0x0040, 0x1907: 0x0ef9, 0x1908: 0x0f09, 0x1909: 0x1199, 0x190a: 0x0f31, 0x190b: 0x0040, - 0x190c: 0x0040, 0x190d: 0x0259, 0x190e: 0x0f51, 0x190f: 0x0359, 0x1910: 0x0f61, 0x1911: 0x0f71, - 0x1912: 0x00d9, 0x1913: 0x0f99, 0x1914: 0x2039, 0x1915: 0x0040, 0x1916: 0x01d9, 0x1917: 0x0fa9, - 0x1918: 0x0fb9, 0x1919: 0x1089, 0x191a: 0x0279, 0x191b: 0x0369, 0x191c: 0x0289, 0x191d: 0x0040, - 0x191e: 0x0039, 0x191f: 0x0ee9, 0x1920: 0x1159, 0x1921: 
0x0ef9, 0x1922: 0x0f09, 0x1923: 0x1199, - 0x1924: 0x0f31, 0x1925: 0x0249, 0x1926: 0x0f41, 0x1927: 0x0259, 0x1928: 0x0f51, 0x1929: 0x0359, - 0x192a: 0x0f61, 0x192b: 0x0f71, 0x192c: 0x00d9, 0x192d: 0x0f99, 0x192e: 0x2039, 0x192f: 0x0269, - 0x1930: 0x01d9, 0x1931: 0x0fa9, 0x1932: 0x0fb9, 0x1933: 0x1089, 0x1934: 0x0279, 0x1935: 0x0369, - 0x1936: 0x0289, 0x1937: 0x13d1, 0x1938: 0x0039, 0x1939: 0x0ee9, 0x193a: 0x0040, 0x193b: 0x0ef9, - 0x193c: 0x0f09, 0x193d: 0x1199, 0x193e: 0x0f31, 0x193f: 0x0040, - // Block 0x65, offset 0x1940 - 0x1940: 0x0f41, 0x1941: 0x0259, 0x1942: 0x0f51, 0x1943: 0x0359, 0x1944: 0x0f61, 0x1945: 0x0040, - 0x1946: 0x00d9, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x01d9, 0x194b: 0x0fa9, - 0x194c: 0x0fb9, 0x194d: 0x1089, 0x194e: 0x0279, 0x194f: 0x0369, 0x1950: 0x0289, 0x1951: 0x0040, - 0x1952: 0x0039, 0x1953: 0x0ee9, 0x1954: 0x1159, 0x1955: 0x0ef9, 0x1956: 0x0f09, 0x1957: 0x1199, - 0x1958: 0x0f31, 0x1959: 0x0249, 0x195a: 0x0f41, 0x195b: 0x0259, 0x195c: 0x0f51, 0x195d: 0x0359, - 0x195e: 0x0f61, 0x195f: 0x0f71, 0x1960: 0x00d9, 0x1961: 0x0f99, 0x1962: 0x2039, 0x1963: 0x0269, - 0x1964: 0x01d9, 0x1965: 0x0fa9, 0x1966: 0x0fb9, 0x1967: 0x1089, 0x1968: 0x0279, 0x1969: 0x0369, - 0x196a: 0x0289, 0x196b: 0x13d1, 0x196c: 0x0039, 0x196d: 0x0ee9, 0x196e: 0x1159, 0x196f: 0x0ef9, - 0x1970: 0x0f09, 0x1971: 0x1199, 0x1972: 0x0f31, 0x1973: 0x0249, 0x1974: 0x0f41, 0x1975: 0x0259, - 0x1976: 0x0f51, 0x1977: 0x0359, 0x1978: 0x0f61, 0x1979: 0x0f71, 0x197a: 0x00d9, 0x197b: 0x0f99, - 0x197c: 0x2039, 0x197d: 0x0269, 0x197e: 0x01d9, 0x197f: 0x0fa9, - // Block 0x66, offset 0x1980 - 0x1980: 0x0fb9, 0x1981: 0x1089, 0x1982: 0x0279, 0x1983: 0x0369, 0x1984: 0x0289, 0x1985: 0x13d1, - 0x1986: 0x0039, 0x1987: 0x0ee9, 0x1988: 0x1159, 0x1989: 0x0ef9, 0x198a: 0x0f09, 0x198b: 0x1199, - 0x198c: 0x0f31, 0x198d: 0x0249, 0x198e: 0x0f41, 0x198f: 0x0259, 0x1990: 0x0f51, 0x1991: 0x0359, - 0x1992: 0x0f61, 0x1993: 0x0f71, 0x1994: 0x00d9, 0x1995: 0x0f99, 0x1996: 0x2039, 0x1997: 0x0269, - 0x1998: 0x01d9, 0x1999: 0x0fa9, 0x199a: 0x0fb9, 0x199b: 0x1089, 0x199c: 0x0279, 0x199d: 0x0369, - 0x199e: 0x0289, 0x199f: 0x13d1, 0x19a0: 0x0039, 0x19a1: 0x0ee9, 0x19a2: 0x1159, 0x19a3: 0x0ef9, - 0x19a4: 0x0f09, 0x19a5: 0x1199, 0x19a6: 0x0f31, 0x19a7: 0x0249, 0x19a8: 0x0f41, 0x19a9: 0x0259, - 0x19aa: 0x0f51, 0x19ab: 0x0359, 0x19ac: 0x0f61, 0x19ad: 0x0f71, 0x19ae: 0x00d9, 0x19af: 0x0f99, - 0x19b0: 0x2039, 0x19b1: 0x0269, 0x19b2: 0x01d9, 0x19b3: 0x0fa9, 0x19b4: 0x0fb9, 0x19b5: 0x1089, - 0x19b6: 0x0279, 0x19b7: 0x0369, 0x19b8: 0x0289, 0x19b9: 0x13d1, 0x19ba: 0x0039, 0x19bb: 0x0ee9, - 0x19bc: 0x1159, 0x19bd: 0x0ef9, 0x19be: 0x0f09, 0x19bf: 0x1199, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x0f31, 0x19c1: 0x0249, 0x19c2: 0x0f41, 0x19c3: 0x0259, 0x19c4: 0x0f51, 0x19c5: 0x0359, - 0x19c6: 0x0f61, 0x19c7: 0x0f71, 0x19c8: 0x00d9, 0x19c9: 0x0f99, 0x19ca: 0x2039, 0x19cb: 0x0269, - 0x19cc: 0x01d9, 0x19cd: 0x0fa9, 0x19ce: 0x0fb9, 0x19cf: 0x1089, 0x19d0: 0x0279, 0x19d1: 0x0369, - 0x19d2: 0x0289, 0x19d3: 0x13d1, 0x19d4: 0x0039, 0x19d5: 0x0ee9, 0x19d6: 0x1159, 0x19d7: 0x0ef9, - 0x19d8: 0x0f09, 0x19d9: 0x1199, 0x19da: 0x0f31, 0x19db: 0x0249, 0x19dc: 0x0f41, 0x19dd: 0x0259, - 0x19de: 0x0f51, 0x19df: 0x0359, 0x19e0: 0x0f61, 0x19e1: 0x0f71, 0x19e2: 0x00d9, 0x19e3: 0x0f99, - 0x19e4: 0x2039, 0x19e5: 0x0269, 0x19e6: 0x01d9, 0x19e7: 0x0fa9, 0x19e8: 0x0fb9, 0x19e9: 0x1089, - 0x19ea: 0x0279, 0x19eb: 0x0369, 0x19ec: 0x0289, 0x19ed: 0x13d1, 0x19ee: 0x0039, 0x19ef: 0x0ee9, - 0x19f0: 0x1159, 0x19f1: 0x0ef9, 0x19f2: 0x0f09, 0x19f3: 0x1199, 0x19f4: 0x0f31, 
0x19f5: 0x0249, - 0x19f6: 0x0f41, 0x19f7: 0x0259, 0x19f8: 0x0f51, 0x19f9: 0x0359, 0x19fa: 0x0f61, 0x19fb: 0x0f71, - 0x19fc: 0x00d9, 0x19fd: 0x0f99, 0x19fe: 0x2039, 0x19ff: 0x0269, - // Block 0x68, offset 0x1a00 - 0x1a00: 0x01d9, 0x1a01: 0x0fa9, 0x1a02: 0x0fb9, 0x1a03: 0x1089, 0x1a04: 0x0279, 0x1a05: 0x0369, - 0x1a06: 0x0289, 0x1a07: 0x13d1, 0x1a08: 0x0039, 0x1a09: 0x0ee9, 0x1a0a: 0x1159, 0x1a0b: 0x0ef9, - 0x1a0c: 0x0f09, 0x1a0d: 0x1199, 0x1a0e: 0x0f31, 0x1a0f: 0x0249, 0x1a10: 0x0f41, 0x1a11: 0x0259, - 0x1a12: 0x0f51, 0x1a13: 0x0359, 0x1a14: 0x0f61, 0x1a15: 0x0f71, 0x1a16: 0x00d9, 0x1a17: 0x0f99, - 0x1a18: 0x2039, 0x1a19: 0x0269, 0x1a1a: 0x01d9, 0x1a1b: 0x0fa9, 0x1a1c: 0x0fb9, 0x1a1d: 0x1089, - 0x1a1e: 0x0279, 0x1a1f: 0x0369, 0x1a20: 0x0289, 0x1a21: 0x13d1, 0x1a22: 0x0039, 0x1a23: 0x0ee9, - 0x1a24: 0x1159, 0x1a25: 0x0ef9, 0x1a26: 0x0f09, 0x1a27: 0x1199, 0x1a28: 0x0f31, 0x1a29: 0x0249, - 0x1a2a: 0x0f41, 0x1a2b: 0x0259, 0x1a2c: 0x0f51, 0x1a2d: 0x0359, 0x1a2e: 0x0f61, 0x1a2f: 0x0f71, - 0x1a30: 0x00d9, 0x1a31: 0x0f99, 0x1a32: 0x2039, 0x1a33: 0x0269, 0x1a34: 0x01d9, 0x1a35: 0x0fa9, - 0x1a36: 0x0fb9, 0x1a37: 0x1089, 0x1a38: 0x0279, 0x1a39: 0x0369, 0x1a3a: 0x0289, 0x1a3b: 0x13d1, - 0x1a3c: 0x0039, 0x1a3d: 0x0ee9, 0x1a3e: 0x1159, 0x1a3f: 0x0ef9, - // Block 0x69, offset 0x1a40 - 0x1a40: 0x0f09, 0x1a41: 0x1199, 0x1a42: 0x0f31, 0x1a43: 0x0249, 0x1a44: 0x0f41, 0x1a45: 0x0259, - 0x1a46: 0x0f51, 0x1a47: 0x0359, 0x1a48: 0x0f61, 0x1a49: 0x0f71, 0x1a4a: 0x00d9, 0x1a4b: 0x0f99, - 0x1a4c: 0x2039, 0x1a4d: 0x0269, 0x1a4e: 0x01d9, 0x1a4f: 0x0fa9, 0x1a50: 0x0fb9, 0x1a51: 0x1089, - 0x1a52: 0x0279, 0x1a53: 0x0369, 0x1a54: 0x0289, 0x1a55: 0x13d1, 0x1a56: 0x0039, 0x1a57: 0x0ee9, - 0x1a58: 0x1159, 0x1a59: 0x0ef9, 0x1a5a: 0x0f09, 0x1a5b: 0x1199, 0x1a5c: 0x0f31, 0x1a5d: 0x0249, - 0x1a5e: 0x0f41, 0x1a5f: 0x0259, 0x1a60: 0x0f51, 0x1a61: 0x0359, 0x1a62: 0x0f61, 0x1a63: 0x0f71, - 0x1a64: 0x00d9, 0x1a65: 0x0f99, 0x1a66: 0x2039, 0x1a67: 0x0269, 0x1a68: 0x01d9, 0x1a69: 0x0fa9, - 0x1a6a: 0x0fb9, 0x1a6b: 0x1089, 0x1a6c: 0x0279, 0x1a6d: 0x0369, 0x1a6e: 0x0289, 0x1a6f: 0x13d1, - 0x1a70: 0x0039, 0x1a71: 0x0ee9, 0x1a72: 0x1159, 0x1a73: 0x0ef9, 0x1a74: 0x0f09, 0x1a75: 0x1199, - 0x1a76: 0x0f31, 0x1a77: 0x0249, 0x1a78: 0x0f41, 0x1a79: 0x0259, 0x1a7a: 0x0f51, 0x1a7b: 0x0359, - 0x1a7c: 0x0f61, 0x1a7d: 0x0f71, 0x1a7e: 0x00d9, 0x1a7f: 0x0f99, - // Block 0x6a, offset 0x1a80 - 0x1a80: 0x2039, 0x1a81: 0x0269, 0x1a82: 0x01d9, 0x1a83: 0x0fa9, 0x1a84: 0x0fb9, 0x1a85: 0x1089, - 0x1a86: 0x0279, 0x1a87: 0x0369, 0x1a88: 0x0289, 0x1a89: 0x13d1, 0x1a8a: 0x0039, 0x1a8b: 0x0ee9, - 0x1a8c: 0x1159, 0x1a8d: 0x0ef9, 0x1a8e: 0x0f09, 0x1a8f: 0x1199, 0x1a90: 0x0f31, 0x1a91: 0x0249, - 0x1a92: 0x0f41, 0x1a93: 0x0259, 0x1a94: 0x0f51, 0x1a95: 0x0359, 0x1a96: 0x0f61, 0x1a97: 0x0f71, - 0x1a98: 0x00d9, 0x1a99: 0x0f99, 0x1a9a: 0x2039, 0x1a9b: 0x0269, 0x1a9c: 0x01d9, 0x1a9d: 0x0fa9, - 0x1a9e: 0x0fb9, 0x1a9f: 0x1089, 0x1aa0: 0x0279, 0x1aa1: 0x0369, 0x1aa2: 0x0289, 0x1aa3: 0x13d1, - 0x1aa4: 0xba81, 0x1aa5: 0xba99, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0xbab1, 0x1aa9: 0x1099, - 0x1aaa: 0x10b1, 0x1aab: 0x10c9, 0x1aac: 0xbac9, 0x1aad: 0xbae1, 0x1aae: 0xbaf9, 0x1aaf: 0x1429, - 0x1ab0: 0x1a31, 0x1ab1: 0xbb11, 0x1ab2: 0xbb29, 0x1ab3: 0xbb41, 0x1ab4: 0xbb59, 0x1ab5: 0xbb71, - 0x1ab6: 0xbb89, 0x1ab7: 0x2109, 0x1ab8: 0x1111, 0x1ab9: 0x1429, 0x1aba: 0xbba1, 0x1abb: 0xbbb9, - 0x1abc: 0xbbd1, 0x1abd: 0x10e1, 0x1abe: 0x10f9, 0x1abf: 0xbbe9, - // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0x2079, 0x1ac1: 0xbc01, 0x1ac2: 0xbab1, 0x1ac3: 0x1099, 0x1ac4: 0x10b1, 0x1ac5: 0x10c9, - 0x1ac6: 
0xbac9, 0x1ac7: 0xbae1, 0x1ac8: 0xbaf9, 0x1ac9: 0x1429, 0x1aca: 0x1a31, 0x1acb: 0xbb11, - 0x1acc: 0xbb29, 0x1acd: 0xbb41, 0x1ace: 0xbb59, 0x1acf: 0xbb71, 0x1ad0: 0xbb89, 0x1ad1: 0x2109, - 0x1ad2: 0x1111, 0x1ad3: 0xbba1, 0x1ad4: 0xbba1, 0x1ad5: 0xbbb9, 0x1ad6: 0xbbd1, 0x1ad7: 0x10e1, - 0x1ad8: 0x10f9, 0x1ad9: 0xbbe9, 0x1ada: 0x2079, 0x1adb: 0xbc21, 0x1adc: 0xbac9, 0x1add: 0x1429, - 0x1ade: 0xbb11, 0x1adf: 0x10e1, 0x1ae0: 0x1111, 0x1ae1: 0x2109, 0x1ae2: 0xbab1, 0x1ae3: 0x1099, - 0x1ae4: 0x10b1, 0x1ae5: 0x10c9, 0x1ae6: 0xbac9, 0x1ae7: 0xbae1, 0x1ae8: 0xbaf9, 0x1ae9: 0x1429, - 0x1aea: 0x1a31, 0x1aeb: 0xbb11, 0x1aec: 0xbb29, 0x1aed: 0xbb41, 0x1aee: 0xbb59, 0x1aef: 0xbb71, - 0x1af0: 0xbb89, 0x1af1: 0x2109, 0x1af2: 0x1111, 0x1af3: 0x1429, 0x1af4: 0xbba1, 0x1af5: 0xbbb9, - 0x1af6: 0xbbd1, 0x1af7: 0x10e1, 0x1af8: 0x10f9, 0x1af9: 0xbbe9, 0x1afa: 0x2079, 0x1afb: 0xbc01, - 0x1afc: 0xbab1, 0x1afd: 0x1099, 0x1afe: 0x10b1, 0x1aff: 0x10c9, - // Block 0x6c, offset 0x1b00 - 0x1b00: 0xbac9, 0x1b01: 0xbae1, 0x1b02: 0xbaf9, 0x1b03: 0x1429, 0x1b04: 0x1a31, 0x1b05: 0xbb11, - 0x1b06: 0xbb29, 0x1b07: 0xbb41, 0x1b08: 0xbb59, 0x1b09: 0xbb71, 0x1b0a: 0xbb89, 0x1b0b: 0x2109, - 0x1b0c: 0x1111, 0x1b0d: 0xbba1, 0x1b0e: 0xbba1, 0x1b0f: 0xbbb9, 0x1b10: 0xbbd1, 0x1b11: 0x10e1, - 0x1b12: 0x10f9, 0x1b13: 0xbbe9, 0x1b14: 0x2079, 0x1b15: 0xbc21, 0x1b16: 0xbac9, 0x1b17: 0x1429, - 0x1b18: 0xbb11, 0x1b19: 0x10e1, 0x1b1a: 0x1111, 0x1b1b: 0x2109, 0x1b1c: 0xbab1, 0x1b1d: 0x1099, - 0x1b1e: 0x10b1, 0x1b1f: 0x10c9, 0x1b20: 0xbac9, 0x1b21: 0xbae1, 0x1b22: 0xbaf9, 0x1b23: 0x1429, - 0x1b24: 0x1a31, 0x1b25: 0xbb11, 0x1b26: 0xbb29, 0x1b27: 0xbb41, 0x1b28: 0xbb59, 0x1b29: 0xbb71, - 0x1b2a: 0xbb89, 0x1b2b: 0x2109, 0x1b2c: 0x1111, 0x1b2d: 0x1429, 0x1b2e: 0xbba1, 0x1b2f: 0xbbb9, - 0x1b30: 0xbbd1, 0x1b31: 0x10e1, 0x1b32: 0x10f9, 0x1b33: 0xbbe9, 0x1b34: 0x2079, 0x1b35: 0xbc01, - 0x1b36: 0xbab1, 0x1b37: 0x1099, 0x1b38: 0x10b1, 0x1b39: 0x10c9, 0x1b3a: 0xbac9, 0x1b3b: 0xbae1, - 0x1b3c: 0xbaf9, 0x1b3d: 0x1429, 0x1b3e: 0x1a31, 0x1b3f: 0xbb11, - // Block 0x6d, offset 0x1b40 - 0x1b40: 0xbb29, 0x1b41: 0xbb41, 0x1b42: 0xbb59, 0x1b43: 0xbb71, 0x1b44: 0xbb89, 0x1b45: 0x2109, - 0x1b46: 0x1111, 0x1b47: 0xbba1, 0x1b48: 0xbba1, 0x1b49: 0xbbb9, 0x1b4a: 0xbbd1, 0x1b4b: 0x10e1, - 0x1b4c: 0x10f9, 0x1b4d: 0xbbe9, 0x1b4e: 0x2079, 0x1b4f: 0xbc21, 0x1b50: 0xbac9, 0x1b51: 0x1429, - 0x1b52: 0xbb11, 0x1b53: 0x10e1, 0x1b54: 0x1111, 0x1b55: 0x2109, 0x1b56: 0xbab1, 0x1b57: 0x1099, - 0x1b58: 0x10b1, 0x1b59: 0x10c9, 0x1b5a: 0xbac9, 0x1b5b: 0xbae1, 0x1b5c: 0xbaf9, 0x1b5d: 0x1429, - 0x1b5e: 0x1a31, 0x1b5f: 0xbb11, 0x1b60: 0xbb29, 0x1b61: 0xbb41, 0x1b62: 0xbb59, 0x1b63: 0xbb71, - 0x1b64: 0xbb89, 0x1b65: 0x2109, 0x1b66: 0x1111, 0x1b67: 0x1429, 0x1b68: 0xbba1, 0x1b69: 0xbbb9, - 0x1b6a: 0xbbd1, 0x1b6b: 0x10e1, 0x1b6c: 0x10f9, 0x1b6d: 0xbbe9, 0x1b6e: 0x2079, 0x1b6f: 0xbc01, - 0x1b70: 0xbab1, 0x1b71: 0x1099, 0x1b72: 0x10b1, 0x1b73: 0x10c9, 0x1b74: 0xbac9, 0x1b75: 0xbae1, - 0x1b76: 0xbaf9, 0x1b77: 0x1429, 0x1b78: 0x1a31, 0x1b79: 0xbb11, 0x1b7a: 0xbb29, 0x1b7b: 0xbb41, - 0x1b7c: 0xbb59, 0x1b7d: 0xbb71, 0x1b7e: 0xbb89, 0x1b7f: 0x2109, - // Block 0x6e, offset 0x1b80 - 0x1b80: 0x1111, 0x1b81: 0xbba1, 0x1b82: 0xbba1, 0x1b83: 0xbbb9, 0x1b84: 0xbbd1, 0x1b85: 0x10e1, - 0x1b86: 0x10f9, 0x1b87: 0xbbe9, 0x1b88: 0x2079, 0x1b89: 0xbc21, 0x1b8a: 0xbac9, 0x1b8b: 0x1429, - 0x1b8c: 0xbb11, 0x1b8d: 0x10e1, 0x1b8e: 0x1111, 0x1b8f: 0x2109, 0x1b90: 0xbab1, 0x1b91: 0x1099, - 0x1b92: 0x10b1, 0x1b93: 0x10c9, 0x1b94: 0xbac9, 0x1b95: 0xbae1, 0x1b96: 0xbaf9, 0x1b97: 0x1429, - 0x1b98: 0x1a31, 0x1b99: 0xbb11, 
0x1b9a: 0xbb29, 0x1b9b: 0xbb41, 0x1b9c: 0xbb59, 0x1b9d: 0xbb71, - 0x1b9e: 0xbb89, 0x1b9f: 0x2109, 0x1ba0: 0x1111, 0x1ba1: 0x1429, 0x1ba2: 0xbba1, 0x1ba3: 0xbbb9, - 0x1ba4: 0xbbd1, 0x1ba5: 0x10e1, 0x1ba6: 0x10f9, 0x1ba7: 0xbbe9, 0x1ba8: 0x2079, 0x1ba9: 0xbc01, - 0x1baa: 0xbab1, 0x1bab: 0x1099, 0x1bac: 0x10b1, 0x1bad: 0x10c9, 0x1bae: 0xbac9, 0x1baf: 0xbae1, - 0x1bb0: 0xbaf9, 0x1bb1: 0x1429, 0x1bb2: 0x1a31, 0x1bb3: 0xbb11, 0x1bb4: 0xbb29, 0x1bb5: 0xbb41, - 0x1bb6: 0xbb59, 0x1bb7: 0xbb71, 0x1bb8: 0xbb89, 0x1bb9: 0x2109, 0x1bba: 0x1111, 0x1bbb: 0xbba1, - 0x1bbc: 0xbba1, 0x1bbd: 0xbbb9, 0x1bbe: 0xbbd1, 0x1bbf: 0x10e1, - // Block 0x6f, offset 0x1bc0 - 0x1bc0: 0x10f9, 0x1bc1: 0xbbe9, 0x1bc2: 0x2079, 0x1bc3: 0xbc21, 0x1bc4: 0xbac9, 0x1bc5: 0x1429, - 0x1bc6: 0xbb11, 0x1bc7: 0x10e1, 0x1bc8: 0x1111, 0x1bc9: 0x2109, 0x1bca: 0xbc41, 0x1bcb: 0xbc41, - 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x1f41, 0x1bcf: 0x00c9, 0x1bd0: 0x0069, 0x1bd1: 0x0079, - 0x1bd2: 0x1f51, 0x1bd3: 0x1f61, 0x1bd4: 0x1f71, 0x1bd5: 0x1f81, 0x1bd6: 0x1f91, 0x1bd7: 0x1fa1, - 0x1bd8: 0x1f41, 0x1bd9: 0x00c9, 0x1bda: 0x0069, 0x1bdb: 0x0079, 0x1bdc: 0x1f51, 0x1bdd: 0x1f61, - 0x1bde: 0x1f71, 0x1bdf: 0x1f81, 0x1be0: 0x1f91, 0x1be1: 0x1fa1, 0x1be2: 0x1f41, 0x1be3: 0x00c9, - 0x1be4: 0x0069, 0x1be5: 0x0079, 0x1be6: 0x1f51, 0x1be7: 0x1f61, 0x1be8: 0x1f71, 0x1be9: 0x1f81, - 0x1bea: 0x1f91, 0x1beb: 0x1fa1, 0x1bec: 0x1f41, 0x1bed: 0x00c9, 0x1bee: 0x0069, 0x1bef: 0x0079, - 0x1bf0: 0x1f51, 0x1bf1: 0x1f61, 0x1bf2: 0x1f71, 0x1bf3: 0x1f81, 0x1bf4: 0x1f91, 0x1bf5: 0x1fa1, - 0x1bf6: 0x1f41, 0x1bf7: 0x00c9, 0x1bf8: 0x0069, 0x1bf9: 0x0079, 0x1bfa: 0x1f51, 0x1bfb: 0x1f61, - 0x1bfc: 0x1f71, 0x1bfd: 0x1f81, 0x1bfe: 0x1f91, 0x1bff: 0x1fa1, - // Block 0x70, offset 0x1c00 - 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115, - 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135, - 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115, - 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175, - 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115, - 0x1c1e: 0x8b05, 0x1c1f: 0x8b05, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08, - 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08, - 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08, - 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08, - 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08, - 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08, - // Block 0x71, offset 0x1c40 - 0x1c40: 0xb189, 0x1c41: 0xb1a1, 0x1c42: 0xb201, 0x1c43: 0xb249, 0x1c44: 0x0040, 0x1c45: 0xb411, - 0x1c46: 0xb291, 0x1c47: 0xb219, 0x1c48: 0xb309, 0x1c49: 0xb429, 0x1c4a: 0xb399, 0x1c4b: 0xb3b1, - 0x1c4c: 0xb3c9, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0xb369, 0x1c51: 0xb2d9, - 0x1c52: 0xb381, 0x1c53: 0xb279, 0x1c54: 0xb2c1, 0x1c55: 0xb1d1, 0x1c56: 0xb1e9, 0x1c57: 0xb231, - 0x1c58: 0xb261, 0x1c59: 0xb2f1, 0x1c5a: 0xb321, 0x1c5b: 0xb351, 0x1c5c: 0xbc59, 0x1c5d: 0x7949, - 0x1c5e: 0xbc71, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, - 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0x0040, 0x1c69: 0xb429, - 0x1c6a: 0xb399, 0x1c6b: 0xb3b1, 0x1c6c: 0xb3c9, 0x1c6d: 
0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, - 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, - 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0x0040, 0x1c7b: 0xb351, - 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040, - // Block 0x72, offset 0x1c80 - 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0xb201, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040, - 0x1c86: 0x0040, 0x1c87: 0xb219, 0x1c88: 0x0040, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, - 0x1c8c: 0x0040, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0x0040, 0x1c91: 0xb2d9, - 0x1c92: 0xb381, 0x1c93: 0x0040, 0x1c94: 0xb2c1, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0xb231, - 0x1c98: 0x0040, 0x1c99: 0xb2f1, 0x1c9a: 0x0040, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x7949, - 0x1c9e: 0x0040, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, - 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, - 0x1caa: 0xb399, 0x1cab: 0x0040, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, - 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, - 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, - 0x1cbc: 0xbc59, 0x1cbd: 0x0040, 0x1cbe: 0xbc71, 0x1cbf: 0x0040, - // Block 0x73, offset 0x1cc0 - 0x1cc0: 0xb189, 0x1cc1: 0xb1a1, 0x1cc2: 0xb201, 0x1cc3: 0xb249, 0x1cc4: 0xb3f9, 0x1cc5: 0xb411, - 0x1cc6: 0xb291, 0x1cc7: 0xb219, 0x1cc8: 0xb309, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, - 0x1ccc: 0xb3c9, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0xb369, 0x1cd1: 0xb2d9, - 0x1cd2: 0xb381, 0x1cd3: 0xb279, 0x1cd4: 0xb2c1, 0x1cd5: 0xb1d1, 0x1cd6: 0xb1e9, 0x1cd7: 0xb231, - 0x1cd8: 0xb261, 0x1cd9: 0xb2f1, 0x1cda: 0xb321, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x0040, - 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0xb249, - 0x1ce4: 0x0040, 0x1ce5: 0xb411, 0x1ce6: 0xb291, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, - 0x1cea: 0x0040, 0x1ceb: 0xb3b1, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, - 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0xb279, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, - 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0xb261, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, - 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040, - // Block 0x74, offset 0x1d00 - 0x1d00: 0x0040, 0x1d01: 0xbca2, 0x1d02: 0xbcba, 0x1d03: 0xbcd2, 0x1d04: 0xbcea, 0x1d05: 0xbd02, - 0x1d06: 0xbd1a, 0x1d07: 0xbd32, 0x1d08: 0xbd4a, 0x1d09: 0xbd62, 0x1d0a: 0xbd7a, 0x1d0b: 0x0018, - 0x1d0c: 0x0018, 0x1d0d: 0x0040, 0x1d0e: 0x0040, 0x1d0f: 0x0040, 0x1d10: 0xbd92, 0x1d11: 0xbdb2, - 0x1d12: 0xbdd2, 0x1d13: 0xbdf2, 0x1d14: 0xbe12, 0x1d15: 0xbe32, 0x1d16: 0xbe52, 0x1d17: 0xbe72, - 0x1d18: 0xbe92, 0x1d19: 0xbeb2, 0x1d1a: 0xbed2, 0x1d1b: 0xbef2, 0x1d1c: 0xbf12, 0x1d1d: 0xbf32, - 0x1d1e: 0xbf52, 0x1d1f: 0xbf72, 0x1d20: 0xbf92, 0x1d21: 0xbfb2, 0x1d22: 0xbfd2, 0x1d23: 0xbff2, - 0x1d24: 0xc012, 0x1d25: 0xc032, 0x1d26: 0xc052, 0x1d27: 0xc072, 0x1d28: 0xc092, 0x1d29: 0xc0b2, - 0x1d2a: 0xc0d1, 0x1d2b: 0x1159, 0x1d2c: 0x0269, 0x1d2d: 0x6671, 0x1d2e: 0xc111, 0x1d2f: 0x0040, - 0x1d30: 0x0039, 0x1d31: 0x0ee9, 0x1d32: 0x1159, 0x1d33: 0x0ef9, 0x1d34: 0x0f09, 0x1d35: 0x1199, - 0x1d36: 0x0f31, 0x1d37: 0x0249, 0x1d38: 0x0f41, 0x1d39: 0x0259, 0x1d3a: 0x0f51, 0x1d3b: 0x0359, - 0x1d3c: 0x0f61, 0x1d3d: 0x0f71, 0x1d3e: 0x00d9, 0x1d3f: 0x0f99, - // Block 0x75, 
offset 0x1d40 - 0x1d40: 0x2039, 0x1d41: 0x0269, 0x1d42: 0x01d9, 0x1d43: 0x0fa9, 0x1d44: 0x0fb9, 0x1d45: 0x1089, - 0x1d46: 0x0279, 0x1d47: 0x0369, 0x1d48: 0x0289, 0x1d49: 0x13d1, 0x1d4a: 0xc129, 0x1d4b: 0x65b1, - 0x1d4c: 0xc141, 0x1d4d: 0x1441, 0x1d4e: 0xc159, 0x1d4f: 0xc179, 0x1d50: 0x0018, 0x1d51: 0x0018, - 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018, - 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018, - 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018, - 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018, - 0x1d6a: 0xc191, 0x1d6b: 0xc1a9, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040, - 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018, - 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018, - 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018, - // Block 0x76, offset 0x1d80 - 0x1d80: 0xc1d9, 0x1d81: 0xc211, 0x1d82: 0xc249, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040, - 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040, - 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0xc269, 0x1d91: 0xc289, - 0x1d92: 0xc2a9, 0x1d93: 0xc2c9, 0x1d94: 0xc2e9, 0x1d95: 0xc309, 0x1d96: 0xc329, 0x1d97: 0xc349, - 0x1d98: 0xc369, 0x1d99: 0xc389, 0x1d9a: 0xc3a9, 0x1d9b: 0xc3c9, 0x1d9c: 0xc3e9, 0x1d9d: 0xc409, - 0x1d9e: 0xc429, 0x1d9f: 0xc449, 0x1da0: 0xc469, 0x1da1: 0xc489, 0x1da2: 0xc4a9, 0x1da3: 0xc4c9, - 0x1da4: 0xc4e9, 0x1da5: 0xc509, 0x1da6: 0xc529, 0x1da7: 0xc549, 0x1da8: 0xc569, 0x1da9: 0xc589, - 0x1daa: 0xc5a9, 0x1dab: 0xc5c9, 0x1dac: 0xc5e9, 0x1dad: 0xc609, 0x1dae: 0xc629, 0x1daf: 0xc649, - 0x1db0: 0xc669, 0x1db1: 0xc689, 0x1db2: 0xc6a9, 0x1db3: 0xc6c9, 0x1db4: 0xc6e9, 0x1db5: 0xc709, - 0x1db6: 0xc729, 0x1db7: 0xc749, 0x1db8: 0xc769, 0x1db9: 0xc789, 0x1dba: 0xc7a9, 0x1dbb: 0xc7c9, - 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040, - // Block 0x77, offset 0x1dc0 - 0x1dc0: 0xcaf9, 0x1dc1: 0xcb19, 0x1dc2: 0xcb39, 0x1dc3: 0x8b1d, 0x1dc4: 0xcb59, 0x1dc5: 0xcb79, - 0x1dc6: 0xcb99, 0x1dc7: 0xcbb9, 0x1dc8: 0xcbd9, 0x1dc9: 0xcbf9, 0x1dca: 0xcc19, 0x1dcb: 0xcc39, - 0x1dcc: 0xcc59, 0x1dcd: 0x8b3d, 0x1dce: 0xcc79, 0x1dcf: 0xcc99, 0x1dd0: 0xccb9, 0x1dd1: 0xccd9, - 0x1dd2: 0x8b5d, 0x1dd3: 0xccf9, 0x1dd4: 0xcd19, 0x1dd5: 0xc429, 0x1dd6: 0x8b7d, 0x1dd7: 0xcd39, - 0x1dd8: 0xcd59, 0x1dd9: 0xcd79, 0x1dda: 0xcd99, 0x1ddb: 0xcdb9, 0x1ddc: 0x8b9d, 0x1ddd: 0xcdd9, - 0x1dde: 0xcdf9, 0x1ddf: 0xce19, 0x1de0: 0xce39, 0x1de1: 0xce59, 0x1de2: 0xc789, 0x1de3: 0xce79, - 0x1de4: 0xce99, 0x1de5: 0xceb9, 0x1de6: 0xced9, 0x1de7: 0xcef9, 0x1de8: 0xcf19, 0x1de9: 0xcf39, - 0x1dea: 0xcf59, 0x1deb: 0xcf79, 0x1dec: 0xcf99, 0x1ded: 0xcfb9, 0x1dee: 0xcfd9, 0x1def: 0xcff9, - 0x1df0: 0xd019, 0x1df1: 0xd039, 0x1df2: 0xd039, 0x1df3: 0xd039, 0x1df4: 0x8bbd, 0x1df5: 0xd059, - 0x1df6: 0xd079, 0x1df7: 0xd099, 0x1df8: 0x8bdd, 0x1df9: 0xd0b9, 0x1dfa: 0xd0d9, 0x1dfb: 0xd0f9, - 0x1dfc: 0xd119, 0x1dfd: 0xd139, 0x1dfe: 0xd159, 0x1dff: 0xd179, - // Block 0x78, offset 0x1e00 - 0x1e00: 0xd199, 0x1e01: 0xd1b9, 0x1e02: 0xd1d9, 0x1e03: 0xd1f9, 0x1e04: 0xd219, 0x1e05: 0xd239, - 0x1e06: 0xd239, 0x1e07: 0xd259, 0x1e08: 0xd279, 0x1e09: 0xd299, 0x1e0a: 0xd2b9, 0x1e0b: 0xd2d9, - 0x1e0c: 0xd2f9, 0x1e0d: 0xd319, 0x1e0e: 0xd339, 0x1e0f: 0xd359, 0x1e10: 0xd379, 0x1e11: 0xd399, - 0x1e12: 
0xd3b9, 0x1e13: 0xd3d9, 0x1e14: 0xd3f9, 0x1e15: 0xd419, 0x1e16: 0xd439, 0x1e17: 0xd459, - 0x1e18: 0xd479, 0x1e19: 0x8bfd, 0x1e1a: 0xd499, 0x1e1b: 0xd4b9, 0x1e1c: 0xd4d9, 0x1e1d: 0xc309, - 0x1e1e: 0xd4f9, 0x1e1f: 0xd519, 0x1e20: 0x8c1d, 0x1e21: 0x8c3d, 0x1e22: 0xd539, 0x1e23: 0xd559, - 0x1e24: 0xd579, 0x1e25: 0xd599, 0x1e26: 0xd5b9, 0x1e27: 0xd5d9, 0x1e28: 0x2040, 0x1e29: 0xd5f9, - 0x1e2a: 0xd619, 0x1e2b: 0xd619, 0x1e2c: 0x8c5d, 0x1e2d: 0xd639, 0x1e2e: 0xd659, 0x1e2f: 0xd679, - 0x1e30: 0xd699, 0x1e31: 0x8c7d, 0x1e32: 0xd6b9, 0x1e33: 0xd6d9, 0x1e34: 0x2040, 0x1e35: 0xd6f9, - 0x1e36: 0xd719, 0x1e37: 0xd739, 0x1e38: 0xd759, 0x1e39: 0xd779, 0x1e3a: 0xd799, 0x1e3b: 0x8c9d, - 0x1e3c: 0xd7b9, 0x1e3d: 0x8cbd, 0x1e3e: 0xd7d9, 0x1e3f: 0xd7f9, - // Block 0x79, offset 0x1e40 - 0x1e40: 0xd819, 0x1e41: 0xd839, 0x1e42: 0xd859, 0x1e43: 0xd879, 0x1e44: 0xd899, 0x1e45: 0xd8b9, - 0x1e46: 0xd8d9, 0x1e47: 0xd8f9, 0x1e48: 0xd919, 0x1e49: 0x8cdd, 0x1e4a: 0xd939, 0x1e4b: 0xd959, - 0x1e4c: 0xd979, 0x1e4d: 0xd999, 0x1e4e: 0xd9b9, 0x1e4f: 0x8cfd, 0x1e50: 0xd9d9, 0x1e51: 0x8d1d, - 0x1e52: 0x8d3d, 0x1e53: 0xd9f9, 0x1e54: 0xda19, 0x1e55: 0xda19, 0x1e56: 0xda39, 0x1e57: 0x8d5d, - 0x1e58: 0x8d7d, 0x1e59: 0xda59, 0x1e5a: 0xda79, 0x1e5b: 0xda99, 0x1e5c: 0xdab9, 0x1e5d: 0xdad9, - 0x1e5e: 0xdaf9, 0x1e5f: 0xdb19, 0x1e60: 0xdb39, 0x1e61: 0xdb59, 0x1e62: 0xdb79, 0x1e63: 0xdb99, - 0x1e64: 0x8d9d, 0x1e65: 0xdbb9, 0x1e66: 0xdbd9, 0x1e67: 0xdbf9, 0x1e68: 0xdc19, 0x1e69: 0xdbf9, - 0x1e6a: 0xdc39, 0x1e6b: 0xdc59, 0x1e6c: 0xdc79, 0x1e6d: 0xdc99, 0x1e6e: 0xdcb9, 0x1e6f: 0xdcd9, - 0x1e70: 0xdcf9, 0x1e71: 0xdd19, 0x1e72: 0xdd39, 0x1e73: 0xdd59, 0x1e74: 0xdd79, 0x1e75: 0xdd99, - 0x1e76: 0xddb9, 0x1e77: 0xddd9, 0x1e78: 0x8dbd, 0x1e79: 0xddf9, 0x1e7a: 0xde19, 0x1e7b: 0xde39, - 0x1e7c: 0xde59, 0x1e7d: 0xde79, 0x1e7e: 0x8ddd, 0x1e7f: 0xde99, - // Block 0x7a, offset 0x1e80 - 0x1e80: 0xe599, 0x1e81: 0xe5b9, 0x1e82: 0xe5d9, 0x1e83: 0xe5f9, 0x1e84: 0xe619, 0x1e85: 0xe639, - 0x1e86: 0x8efd, 0x1e87: 0xe659, 0x1e88: 0xe679, 0x1e89: 0xe699, 0x1e8a: 0xe6b9, 0x1e8b: 0xe6d9, - 0x1e8c: 0xe6f9, 0x1e8d: 0x8f1d, 0x1e8e: 0xe719, 0x1e8f: 0xe739, 0x1e90: 0x8f3d, 0x1e91: 0x8f5d, - 0x1e92: 0xe759, 0x1e93: 0xe779, 0x1e94: 0xe799, 0x1e95: 0xe7b9, 0x1e96: 0xe7d9, 0x1e97: 0xe7f9, - 0x1e98: 0xe819, 0x1e99: 0xe839, 0x1e9a: 0xe859, 0x1e9b: 0x8f7d, 0x1e9c: 0xe879, 0x1e9d: 0x8f9d, - 0x1e9e: 0xe899, 0x1e9f: 0x2040, 0x1ea0: 0xe8b9, 0x1ea1: 0xe8d9, 0x1ea2: 0xe8f9, 0x1ea3: 0x8fbd, - 0x1ea4: 0xe919, 0x1ea5: 0xe939, 0x1ea6: 0x8fdd, 0x1ea7: 0x8ffd, 0x1ea8: 0xe959, 0x1ea9: 0xe979, - 0x1eaa: 0xe999, 0x1eab: 0xe9b9, 0x1eac: 0xe9d9, 0x1ead: 0xe9d9, 0x1eae: 0xe9f9, 0x1eaf: 0xea19, - 0x1eb0: 0xea39, 0x1eb1: 0xea59, 0x1eb2: 0xea79, 0x1eb3: 0xea99, 0x1eb4: 0xeab9, 0x1eb5: 0x901d, - 0x1eb6: 0xead9, 0x1eb7: 0x903d, 0x1eb8: 0xeaf9, 0x1eb9: 0x905d, 0x1eba: 0xeb19, 0x1ebb: 0x907d, - 0x1ebc: 0x909d, 0x1ebd: 0x90bd, 0x1ebe: 0xeb39, 0x1ebf: 0xeb59, - // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0xeb79, 0x1ec1: 0x90dd, 0x1ec2: 0x90fd, 0x1ec3: 0x911d, 0x1ec4: 0x913d, 0x1ec5: 0xeb99, - 0x1ec6: 0xebb9, 0x1ec7: 0xebb9, 0x1ec8: 0xebd9, 0x1ec9: 0xebf9, 0x1eca: 0xec19, 0x1ecb: 0xec39, - 0x1ecc: 0xec59, 0x1ecd: 0x915d, 0x1ece: 0xec79, 0x1ecf: 0xec99, 0x1ed0: 0xecb9, 0x1ed1: 0xecd9, - 0x1ed2: 0x917d, 0x1ed3: 0xecf9, 0x1ed4: 0x919d, 0x1ed5: 0x91bd, 0x1ed6: 0xed19, 0x1ed7: 0xed39, - 0x1ed8: 0xed59, 0x1ed9: 0xed79, 0x1eda: 0xed99, 0x1edb: 0xedb9, 0x1edc: 0x91dd, 0x1edd: 0x91fd, - 0x1ede: 0x921d, 0x1edf: 0x2040, 0x1ee0: 0xedd9, 0x1ee1: 0x923d, 0x1ee2: 0xedf9, 0x1ee3: 0xee19, - 0x1ee4: 0xee39, 0x1ee5: 0x925d, 
0x1ee6: 0xee59, 0x1ee7: 0xee79, 0x1ee8: 0xee99, 0x1ee9: 0xeeb9, - 0x1eea: 0xeed9, 0x1eeb: 0x927d, 0x1eec: 0xeef9, 0x1eed: 0xef19, 0x1eee: 0xef39, 0x1eef: 0xef59, - 0x1ef0: 0xef79, 0x1ef1: 0xef99, 0x1ef2: 0x929d, 0x1ef3: 0x92bd, 0x1ef4: 0xefb9, 0x1ef5: 0x92dd, - 0x1ef6: 0xefd9, 0x1ef7: 0x92fd, 0x1ef8: 0xeff9, 0x1ef9: 0xf019, 0x1efa: 0xf039, 0x1efb: 0x931d, - 0x1efc: 0x933d, 0x1efd: 0xf059, 0x1efe: 0x935d, 0x1eff: 0xf079, - // Block 0x7c, offset 0x1f00 - 0x1f00: 0xf6b9, 0x1f01: 0xf6d9, 0x1f02: 0xf6f9, 0x1f03: 0xf719, 0x1f04: 0xf739, 0x1f05: 0x951d, - 0x1f06: 0xf759, 0x1f07: 0xf779, 0x1f08: 0xf799, 0x1f09: 0xf7b9, 0x1f0a: 0xf7d9, 0x1f0b: 0x953d, - 0x1f0c: 0x955d, 0x1f0d: 0xf7f9, 0x1f0e: 0xf819, 0x1f0f: 0xf839, 0x1f10: 0xf859, 0x1f11: 0xf879, - 0x1f12: 0xf899, 0x1f13: 0x957d, 0x1f14: 0xf8b9, 0x1f15: 0xf8d9, 0x1f16: 0xf8f9, 0x1f17: 0xf919, - 0x1f18: 0x959d, 0x1f19: 0x95bd, 0x1f1a: 0xf939, 0x1f1b: 0xf959, 0x1f1c: 0xf979, 0x1f1d: 0x95dd, - 0x1f1e: 0xf999, 0x1f1f: 0xf9b9, 0x1f20: 0x6815, 0x1f21: 0x95fd, 0x1f22: 0xf9d9, 0x1f23: 0xf9f9, - 0x1f24: 0xfa19, 0x1f25: 0x961d, 0x1f26: 0xfa39, 0x1f27: 0xfa59, 0x1f28: 0xfa79, 0x1f29: 0xfa99, - 0x1f2a: 0xfab9, 0x1f2b: 0xfad9, 0x1f2c: 0xfaf9, 0x1f2d: 0x963d, 0x1f2e: 0xfb19, 0x1f2f: 0xfb39, - 0x1f30: 0xfb59, 0x1f31: 0x965d, 0x1f32: 0xfb79, 0x1f33: 0xfb99, 0x1f34: 0xfbb9, 0x1f35: 0xfbd9, - 0x1f36: 0x7b35, 0x1f37: 0x967d, 0x1f38: 0xfbf9, 0x1f39: 0xfc19, 0x1f3a: 0xfc39, 0x1f3b: 0x969d, - 0x1f3c: 0xfc59, 0x1f3d: 0x96bd, 0x1f3e: 0xfc79, 0x1f3f: 0xfc79, - // Block 0x7d, offset 0x1f40 - 0x1f40: 0xfc99, 0x1f41: 0x96dd, 0x1f42: 0xfcb9, 0x1f43: 0xfcd9, 0x1f44: 0xfcf9, 0x1f45: 0xfd19, - 0x1f46: 0xfd39, 0x1f47: 0xfd59, 0x1f48: 0xfd79, 0x1f49: 0x96fd, 0x1f4a: 0xfd99, 0x1f4b: 0xfdb9, - 0x1f4c: 0xfdd9, 0x1f4d: 0xfdf9, 0x1f4e: 0xfe19, 0x1f4f: 0xfe39, 0x1f50: 0x971d, 0x1f51: 0xfe59, - 0x1f52: 0x973d, 0x1f53: 0x975d, 0x1f54: 0x977d, 0x1f55: 0xfe79, 0x1f56: 0xfe99, 0x1f57: 0xfeb9, - 0x1f58: 0xfed9, 0x1f59: 0xfef9, 0x1f5a: 0xff19, 0x1f5b: 0xff39, 0x1f5c: 0xff59, 0x1f5d: 0x979d, - 0x1f5e: 0x0040, 0x1f5f: 0x0040, 0x1f60: 0x0040, 0x1f61: 0x0040, 0x1f62: 0x0040, 0x1f63: 0x0040, - 0x1f64: 0x0040, 0x1f65: 0x0040, 0x1f66: 0x0040, 0x1f67: 0x0040, 0x1f68: 0x0040, 0x1f69: 0x0040, - 0x1f6a: 0x0040, 0x1f6b: 0x0040, 0x1f6c: 0x0040, 0x1f6d: 0x0040, 0x1f6e: 0x0040, 0x1f6f: 0x0040, - 0x1f70: 0x0040, 0x1f71: 0x0040, 0x1f72: 0x0040, 0x1f73: 0x0040, 0x1f74: 0x0040, 0x1f75: 0x0040, - 0x1f76: 0x0040, 0x1f77: 0x0040, 0x1f78: 0x0040, 0x1f79: 0x0040, 0x1f7a: 0x0040, 0x1f7b: 0x0040, - 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040, -} - -// idnaIndex: 35 blocks, 2240 entries, 4480 bytes -// Block 0 is the zero block. 
-var idnaIndex = [2240]uint16{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x7c, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, - 0xc8: 0x06, 0xc9: 0x7d, 0xca: 0x7e, 0xcb: 0x07, 0xcc: 0x7f, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, - 0xd0: 0x80, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x81, 0xd6: 0x82, 0xd7: 0x83, - 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x84, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, - 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, - 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, - 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, - // Block 0x4, offset 0x100 - 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15, - 0x128: 0x16, 0x129: 0x17, 0x12a: 0x18, 0x12b: 0x19, 0x12c: 0x1a, 0x12d: 0x1b, 0x12e: 0x1c, 0x12f: 0x8d, - 0x130: 0x8e, 0x131: 0x1d, 0x132: 0x1e, 0x133: 0x1f, 0x134: 0x8f, 0x135: 0x20, 0x136: 0x90, 0x137: 0x91, - 0x138: 0x92, 0x139: 0x93, 0x13a: 0x21, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x22, 0x13e: 0x23, 0x13f: 0x96, - // Block 0x5, offset 0x140 - 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, - 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, - 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, - 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, - 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, - 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, - 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x24, 0x175: 0x25, 0x176: 0x26, 0x177: 0xc3, - 0x178: 0x27, 0x179: 0x27, 0x17a: 0x28, 0x17b: 0x27, 0x17c: 0xc4, 0x17d: 0x29, 0x17e: 0x2a, 0x17f: 0x2b, - // Block 0x6, offset 0x180 - 0x180: 0x2c, 0x181: 0x2d, 0x182: 0x2e, 0x183: 0xc5, 0x184: 0x2f, 0x185: 0x30, 0x186: 0xc6, 0x187: 0x9b, - 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xca, - 0x190: 0xcb, 0x191: 0x31, 0x192: 0x32, 0x193: 0x33, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, - 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, - 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, - 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9b, 0x1ab: 0xce, 0x1ac: 0x9b, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0xd1, - 0x1b0: 0xd2, 0x1b1: 0x34, 0x1b2: 0x27, 0x1b3: 0x35, 0x1b4: 0xd3, 0x1b5: 0xd4, 0x1b6: 0xd5, 0x1b7: 0xd6, - 0x1b8: 0xd7, 0x1b9: 0xd8, 0x1ba: 0xd9, 0x1bb: 0xda, 0x1bc: 0xdb, 0x1bd: 0xdc, 0x1be: 0xdd, 0x1bf: 0x36, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x37, 0x1c1: 0xde, 0x1c2: 0xdf, 0x1c3: 0xe0, 0x1c4: 0xe1, 0x1c5: 0x38, 0x1c6: 0x39, 0x1c7: 0xe2, - 0x1c8: 0xe3, 0x1c9: 0x3a, 0x1ca: 0x3b, 0x1cb: 0x3c, 0x1cc: 0x3d, 0x1cd: 0x3e, 0x1ce: 0x3f, 0x1cf: 0x40, - 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, - 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, - 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, - 0x1e8: 0x9f, 
0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, - 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, - 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, - // Block 0x8, offset 0x200 - 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, - 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, - 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, - 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, - 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, - 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, - 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, - 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, - // Block 0x9, offset 0x240 - 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, - 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, - 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, - 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, - 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, - 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, - 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, - 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, - // Block 0xa, offset 0x280 - 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, - 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, - 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, - 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, - 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, - 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, - 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, - 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe4, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, - 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, - 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe5, 0x2d3: 0xe6, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, - 0x2d8: 0xe7, 0x2d9: 0x41, 0x2da: 0x42, 0x2db: 0xe8, 0x2dc: 0x43, 0x2dd: 0x44, 0x2de: 0x45, 0x2df: 0xe9, - 0x2e0: 0xea, 0x2e1: 0xeb, 0x2e2: 0xec, 0x2e3: 0xed, 0x2e4: 0xee, 0x2e5: 0xef, 0x2e6: 0xf0, 0x2e7: 0xf1, - 0x2e8: 0xf2, 0x2e9: 0xf3, 0x2ea: 0xf4, 0x2eb: 0xf5, 0x2ec: 
0xf6, 0x2ed: 0xf7, 0x2ee: 0xf8, 0x2ef: 0xf9, - 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, - 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, - // Block 0xc, offset 0x300 - 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, - 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, - 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, - 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xfa, 0x31f: 0xfb, - // Block 0xd, offset 0x340 - 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, - 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, - 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, - 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, - 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, - 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, - 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, - 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, - // Block 0xe, offset 0x380 - 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, - 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, - 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, - 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, - 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff, - 0x3a8: 0x46, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x47, 0x3ac: 0x48, 0x3ad: 0x49, 0x3ae: 0x4a, 0x3af: 0x4b, - 0x3b0: 0x102, 0x3b1: 0x4c, 0x3b2: 0x4d, 0x3b3: 0x4e, 0x3b4: 0x4f, 0x3b5: 0x50, 0x3b6: 0x103, 0x3b7: 0x51, - 0x3b8: 0x52, 0x3b9: 0x53, 0x3ba: 0x54, 0x3bb: 0x55, 0x3bc: 0x56, 0x3bd: 0x57, 0x3be: 0x58, 0x3bf: 0x59, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0x9f, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9b, 0x3c6: 0x108, 0x3c7: 0x109, - 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f, - 0x3d0: 0x110, 0x3d1: 0x9f, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xba, 0x3d7: 0xba, - 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xba, 0x3df: 0xba, - 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xba, 0x3e6: 0x11c, 0x3e7: 0x11d, - 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5a, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5b, 0x3ef: 0xba, - 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, - 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba, - // Block 0x10, offset 0x400 - 0x400: 0x128, 0x401: 0x129, 0x402: 0x12a, 0x403: 0x12b, 0x404: 0x12c, 0x405: 0x12d, 0x406: 0x12e, 0x407: 0x12f, - 0x408: 0x130, 0x409: 0xba, 
0x40a: 0x131, 0x40b: 0x132, 0x40c: 0x5c, 0x40d: 0x5d, 0x40e: 0xba, 0x40f: 0xba, - 0x410: 0x133, 0x411: 0x134, 0x412: 0x135, 0x413: 0x136, 0x414: 0xba, 0x415: 0xba, 0x416: 0x137, 0x417: 0x138, - 0x418: 0x139, 0x419: 0x13a, 0x41a: 0x13b, 0x41b: 0x13c, 0x41c: 0x13d, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, - 0x420: 0xba, 0x421: 0xba, 0x422: 0x13e, 0x423: 0x13f, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, - 0x428: 0xba, 0x429: 0xba, 0x42a: 0xba, 0x42b: 0x140, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, - 0x430: 0x141, 0x431: 0x142, 0x432: 0x143, 0x433: 0xba, 0x434: 0xba, 0x435: 0xba, 0x436: 0xba, 0x437: 0xba, - 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, - // Block 0x11, offset 0x440 - 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, - 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x144, 0x44f: 0xba, - 0x450: 0x9b, 0x451: 0x145, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x146, 0x456: 0xba, 0x457: 0xba, - 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, - 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, - 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, - 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, - 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, - // Block 0x12, offset 0x480 - 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, - 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, - 0x490: 0x147, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, - 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, - 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, - 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, - 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, - 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, - // Block 0x13, offset 0x4c0 - 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, - 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, - 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, - 0x4d8: 0x9f, 0x4d9: 0x148, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, - 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, - 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, - 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, - 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba, - // Block 0x14, offset 0x500 - 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, - 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 
0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, - 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, - 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, - 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, - 0x528: 0x140, 0x529: 0x149, 0x52a: 0xba, 0x52b: 0x14a, 0x52c: 0x14b, 0x52d: 0x14c, 0x52e: 0x14d, 0x52f: 0xba, - 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, - 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x14e, 0x53e: 0x14f, 0x53f: 0x150, - // Block 0x15, offset 0x540 - 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, - 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, - 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, - 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x151, - 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, - 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x152, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, - 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, - 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, - // Block 0x16, offset 0x580 - 0x580: 0x153, 0x581: 0xba, 0x582: 0xba, 0x583: 0xba, 0x584: 0xba, 0x585: 0xba, 0x586: 0xba, 0x587: 0xba, - 0x588: 0xba, 0x589: 0xba, 0x58a: 0xba, 0x58b: 0xba, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, - 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, - 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, - 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, - 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, - 0x5b0: 0x9f, 0x5b1: 0x154, 0x5b2: 0x155, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, - 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, - // Block 0x17, offset 0x5c0 - 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x156, 0x5c4: 0x157, 0x5c5: 0x158, 0x5c6: 0x159, 0x5c7: 0x15a, - 0x5c8: 0x9b, 0x5c9: 0x15b, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x15c, 0x5ce: 0xba, 0x5cf: 0xba, - 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65, - 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d, - 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, - 0x5e8: 0x15d, 0x5e9: 0x15e, 0x5ea: 0x15f, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, - 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, - 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba, - // Block 0x18, offset 0x600 - 0x600: 0x160, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, - 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 
0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, - 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, - 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, - 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x161, 0x624: 0x6e, 0x625: 0x162, 0x626: 0xba, 0x627: 0xba, - 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, - 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, - 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x163, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, - // Block 0x19, offset 0x640 - 0x640: 0x164, 0x641: 0x9b, 0x642: 0x165, 0x643: 0x166, 0x644: 0x72, 0x645: 0x73, 0x646: 0x167, 0x647: 0x168, - 0x648: 0x74, 0x649: 0x169, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, - 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, - 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x16a, 0x65c: 0x9b, 0x65d: 0x16b, 0x65e: 0x9b, 0x65f: 0x16c, - 0x660: 0x16d, 0x661: 0x16e, 0x662: 0x16f, 0x663: 0xba, 0x664: 0x170, 0x665: 0x171, 0x666: 0x172, 0x667: 0x173, - 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, - 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, - 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, - // Block 0x1a, offset 0x680 - 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, - 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, - 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, - 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x174, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, - 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, - 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, - 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, - 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, - // Block 0x1b, offset 0x6c0 - 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, - 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, - 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, - 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x175, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, - 0x6e0: 0x176, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, - 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, - 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, - 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, - // Block 0x1c, offset 0x700 - 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, - 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 
0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, - 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, - 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, - 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, - 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, - 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, - 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x177, 0x73b: 0xba, 0x73c: 0xba, 0x73d: 0xba, 0x73e: 0xba, 0x73f: 0xba, - // Block 0x1d, offset 0x740 - 0x740: 0xba, 0x741: 0xba, 0x742: 0xba, 0x743: 0xba, 0x744: 0xba, 0x745: 0xba, 0x746: 0xba, 0x747: 0xba, - 0x748: 0xba, 0x749: 0xba, 0x74a: 0xba, 0x74b: 0xba, 0x74c: 0xba, 0x74d: 0xba, 0x74e: 0xba, 0x74f: 0xba, - 0x750: 0xba, 0x751: 0xba, 0x752: 0xba, 0x753: 0xba, 0x754: 0xba, 0x755: 0xba, 0x756: 0xba, 0x757: 0xba, - 0x758: 0xba, 0x759: 0xba, 0x75a: 0xba, 0x75b: 0xba, 0x75c: 0xba, 0x75d: 0xba, 0x75e: 0xba, 0x75f: 0xba, - 0x760: 0x75, 0x761: 0x76, 0x762: 0x77, 0x763: 0x178, 0x764: 0x78, 0x765: 0x79, 0x766: 0x179, 0x767: 0x7a, - 0x768: 0x7b, 0x769: 0xba, 0x76a: 0xba, 0x76b: 0xba, 0x76c: 0xba, 0x76d: 0xba, 0x76e: 0xba, 0x76f: 0xba, - 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, - 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, - // Block 0x1e, offset 0x780 - 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, - 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, - 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, - 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, - 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, - 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, - 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, - 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, - 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b, - 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, - 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, - 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, - 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, - // Block 0x20, offset 0x800 - 0x800: 0x17a, 0x801: 0x17b, 0x802: 0xba, 0x803: 0xba, 0x804: 0x17c, 0x805: 0x17c, 0x806: 0x17c, 0x807: 0x17d, - 0x808: 0xba, 0x809: 0xba, 0x80a: 0xba, 0x80b: 0xba, 0x80c: 0xba, 0x80d: 0xba, 0x80e: 0xba, 0x80f: 0xba, - 0x810: 0xba, 0x811: 0xba, 0x812: 0xba, 0x813: 0xba, 0x814: 0xba, 0x815: 0xba, 0x816: 0xba, 0x817: 0xba, - 0x818: 0xba, 0x819: 0xba, 0x81a: 0xba, 0x81b: 0xba, 0x81c: 0xba, 0x81d: 0xba, 0x81e: 0xba, 0x81f: 
0xba, - 0x820: 0xba, 0x821: 0xba, 0x822: 0xba, 0x823: 0xba, 0x824: 0xba, 0x825: 0xba, 0x826: 0xba, 0x827: 0xba, - 0x828: 0xba, 0x829: 0xba, 0x82a: 0xba, 0x82b: 0xba, 0x82c: 0xba, 0x82d: 0xba, 0x82e: 0xba, 0x82f: 0xba, - 0x830: 0xba, 0x831: 0xba, 0x832: 0xba, 0x833: 0xba, 0x834: 0xba, 0x835: 0xba, 0x836: 0xba, 0x837: 0xba, - 0x838: 0xba, 0x839: 0xba, 0x83a: 0xba, 0x83b: 0xba, 0x83c: 0xba, 0x83d: 0xba, 0x83e: 0xba, 0x83f: 0xba, - // Block 0x21, offset 0x840 - 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, - 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, - 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, - 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, - 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, - 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, - 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, - 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, - // Block 0x22, offset 0x880 - 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, - 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, -} - -// idnaSparseOffset: 258 entries, 516 bytes -var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x93, 0x98, 0xa1, 0xb1, 0xbf, 0xcc, 0xd8, 0xe9, 0xf3, 0xfa, 0x107, 0x118, 0x11f, 0x12a, 0x139, 0x147, 0x151, 0x153, 0x158, 0x15b, 0x15e, 0x160, 0x16c, 0x177, 0x17f, 0x185, 0x18b, 0x190, 0x195, 0x198, 0x19c, 0x1a2, 0x1a7, 0x1b3, 0x1bd, 0x1c3, 0x1d4, 0x1de, 0x1e1, 0x1e9, 0x1ec, 0x1f9, 0x201, 0x205, 0x20c, 0x214, 0x224, 0x230, 0x232, 0x23c, 0x248, 0x254, 0x260, 0x268, 0x26d, 0x277, 0x288, 0x28c, 0x297, 0x29b, 0x2a4, 0x2ac, 0x2b2, 0x2b7, 0x2ba, 0x2bd, 0x2c1, 0x2c7, 0x2cb, 0x2cf, 0x2d5, 0x2dc, 0x2e2, 0x2ea, 0x2f1, 0x2fc, 0x306, 0x30a, 0x30d, 0x313, 0x317, 0x319, 0x31c, 0x31e, 0x321, 0x32b, 0x32e, 0x33d, 0x341, 0x346, 0x349, 0x34d, 0x352, 0x357, 0x35d, 0x363, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f6, 0x3fb, 0x408, 0x40c, 0x411, 0x413, 0x417, 0x419, 0x41d, 0x426, 0x42c, 0x430, 0x440, 0x44a, 0x44f, 0x452, 0x458, 0x45f, 0x464, 0x468, 0x46e, 0x473, 0x47c, 0x481, 0x487, 0x48e, 0x495, 0x49c, 0x4a0, 0x4a5, 0x4a8, 0x4ad, 0x4b9, 0x4bf, 0x4c4, 0x4cb, 0x4d3, 0x4d8, 0x4dc, 0x4ec, 0x4f3, 0x4f7, 0x4fb, 0x502, 0x504, 0x507, 0x50a, 0x50e, 0x512, 0x518, 0x521, 0x52d, 0x534, 0x53d, 0x545, 0x54c, 0x55a, 0x567, 0x574, 0x57d, 0x581, 0x58f, 0x597, 0x5a2, 0x5ab, 0x5b1, 0x5b9, 0x5c2, 0x5cc, 0x5cf, 0x5db, 0x5de, 0x5e3, 0x5e6, 0x5f0, 0x5f9, 0x605, 0x608, 0x60d, 0x610, 0x613, 0x616, 0x61d, 0x624, 0x628, 0x633, 0x636, 0x63c, 0x641, 0x645, 0x648, 0x64b, 0x64e, 0x653, 0x65d, 0x660, 0x664, 0x673, 0x67f, 0x683, 0x688, 0x68d, 0x691, 0x696, 0x69f, 0x6aa, 0x6b0, 0x6b8, 0x6bc, 0x6c0, 0x6c6, 0x6cc, 0x6d1, 0x6d4, 0x6e2, 0x6e9, 0x6ec, 0x6ef, 0x6f3, 0x6f9, 0x6fe, 0x708, 0x70d, 0x710, 0x713, 0x716, 0x719, 0x71d, 0x720, 0x730, 0x741, 0x746, 0x748, 0x74a} - -// idnaSparseValues: 1869 entries, 7476 bytes -var idnaSparseValues = [1869]valueRange{ - // Block 0x0, offset 0x0 - {value: 0x0000, lo: 
0x07}, - {value: 0xe105, lo: 0x80, hi: 0x96}, - {value: 0x0018, lo: 0x97, hi: 0x97}, - {value: 0xe105, lo: 0x98, hi: 0x9e}, - {value: 0x001f, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbf}, - // Block 0x1, offset 0x8 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0xe01d, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x82}, - {value: 0x0335, lo: 0x83, hi: 0x83}, - {value: 0x034d, lo: 0x84, hi: 0x84}, - {value: 0x0365, lo: 0x85, hi: 0x85}, - {value: 0xe00d, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x87}, - {value: 0xe00d, lo: 0x88, hi: 0x88}, - {value: 0x0008, lo: 0x89, hi: 0x89}, - {value: 0xe00d, lo: 0x8a, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0x8b}, - {value: 0xe00d, lo: 0x8c, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0x8d}, - {value: 0xe00d, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0xbf}, - // Block 0x2, offset 0x19 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x0249, lo: 0xb0, hi: 0xb0}, - {value: 0x037d, lo: 0xb1, hi: 0xb1}, - {value: 0x0259, lo: 0xb2, hi: 0xb2}, - {value: 0x0269, lo: 0xb3, hi: 0xb3}, - {value: 0x034d, lo: 0xb4, hi: 0xb4}, - {value: 0x0395, lo: 0xb5, hi: 0xb5}, - {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, - {value: 0x0279, lo: 0xb7, hi: 0xb7}, - {value: 0x0289, lo: 0xb8, hi: 0xb8}, - {value: 0x0008, lo: 0xb9, hi: 0xbf}, - // Block 0x3, offset 0x25 - {value: 0x0000, lo: 0x01}, - {value: 0x3308, lo: 0x80, hi: 0xbf}, - // Block 0x4, offset 0x27 - {value: 0x0000, lo: 0x04}, - {value: 0x03f5, lo: 0x80, hi: 0x8f}, - {value: 0xe105, lo: 0x90, hi: 0x9f}, - {value: 0x049d, lo: 0xa0, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x5, offset 0x2c - {value: 0x0000, lo: 0x07}, - {value: 0xe185, lo: 0x80, hi: 0x8f}, - {value: 0x0545, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x0008, lo: 0x99, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xbf}, - // Block 0x6, offset 0x34 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0401, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x88}, - {value: 0x0018, lo: 0x89, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x3308, lo: 0x91, hi: 0xbd}, - {value: 0x0818, lo: 0xbe, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x7, offset 0x3f - {value: 0x0000, lo: 0x0b}, - {value: 0x0818, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x82}, - {value: 0x0818, lo: 0x83, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x85}, - {value: 0x0818, lo: 0x86, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0808, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x8, offset 0x4b - {value: 0x0000, lo: 0x03}, - {value: 0x0a08, lo: 0x80, hi: 0x87}, - {value: 0x0c08, lo: 0x88, hi: 0x99}, - {value: 0x0a08, lo: 0x9a, hi: 0xbf}, - // Block 0x9, offset 0x4f - {value: 0x0000, lo: 0x0e}, - {value: 0x3308, lo: 0x80, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0c08, lo: 0x8d, hi: 0x8d}, - {value: 0x0a08, lo: 0x8e, hi: 0x98}, - {value: 0x0c08, lo: 0x99, hi: 0x9b}, - {value: 0x0a08, lo: 0x9c, hi: 0xaa}, - {value: 0x0c08, lo: 0xab, hi: 0xac}, - {value: 0x0a08, lo: 0xad, hi: 
0xb0}, - {value: 0x0c08, lo: 0xb1, hi: 0xb1}, - {value: 0x0a08, lo: 0xb2, hi: 0xb2}, - {value: 0x0c08, lo: 0xb3, hi: 0xb4}, - {value: 0x0a08, lo: 0xb5, hi: 0xb7}, - {value: 0x0c08, lo: 0xb8, hi: 0xb9}, - {value: 0x0a08, lo: 0xba, hi: 0xbf}, - // Block 0xa, offset 0x5e - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xb0}, - {value: 0x0808, lo: 0xb1, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xb, offset 0x63 - {value: 0x0000, lo: 0x07}, - {value: 0x0808, lo: 0x80, hi: 0x89}, - {value: 0x0a08, lo: 0x8a, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xb3}, - {value: 0x0808, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xb9}, - {value: 0x0818, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0xc, offset 0x6b - {value: 0x0000, lo: 0x0b}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x99}, - {value: 0x0808, lo: 0x9a, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0xa3}, - {value: 0x0808, lo: 0xa4, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa7}, - {value: 0x0808, lo: 0xa8, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0818, lo: 0xb0, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd, offset 0x77 - {value: 0x0000, lo: 0x0d}, - {value: 0x0c08, lo: 0x80, hi: 0x80}, - {value: 0x0a08, lo: 0x81, hi: 0x85}, - {value: 0x0c08, lo: 0x86, hi: 0x87}, - {value: 0x0a08, lo: 0x88, hi: 0x88}, - {value: 0x0c08, lo: 0x89, hi: 0x89}, - {value: 0x0a08, lo: 0x8a, hi: 0x93}, - {value: 0x0c08, lo: 0x94, hi: 0x94}, - {value: 0x0a08, lo: 0x95, hi: 0x95}, - {value: 0x0808, lo: 0x96, hi: 0x98}, - {value: 0x3308, lo: 0x99, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9d}, - {value: 0x0818, lo: 0x9e, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xe, offset 0x85 - {value: 0x0000, lo: 0x0d}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0a08, lo: 0xa0, hi: 0xa9}, - {value: 0x0c08, lo: 0xaa, hi: 0xac}, - {value: 0x0808, lo: 0xad, hi: 0xad}, - {value: 0x0c08, lo: 0xae, hi: 0xae}, - {value: 0x0a08, lo: 0xaf, hi: 0xb0}, - {value: 0x0c08, lo: 0xb1, hi: 0xb2}, - {value: 0x0a08, lo: 0xb3, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0a08, lo: 0xb6, hi: 0xb8}, - {value: 0x0c08, lo: 0xb9, hi: 0xb9}, - {value: 0x0a08, lo: 0xba, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0xf, offset 0x93 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x93}, - {value: 0x3308, lo: 0x94, hi: 0xa1}, - {value: 0x0840, lo: 0xa2, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xbf}, - // Block 0x10, offset 0x98 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x11, offset 0xa1 - {value: 0x0000, lo: 0x0f}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x85}, - {value: 0x3008, lo: 0x86, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x3008, lo: 0x8a, hi: 0x8c}, - {value: 0x3b08, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - 
{value: 0x0018, lo: 0xb0, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x12, offset 0xb1 - {value: 0x0000, lo: 0x0d}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xa9}, - {value: 0x0008, lo: 0xaa, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x13, offset 0xbf - {value: 0x0000, lo: 0x0c}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x14, offset 0xcc - {value: 0x0000, lo: 0x0b}, - {value: 0x0040, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xb2}, - {value: 0x0008, lo: 0xb3, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x15, offset 0xd8 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x89}, - {value: 0x3b08, lo: 0x8a, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8e}, - {value: 0x3008, lo: 0x8f, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x3008, lo: 0x98, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x16, offset 0xe9 - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb2}, - {value: 0x08f1, lo: 0xb3, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb9}, - {value: 0x3b08, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0x17, offset 0xf3 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x8e}, - {value: 0x0018, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0xbf}, - // Block 0x18, offset 0xfa - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x3308, lo: 0x88, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0961, lo: 0x9c, hi: 0x9c}, - {value: 0x0999, lo: 0x9d, hi: 0x9d}, - {value: 0x0008, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, 
hi: 0xbf}, - // Block 0x19, offset 0x107 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0x8b}, - {value: 0xe03d, lo: 0x8c, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xb8}, - {value: 0x3308, lo: 0xb9, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x1a, offset 0x118 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0018, lo: 0x8e, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0x1b, offset 0x11f - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x3008, lo: 0xab, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xb0}, - {value: 0x3008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x1c, offset 0x12a - {value: 0x0000, lo: 0x0e}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x3008, lo: 0x96, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x3308, lo: 0x9e, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xa1}, - {value: 0x3008, lo: 0xa2, hi: 0xa4}, - {value: 0x0008, lo: 0xa5, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xbf}, - // Block 0x1d, offset 0x139 - {value: 0x0000, lo: 0x0d}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x3008, lo: 0x87, hi: 0x8c}, - {value: 0x3308, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x8e}, - {value: 0x3008, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x3008, lo: 0x9a, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x1e, offset 0x147 - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x86}, - {value: 0x055d, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8c}, - {value: 0x055d, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbb}, - {value: 0xe105, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0x1f, offset 0x151 - {value: 0x0000, lo: 0x01}, - {value: 0x0018, lo: 0x80, hi: 0xbf}, - // Block 0x20, offset 0x153 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xa0}, - {value: 0x2018, lo: 0xa1, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x21, offset 0x158 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xa7}, - {value: 0x2018, lo: 0xa8, hi: 0xbf}, - // Block 0x22, offset 0x15b - {value: 0x0000, lo: 0x02}, - {value: 0x2018, lo: 0x80, 
hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0xbf}, - // Block 0x23, offset 0x15e - {value: 0x0000, lo: 0x01}, - {value: 0x0008, lo: 0x80, hi: 0xbf}, - // Block 0x24, offset 0x160 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x25, offset 0x16c - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x26, offset 0x177 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x27, offset 0x17f - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x28, offset 0x185 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x29, offset 0x18b - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x2a, offset 0x190 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0xe045, lo: 0xb8, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x2b, offset 0x195 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x2c, offset 0x198 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xac}, - {value: 0x0018, lo: 0xad, hi: 0xae}, - {value: 0x0008, lo: 0xaf, hi: 0xbf}, - // Block 0x2d, offset 0x19c - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9c}, - {value: 0x0040, lo: 0x9d, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x2e, offset 0x1a2 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0x2f, offset 0x1a7 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x93}, - {value: 0x3b08, lo: 0x94, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x3b08, lo: 0xb4, hi: 0xb4}, - 
{value: 0x0018, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x30, offset 0x1b3 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x31, offset 0x1bd - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xb3}, - {value: 0x3340, lo: 0xb4, hi: 0xb5}, - {value: 0x3008, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x32, offset 0x1c3 - {value: 0x0000, lo: 0x10}, - {value: 0x3008, lo: 0x80, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x3008, lo: 0x87, hi: 0x88}, - {value: 0x3308, lo: 0x89, hi: 0x91}, - {value: 0x3b08, lo: 0x92, hi: 0x92}, - {value: 0x3308, lo: 0x93, hi: 0x93}, - {value: 0x0018, lo: 0x94, hi: 0x96}, - {value: 0x0008, lo: 0x97, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x33, offset 0x1d4 - {value: 0x0000, lo: 0x09}, - {value: 0x0018, lo: 0x80, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x86}, - {value: 0x0218, lo: 0x87, hi: 0x87}, - {value: 0x0018, lo: 0x88, hi: 0x8a}, - {value: 0x33c0, lo: 0x8b, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0208, lo: 0xa0, hi: 0xbf}, - // Block 0x34, offset 0x1de - {value: 0x0000, lo: 0x02}, - {value: 0x0208, lo: 0x80, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x35, offset 0x1e1 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x0208, lo: 0x87, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xa9}, - {value: 0x0208, lo: 0xaa, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x36, offset 0x1e9 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0x37, offset 0x1ec - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb8}, - {value: 0x3308, lo: 0xb9, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x38, offset 0x1f9 - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0x83}, - {value: 0x0018, lo: 0x84, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x39, offset 0x201 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x3a, offset 0x205 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 
0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0028, lo: 0x9a, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0xbf}, - // Block 0x3b, offset 0x20c - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x3308, lo: 0x97, hi: 0x98}, - {value: 0x3008, lo: 0x99, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x3c, offset 0x214 - {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x94}, - {value: 0x3008, lo: 0x95, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3b08, lo: 0xa0, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xac}, - {value: 0x3008, lo: 0xad, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x3d, offset 0x224 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa7}, - {value: 0x0018, lo: 0xa8, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xbd}, - {value: 0x3318, lo: 0xbe, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x3e, offset 0x230 - {value: 0x0000, lo: 0x01}, - {value: 0x0040, lo: 0x80, hi: 0xbf}, - // Block 0x3f, offset 0x232 - {value: 0x0000, lo: 0x09}, - {value: 0x3308, lo: 0x80, hi: 0x83}, - {value: 0x3008, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbf}, - // Block 0x40, offset 0x23c - {value: 0x0000, lo: 0x0b}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x3808, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x41, offset 0x248 - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa9}, - {value: 0x3808, lo: 0xaa, hi: 0xaa}, - {value: 0x3b08, lo: 0xab, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xbf}, - // Block 0x42, offset 0x254 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa9}, - {value: 0x3008, lo: 0xaa, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb1}, - {value: 0x3808, lo: 
0xb2, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbf}, - // Block 0x43, offset 0x260 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x3008, lo: 0xa4, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x44, offset 0x268 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x45, offset 0x26d - {value: 0x0000, lo: 0x09}, - {value: 0x0e29, lo: 0x80, hi: 0x80}, - {value: 0x0e41, lo: 0x81, hi: 0x81}, - {value: 0x0e59, lo: 0x82, hi: 0x82}, - {value: 0x0e71, lo: 0x83, hi: 0x83}, - {value: 0x0e89, lo: 0x84, hi: 0x85}, - {value: 0x0ea1, lo: 0x86, hi: 0x86}, - {value: 0x0eb9, lo: 0x87, hi: 0x87}, - {value: 0x057d, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0x46, offset 0x277 - {value: 0x0000, lo: 0x10}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x92}, - {value: 0x0018, lo: 0x93, hi: 0x93}, - {value: 0x3308, lo: 0x94, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa8}, - {value: 0x0008, lo: 0xa9, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x47, offset 0x288 - {value: 0x0000, lo: 0x03}, - {value: 0x3308, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0x48, offset 0x28c - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x87}, - {value: 0xe045, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0xe045, lo: 0x98, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0xe045, lo: 0xa8, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb7}, - {value: 0xe045, lo: 0xb8, hi: 0xbf}, - // Block 0x49, offset 0x297 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x3318, lo: 0x90, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xbf}, - // Block 0x4a, offset 0x29b - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x88}, - {value: 0x24c1, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x4b, offset 0x2a4 - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x24f1, lo: 0xac, hi: 0xac}, - {value: 0x2529, lo: 0xad, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xae}, - {value: 0x2579, lo: 0xaf, hi: 0xaf}, - {value: 0x25b1, lo: 0xb0, hi: 0xb0}, - {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x4c, offset 0x2ac - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x9f}, - {value: 0x0080, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xad}, - {value: 0x0080, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x4d, offset 0x2b2 - {value: 0x0000, lo: 0x04}, - 
{value: 0x0018, lo: 0x80, hi: 0xa8}, - {value: 0x09c5, lo: 0xa9, hi: 0xa9}, - {value: 0x09e5, lo: 0xaa, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xbf}, - // Block 0x4e, offset 0x2b7 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x4f, offset 0x2ba - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xbf}, - // Block 0x50, offset 0x2bd - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x28c1, lo: 0x8c, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x51, offset 0x2c1 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0e66, lo: 0xb4, hi: 0xb4}, - {value: 0x292a, lo: 0xb5, hi: 0xb5}, - {value: 0x0e86, lo: 0xb6, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x52, offset 0x2c7 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x9b}, - {value: 0x2941, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0xbf}, - // Block 0x53, offset 0x2cb - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x54, offset 0x2cf - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbc}, - {value: 0x0018, lo: 0xbd, hi: 0xbf}, - // Block 0x55, offset 0x2d5 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xab}, - {value: 0x0018, lo: 0xac, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x56, offset 0x2dc - {value: 0x0000, lo: 0x05}, - {value: 0xe185, lo: 0x80, hi: 0x8f}, - {value: 0x03f5, lo: 0x90, hi: 0x9f}, - {value: 0x0ea5, lo: 0xa0, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x57, offset 0x2e2 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xac}, - {value: 0x0008, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x58, offset 0x2ea - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xae}, - {value: 0xe075, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0x59, offset 0x2f1 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x5a, offset 0x2fc - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xbf}, - // Block 0x5b, offset 0x306 - {value: 0x0000, lo: 0x03}, 
- {value: 0x0018, lo: 0x80, hi: 0xae}, - {value: 0x0008, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x5c, offset 0x30a - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0xbf}, - // Block 0x5d, offset 0x30d - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9e}, - {value: 0x0edd, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x5e, offset 0x313 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xb2}, - {value: 0x0efd, lo: 0xb3, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x5f, offset 0x317 - {value: 0x0020, lo: 0x01}, - {value: 0x0f1d, lo: 0x80, hi: 0xbf}, - // Block 0x60, offset 0x319 - {value: 0x0020, lo: 0x02}, - {value: 0x171d, lo: 0x80, hi: 0x8f}, - {value: 0x18fd, lo: 0x90, hi: 0xbf}, - // Block 0x61, offset 0x31c - {value: 0x0020, lo: 0x01}, - {value: 0x1efd, lo: 0x80, hi: 0xbf}, - // Block 0x62, offset 0x31e - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x63, offset 0x321 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x3308, lo: 0x99, hi: 0x9a}, - {value: 0x29e2, lo: 0x9b, hi: 0x9b}, - {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, - {value: 0x0008, lo: 0x9d, hi: 0x9e}, - {value: 0x2a31, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xbf}, - // Block 0x64, offset 0x32b - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xbe}, - {value: 0x2a69, lo: 0xbf, hi: 0xbf}, - // Block 0x65, offset 0x32e - {value: 0x0000, lo: 0x0e}, - {value: 0x0040, lo: 0x80, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xb0}, - {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, - {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, - {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, - {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, - {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, - {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, - {value: 0x2abd, lo: 0xb7, hi: 0xb7}, - {value: 0x2add, lo: 0xb8, hi: 0xb9}, - {value: 0x2afd, lo: 0xba, hi: 0xbb}, - {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, - {value: 0x2afd, lo: 0xbe, hi: 0xbf}, - // Block 0x66, offset 0x33d - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x67, offset 0x341 - {value: 0x0030, lo: 0x04}, - {value: 0x2aa2, lo: 0x80, hi: 0x9d}, - {value: 0x305a, lo: 0x9e, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x30a2, lo: 0xa0, hi: 0xbf}, - // Block 0x68, offset 0x346 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x69, offset 0x349 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x6a, offset 0x34d - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x6b, offset 0x352 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x6c, offset 0x357 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 
0x0018, lo: 0xa6, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb1}, - {value: 0x0018, lo: 0xb2, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6d, offset 0x35d - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0xb6}, - {value: 0x0008, lo: 0xb7, hi: 0xb7}, - {value: 0x2009, lo: 0xb8, hi: 0xb8}, - {value: 0x6e89, lo: 0xb9, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xbf}, - // Block 0x6e, offset 0x363 - {value: 0x0000, lo: 0x0e}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0x85}, - {value: 0x3b08, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x3308, lo: 0x8b, hi: 0x8b}, - {value: 0x0008, lo: 0x8c, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xa7}, - {value: 0x0018, lo: 0xa8, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x6f, offset 0x372 - {value: 0x0000, lo: 0x05}, - {value: 0x0208, lo: 0x80, hi: 0xb1}, - {value: 0x0108, lo: 0xb2, hi: 0xb2}, - {value: 0x0008, lo: 0xb3, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x70, offset 0x378 - {value: 0x0000, lo: 0x03}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xbf}, - // Block 0x71, offset 0x37c - {value: 0x0000, lo: 0x0e}, - {value: 0x3008, lo: 0x80, hi: 0x83}, - {value: 0x3b08, lo: 0x84, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8d}, - {value: 0x0018, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xba}, - {value: 0x0008, lo: 0xbb, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x72, offset 0x38b - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x73, offset 0x390 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x91}, - {value: 0x3008, lo: 0x92, hi: 0x92}, - {value: 0x3808, lo: 0x93, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x74, offset 0x398 - {value: 0x0000, lo: 0x09}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb9}, - {value: 0x3008, lo: 0xba, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbf}, - // Block 0x75, offset 0x3a2 - {value: 0x0000, lo: 0x0a}, - {value: 0x3808, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x76, offset 0x3ad - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa8}, - {value: 
0x3308, lo: 0xa9, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x77, offset 0x3b5 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x8b}, - {value: 0x3308, lo: 0x8c, hi: 0x8c}, - {value: 0x3008, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0018, lo: 0x9c, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbd}, - {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0x78, offset 0x3c6 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb8}, - {value: 0x0008, lo: 0xb9, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x79, offset 0x3cf - {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x9a}, - {value: 0x0008, lo: 0x9b, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xaa}, - {value: 0x3008, lo: 0xab, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb5}, - {value: 0x3b08, lo: 0xb6, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x7a, offset 0x3df - {value: 0x0000, lo: 0x0c}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x88}, - {value: 0x0008, lo: 0x89, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x90}, - {value: 0x0008, lo: 0x91, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x7b, offset 0x3ec - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x4465, lo: 0x9c, hi: 0x9c}, - {value: 0x447d, lo: 0x9d, hi: 0x9d}, - {value: 0x2971, lo: 0x9e, hi: 0x9e}, - {value: 0xe06d, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xaf}, - {value: 0x4495, lo: 0xb0, hi: 0xbf}, - // Block 0x7c, offset 0x3f6 - {value: 0x0000, lo: 0x04}, - {value: 0x44b5, lo: 0x80, hi: 0x8f}, - {value: 0x44d5, lo: 0x90, hi: 0x9f}, - {value: 0x44f5, lo: 0xa0, hi: 0xaf}, - {value: 0x44d5, lo: 0xb0, hi: 0xbf}, - // Block 0x7d, offset 0x3fb - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3b08, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 
0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x7e, offset 0x408 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x7f, offset 0x40c - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8a}, - {value: 0x0018, lo: 0x8b, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x80, offset 0x411 - {value: 0x0020, lo: 0x01}, - {value: 0x4515, lo: 0x80, hi: 0xbf}, - // Block 0x81, offset 0x413 - {value: 0x0020, lo: 0x03}, - {value: 0x4d15, lo: 0x80, hi: 0x94}, - {value: 0x4ad5, lo: 0x95, hi: 0x95}, - {value: 0x4fb5, lo: 0x96, hi: 0xbf}, - // Block 0x82, offset 0x417 - {value: 0x0020, lo: 0x01}, - {value: 0x54f5, lo: 0x80, hi: 0xbf}, - // Block 0x83, offset 0x419 - {value: 0x0020, lo: 0x03}, - {value: 0x5cf5, lo: 0x80, hi: 0x84}, - {value: 0x5655, lo: 0x85, hi: 0x85}, - {value: 0x5d95, lo: 0x86, hi: 0xbf}, - // Block 0x84, offset 0x41d - {value: 0x0020, lo: 0x08}, - {value: 0x6b55, lo: 0x80, hi: 0x8f}, - {value: 0x6d15, lo: 0x90, hi: 0x90}, - {value: 0x6d55, lo: 0x91, hi: 0xab}, - {value: 0x6ea1, lo: 0xac, hi: 0xac}, - {value: 0x70b5, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x70d5, lo: 0xb0, hi: 0xbf}, - // Block 0x85, offset 0x426 - {value: 0x0020, lo: 0x05}, - {value: 0x72d5, lo: 0x80, hi: 0xad}, - {value: 0x6535, lo: 0xae, hi: 0xae}, - {value: 0x7895, lo: 0xaf, hi: 0xb5}, - {value: 0x6f55, lo: 0xb6, hi: 0xb6}, - {value: 0x7975, lo: 0xb7, hi: 0xbf}, - // Block 0x86, offset 0x42c - {value: 0x0028, lo: 0x03}, - {value: 0x7c21, lo: 0x80, hi: 0x82}, - {value: 0x7be1, lo: 0x83, hi: 0x83}, - {value: 0x7c99, lo: 0x84, hi: 0xbf}, - // Block 0x87, offset 0x430 - {value: 0x0038, lo: 0x0f}, - {value: 0x9db1, lo: 0x80, hi: 0x83}, - {value: 0x9e59, lo: 0x84, hi: 0x85}, - {value: 0x9e91, lo: 0x86, hi: 0x87}, - {value: 0x9ec9, lo: 0x88, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0xa089, lo: 0x92, hi: 0x97}, - {value: 0xa1a1, lo: 0x98, hi: 0x9c}, - {value: 0xa281, lo: 0x9d, hi: 0xb3}, - {value: 0x9d41, lo: 0xb4, hi: 0xb4}, - {value: 0x9db1, lo: 0xb5, hi: 0xb5}, - {value: 0xa789, lo: 0xb6, hi: 0xbb}, - {value: 0xa869, lo: 0xbc, hi: 0xbc}, - {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, - {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, - // Block 0x88, offset 0x440 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbb}, - {value: 0x0008, lo: 0xbc, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x89, offset 0x44a - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x8a, offset 0x44f - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x8b, offset 0x452 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x8c, offset 0x458 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x8e}, - {value: 
0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x8d, offset 0x45f - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x8e, offset 0x464 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9c}, - {value: 0x0040, lo: 0x9d, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x8f, offset 0x468 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x90, offset 0x46e - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x91, offset 0x473 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x92, offset 0x47c - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x93, offset 0x481 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x94, offset 0x487 - {value: 0x0000, lo: 0x06}, - {value: 0xe145, lo: 0x80, hi: 0x87}, - {value: 0xe1c5, lo: 0x88, hi: 0x8f}, - {value: 0xe145, lo: 0x90, hi: 0x97}, - {value: 0x8ad5, lo: 0x98, hi: 0x9f}, - {value: 0x8aed, lo: 0xa0, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xbf}, - // Block 0x95, offset 0x48e - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x8aed, lo: 0xb0, hi: 0xb7}, - {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, - // Block 0x96, offset 0x495 - {value: 0x0000, lo: 0x06}, - {value: 0xe145, lo: 0x80, hi: 0x87}, - {value: 0xe1c5, lo: 0x88, hi: 0x8f}, - {value: 0xe145, lo: 0x90, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x97, offset 0x49c - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x98, offset 0x4a0 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xae}, - {value: 0x0018, lo: 0xaf, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x99, offset 0x4a5 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x9a, offset 0x4a8 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xbf}, - // Block 0x9b, offset 0x4ad - {value: 0x0000, 
lo: 0x0b}, - {value: 0x0808, lo: 0x80, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0808, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0808, lo: 0x8a, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb6}, - {value: 0x0808, lo: 0xb7, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbb}, - {value: 0x0808, lo: 0xbc, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x0808, lo: 0xbf, hi: 0xbf}, - // Block 0x9c, offset 0x4b9 - {value: 0x0000, lo: 0x05}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x96}, - {value: 0x0818, lo: 0x97, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb6}, - {value: 0x0818, lo: 0xb7, hi: 0xbf}, - // Block 0x9d, offset 0x4bf - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xa6}, - {value: 0x0818, lo: 0xa7, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x9e, offset 0x4c4 - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb3}, - {value: 0x0808, lo: 0xb4, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x0818, lo: 0xbb, hi: 0xbf}, - // Block 0x9f, offset 0x4cb - {value: 0x0000, lo: 0x07}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0818, lo: 0x96, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbe}, - {value: 0x0818, lo: 0xbf, hi: 0xbf}, - // Block 0xa0, offset 0x4d3 - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbb}, - {value: 0x0818, lo: 0xbc, hi: 0xbd}, - {value: 0x0808, lo: 0xbe, hi: 0xbf}, - // Block 0xa1, offset 0x4d8 - {value: 0x0000, lo: 0x03}, - {value: 0x0818, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x0818, lo: 0x92, hi: 0xbf}, - // Block 0xa2, offset 0x4dc - {value: 0x0000, lo: 0x0f}, - {value: 0x0808, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8b}, - {value: 0x3308, lo: 0x8c, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x94}, - {value: 0x0808, lo: 0x95, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0x98}, - {value: 0x0808, lo: 0x99, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa3, offset 0x4ec - {value: 0x0000, lo: 0x06}, - {value: 0x0818, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0818, lo: 0x90, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xbc}, - {value: 0x0818, lo: 0xbd, hi: 0xbf}, - // Block 0xa4, offset 0x4f3 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0x9c}, - {value: 0x0818, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa5, offset 0x4f7 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb8}, - {value: 0x0018, lo: 0xb9, hi: 0xbf}, - // Block 0xa6, offset 0x4fb - {value: 0x0000, lo: 0x06}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0818, lo: 0x98, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb7}, - {value: 0x0818, lo: 0xb8, hi: 0xbf}, - // Block 0xa7, offset 0x502 - {value: 0x0000, 
lo: 0x01}, - {value: 0x0808, lo: 0x80, hi: 0xbf}, - // Block 0xa8, offset 0x504 - {value: 0x0000, lo: 0x02}, - {value: 0x0808, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xa9, offset 0x507 - {value: 0x0000, lo: 0x02}, - {value: 0x03dd, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xaa, offset 0x50a - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb9}, - {value: 0x0818, lo: 0xba, hi: 0xbf}, - // Block 0xab, offset 0x50e - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0818, lo: 0xa0, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xac, offset 0x512 - {value: 0x0000, lo: 0x05}, - {value: 0x3008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xad, offset 0x518 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x85}, - {value: 0x3b08, lo: 0x86, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x91}, - {value: 0x0018, lo: 0x92, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xae, offset 0x521 - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb6}, - {value: 0x3008, lo: 0xb7, hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbc}, - {value: 0x0340, lo: 0xbd, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xaf, offset 0x52d - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb0, offset 0x534 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xb2}, - {value: 0x3b08, lo: 0xb3, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xb1, offset 0x53d - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb2, offset 0x545 - {value: 0x0000, lo: 0x06}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xbe}, - {value: 0x3008, lo: 0xbf, hi: 0xbf}, - // Block 0xb3, offset 0x54c - {value: 0x0000, lo: 0x0d}, - {value: 0x3808, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x89}, - {value: 0x3308, lo: 0x8a, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, 
hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb4, offset 0x55a - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x3808, lo: 0xb5, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb5, offset 0x567 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9e}, - {value: 0x0008, lo: 0x9f, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xb6, offset 0x574 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x3308, lo: 0x9f, hi: 0x9f}, - {value: 0x3008, lo: 0xa0, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xa9}, - {value: 0x3b08, lo: 0xaa, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb7, offset 0x57d - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xb8, offset 0x581 - {value: 0x0000, lo: 0x0d}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x3b08, lo: 0x82, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x84}, - {value: 0x3008, lo: 0x85, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x0018, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xb9, offset 0x58f - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb8}, - {value: 0x3008, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xba, offset 0x597 - {value: 0x0000, lo: 0x0a}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x81}, - {value: 0x3b08, lo: 0x82, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x85}, - {value: 0x0018, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xbb, offset 0x5a2 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbc, offset 0x5ab - {value: 0x0000, lo: 0x05}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x97}, - {value: 
0x0008, lo: 0x98, hi: 0x9b}, - {value: 0x3308, lo: 0x9c, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xbd, offset 0x5b1 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbe, offset 0x5b9 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xbf, offset 0x5c2 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb5}, - {value: 0x3808, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xc0, offset 0x5cc - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xc1, offset 0x5cf - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9f}, - {value: 0x3008, lo: 0xa0, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xaa}, - {value: 0x3b08, lo: 0xab, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc2, offset 0x5db - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc3, offset 0x5de - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc4, offset 0x5e3 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xc5, offset 0x5e6 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xc6, offset 0x5f0 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xc7, offset 0x5f9 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xa9}, - {value: 0x3308, lo: 0xaa, hi: 0xb0}, - {value: 0x3008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, 
hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xc8, offset 0x605 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xc9, offset 0x608 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xca, offset 0x60d - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xcb, offset 0x610 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xbf}, - // Block 0xcc, offset 0x613 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xcd, offset 0x616 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xce, offset 0x61d - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb4}, - {value: 0x0018, lo: 0xb5, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xcf, offset 0x624 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0xd0, offset 0x628 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0018, lo: 0x84, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x0008, lo: 0xa3, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0xd1, offset 0x633 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xd2, offset 0x636 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x3008, lo: 0x91, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd3, offset 0x63c - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x8e}, - {value: 0x3308, lo: 0x8f, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xd4, offset 0x641 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0xd5, offset 0x645 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xd6, offset 0x648 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xd7, offset 0x64b - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0xbf}, - // Block 0xd8, offset 0x64e - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xd9, offset 0x653 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 
0x89, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0018, lo: 0x9c, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x03c0, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xda, offset 0x65d - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xdb, offset 0x660 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xdc, offset 0x664 - {value: 0x0000, lo: 0x0e}, - {value: 0x0018, lo: 0x80, hi: 0x9d}, - {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, - {value: 0xb601, lo: 0x9f, hi: 0x9f}, - {value: 0xb649, lo: 0xa0, hi: 0xa0}, - {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, - {value: 0xb719, lo: 0xa2, hi: 0xa2}, - {value: 0xb781, lo: 0xa3, hi: 0xa3}, - {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, - {value: 0x3018, lo: 0xa5, hi: 0xa6}, - {value: 0x3318, lo: 0xa7, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xac}, - {value: 0x3018, lo: 0xad, hi: 0xb2}, - {value: 0x0340, lo: 0xb3, hi: 0xba}, - {value: 0x3318, lo: 0xbb, hi: 0xbf}, - // Block 0xdd, offset 0x673 - {value: 0x0000, lo: 0x0b}, - {value: 0x3318, lo: 0x80, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0x84}, - {value: 0x3318, lo: 0x85, hi: 0x8b}, - {value: 0x0018, lo: 0x8c, hi: 0xa9}, - {value: 0x3318, lo: 0xaa, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xba}, - {value: 0xb851, lo: 0xbb, hi: 0xbb}, - {value: 0xb899, lo: 0xbc, hi: 0xbc}, - {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, - {value: 0xb949, lo: 0xbe, hi: 0xbe}, - {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, - // Block 0xde, offset 0x67f - {value: 0x0000, lo: 0x03}, - {value: 0xba19, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xdf, offset 0x683 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x3318, lo: 0x82, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xe0, offset 0x688 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe1, offset 0x68d - {value: 0x0000, lo: 0x03}, - {value: 0x3308, lo: 0x80, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0xe2, offset 0x691 - {value: 0x0000, lo: 0x04}, - {value: 0x3308, lo: 0x80, hi: 0xac}, - {value: 0x0018, lo: 0xad, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xe3, offset 0x696 - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x3308, lo: 0xa1, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xe4, offset 0x69f - {value: 0x0000, lo: 0x0a}, - {value: 0x3308, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x3308, lo: 0x88, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xa4}, - {value: 0x0040, lo: 0xa5, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0xe5, offset 0x6aa - {value: 
0x0000, lo: 0x05}, - {value: 0x0808, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x86}, - {value: 0x0818, lo: 0x87, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xe6, offset 0x6b0 - {value: 0x0000, lo: 0x07}, - {value: 0x0a08, lo: 0x80, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0818, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xe7, offset 0x6b8 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe8, offset 0x6bc - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0xe9, offset 0x6c0 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xb0}, - {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0xea, offset 0x6c6 - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xeb, offset 0x6cc - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, - {value: 0xc1c1, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xec, offset 0x6d1 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0xa5}, - {value: 0x0018, lo: 0xa6, hi: 0xbf}, - // Block 0xed, offset 0x6d4 - {value: 0x0000, lo: 0x0d}, - {value: 0xc7e9, lo: 0x80, hi: 0x80}, - {value: 0xc839, lo: 0x81, hi: 0x81}, - {value: 0xc889, lo: 0x82, hi: 0x82}, - {value: 0xc8d9, lo: 0x83, hi: 0x83}, - {value: 0xc929, lo: 0x84, hi: 0x84}, - {value: 0xc979, lo: 0x85, hi: 0x85}, - {value: 0xc9c9, lo: 0x86, hi: 0x86}, - {value: 0xca19, lo: 0x87, hi: 0x87}, - {value: 0xca69, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0xcab9, lo: 0x90, hi: 0x90}, - {value: 0xcad9, lo: 0x91, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xee, offset 0x6e2 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x92}, - {value: 0x0040, lo: 0x93, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xef, offset 0x6e9 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xf0, offset 0x6ec - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0xbf}, - // Block 0xf1, offset 0x6ef - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0xf2, offset 0x6f3 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0xf3, offset 0x6f9 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0xf4, 
offset 0x6fe - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb2}, - {value: 0x0018, lo: 0xb3, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xf5, offset 0x708 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xf6, offset 0x70d - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xf7, offset 0x710 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0xbf}, - // Block 0xf8, offset 0x713 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xf9, offset 0x716 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xfa, offset 0x719 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0xfb, offset 0x71d - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xbf}, - // Block 0xfc, offset 0x720 - {value: 0x0020, lo: 0x0f}, - {value: 0xdeb9, lo: 0x80, hi: 0x89}, - {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, - {value: 0xdff9, lo: 0x8b, hi: 0x9c}, - {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, - {value: 0xe239, lo: 0x9e, hi: 0xa2}, - {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, - {value: 0xe2d9, lo: 0xa4, hi: 0xab}, - {value: 0x7ed5, lo: 0xac, hi: 0xac}, - {value: 0xe3d9, lo: 0xad, hi: 0xaf}, - {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, - {value: 0xe439, lo: 0xb1, hi: 0xb6}, - {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, - {value: 0xe4f9, lo: 0xba, hi: 0xba}, - {value: 0x8edd, lo: 0xbb, hi: 0xbb}, - {value: 0xe519, lo: 0xbc, hi: 0xbf}, - // Block 0xfd, offset 0x730 - {value: 0x0020, lo: 0x10}, - {value: 0x937d, lo: 0x80, hi: 0x80}, - {value: 0xf099, lo: 0x81, hi: 0x86}, - {value: 0x939d, lo: 0x87, hi: 0x8a}, - {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, - {value: 0xf159, lo: 0x8c, hi: 0x96}, - {value: 0x941d, lo: 0x97, hi: 0x97}, - {value: 0xf2b9, lo: 0x98, hi: 0xa3}, - {value: 0x943d, lo: 0xa4, hi: 0xa6}, - {value: 0xf439, lo: 0xa7, hi: 0xaa}, - {value: 0x949d, lo: 0xab, hi: 0xab}, - {value: 0xf4b9, lo: 0xac, hi: 0xac}, - {value: 0x94bd, lo: 0xad, hi: 0xad}, - {value: 0xf4d9, lo: 0xae, hi: 0xaf}, - {value: 0x94dd, lo: 0xb0, hi: 0xb1}, - {value: 0xf519, lo: 0xb2, hi: 0xbe}, - {value: 0x2040, lo: 0xbf, hi: 0xbf}, - // Block 0xfe, offset 0x741 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0340, lo: 0x81, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x9f}, - {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0xff, offset 0x746 - {value: 0x0000, lo: 0x01}, - {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0x100, offset 0x748 - {value: 0x0000, lo: 0x01}, - {value: 0x33c0, lo: 0x80, hi: 0xbf}, - // Block 0x101, offset 0x74a - {value: 0x0000, lo: 0x02}, - {value: 0x33c0, lo: 0x80, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, -} - -// Total table size 41662 bytes (40KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go deleted file mode 100644 index c4ef847e7..000000000 
--- a/vendor/golang.org/x/net/idna/trie.go +++ /dev/null @@ -1,72 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -// appendMapping appends the mapping for the respective rune. isMapped must be -// true. A mapping is a categorization of a rune as defined in UTS #46. -func (c info) appendMapping(b []byte, s string) []byte { - index := int(c >> indexShift) - if c&xorBit == 0 { - s := mappings[index:] - return append(b, s[1:s[0]+1]...) - } - b = append(b, s...) - if c&inlineXOR == inlineXOR { - // TODO: support and handle two-byte inline masks - b[len(b)-1] ^= byte(index) - } else { - for p := len(b) - int(xorData[index]); p < len(b); p++ { - index++ - b[p] ^= xorData[index] - } - } - return b -} - -// Sparse block handling code. - -type valueRange struct { - value uint16 // header: value:stride - lo, hi byte // header: lo:n -} - -type sparseBlocks struct { - values []valueRange - offset []uint16 -} - -var idnaSparse = sparseBlocks{ - values: idnaSparseValues[:], - offset: idnaSparseOffset[:], -} - -// Don't use newIdnaTrie to avoid unconditional linking in of the table. -var trie = &idnaTrie{} - -// lookup determines the type of block n and looks up the value for b. -// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block -// is a list of ranges with an accompanying value. Given a matching range r, -// the value for b is by r.value + (b - r.lo) * stride. -func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { - offset := t.offset[n] - header := t.values[offset] - lo := offset + 1 - hi := lo + uint16(header.lo) - for lo < hi { - m := lo + (hi-lo)/2 - r := t.values[m] - if r.lo <= b && b <= r.hi { - return r.value + uint16(b-r.lo)*header.value - } - if b < r.lo { - hi = m - } else { - lo = m + 1 - } - } - return 0 -} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go deleted file mode 100644 index 7a8cf889b..000000000 --- a/vendor/golang.org/x/net/idna/trieval.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package idna - -// This file contains definitions for interpreting the trie value of the idna -// trie generated by "go run gen*.go". It is shared by both the generator -// program and the resultant package. Sharing is achieved by the generator -// copying gen_trieval.go to trieval.go and changing what's above this comment. - -// info holds information from the IDNA mapping table for a single rune. It is -// the value returned by a trie lookup. In most cases, all information fits in -// a 16-bit value. For mappings, this value may contain an index into a slice -// with the mapped string. Such mappings can consist of the actual mapped value -// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the -// input rune. This technique is used by the cases packages and reduces the -// table size significantly. 
-// -// The per-rune values have the following format: -// -// if mapped { -// if inlinedXOR { -// 15..13 inline XOR marker -// 12..11 unused -// 10..3 inline XOR mask -// } else { -// 15..3 index into xor or mapping table -// } -// } else { -// 15..14 unused -// 13 mayNeedNorm -// 12..11 attributes -// 10..8 joining type -// 7..3 category type -// } -// 2 use xor pattern -// 1..0 mapped category -// -// See the definitions below for a more detailed description of the various -// bits. -type info uint16 - -const ( - catSmallMask = 0x3 - catBigMask = 0xF8 - indexShift = 3 - xorBit = 0x4 // interpret the index as an xor pattern - inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. - - joinShift = 8 - joinMask = 0x07 - - // Attributes - attributesMask = 0x1800 - viramaModifier = 0x1800 - modifier = 0x1000 - rtl = 0x0800 - - mayNeedNorm = 0x2000 -) - -// A category corresponds to a category defined in the IDNA mapping table. -type category uint16 - -const ( - unknown category = 0 // not currently defined in unicode. - mapped category = 1 - disallowedSTD3Mapped category = 2 - deviation category = 3 -) - -const ( - valid category = 0x08 - validNV8 category = 0x18 - validXV8 category = 0x28 - disallowed category = 0x40 - disallowedSTD3Valid category = 0x80 - ignored category = 0xC0 -) - -// join types and additional rune information -const ( - joiningL = (iota + 1) - joiningD - joiningT - joiningR - - //the following types are derived during processing - joinZWJ - joinZWNJ - joinVirama - numJoinTypes -) - -func (c info) isMapped() bool { - return c&0x3 != 0 -} - -func (c info) category() category { - small := c & catSmallMask - if small != 0 { - return category(small) - } - return category(c & catBigMask) -} - -func (c info) joinType() info { - if c.isMapped() { - return 0 - } - return (c >> joinShift) & joinMask -} - -func (c info) isModifier() bool { - return c&(modifier|catSmallMask) == modifier -} - -func (c info) isViramaModifier() bool { - return c&(attributesMask|catSmallMask) == viramaModifier -} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go deleted file mode 100644 index 685f0e7ea..000000000 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package timeseries implements a time series structure for stats collection. -package timeseries // import "golang.org/x/net/internal/timeseries" - -import ( - "fmt" - "log" - "time" -) - -const ( - timeSeriesNumBuckets = 64 - minuteHourSeriesNumBuckets = 60 -) - -var timeSeriesResolutions = []time.Duration{ - 1 * time.Second, - 10 * time.Second, - 1 * time.Minute, - 10 * time.Minute, - 1 * time.Hour, - 6 * time.Hour, - 24 * time.Hour, // 1 day - 7 * 24 * time.Hour, // 1 week - 4 * 7 * 24 * time.Hour, // 4 weeks - 16 * 7 * 24 * time.Hour, // 16 weeks -} - -var minuteHourSeriesResolutions = []time.Duration{ - 1 * time.Second, - 1 * time.Minute, -} - -// An Observable is a kind of data that can be aggregated in a time series. -type Observable interface { - Multiply(ratio float64) // Multiplies the data in self by a given ratio - Add(other Observable) // Adds the data from a different observation to self - Clear() // Clears the observation so it can be reused. 
- CopyFrom(other Observable) // Copies the contents of a given observation to self -} - -// Float attaches the methods of Observable to a float64. -type Float float64 - -// NewFloat returns a Float. -func NewFloat() Observable { - f := Float(0) - return &f -} - -// String returns the float as a string. -func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } - -// Value returns the float's value. -func (f *Float) Value() float64 { return float64(*f) } - -func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } - -func (f *Float) Add(other Observable) { - o := other.(*Float) - *f += *o -} - -func (f *Float) Clear() { *f = 0 } - -func (f *Float) CopyFrom(other Observable) { - o := other.(*Float) - *f = *o -} - -// A Clock tells the current time. -type Clock interface { - Time() time.Time -} - -type defaultClock int - -var defaultClockInstance defaultClock - -func (defaultClock) Time() time.Time { return time.Now() } - -// Information kept per level. Each level consists of a circular list of -// observations. The start of the level may be derived from end and the -// len(buckets) * sizeInMillis. -type tsLevel struct { - oldest int // index to oldest bucketed Observable - newest int // index to newest bucketed Observable - end time.Time // end timestamp for this level - size time.Duration // duration of the bucketed Observable - buckets []Observable // collections of observations - provider func() Observable // used for creating new Observable -} - -func (l *tsLevel) Clear() { - l.oldest = 0 - l.newest = len(l.buckets) - 1 - l.end = time.Time{} - for i := range l.buckets { - if l.buckets[i] != nil { - l.buckets[i].Clear() - l.buckets[i] = nil - } - } -} - -func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { - l.size = size - l.provider = f - l.buckets = make([]Observable, numBuckets) -} - -// Keeps a sequence of levels. Each level is responsible for storing data at -// a given resolution. For example, the first level stores data at a one -// minute resolution while the second level stores data at a one hour -// resolution. - -// Each level is represented by a sequence of buckets. Each bucket spans an -// interval equal to the resolution of the level. New observations are added -// to the last bucket. -type timeSeries struct { - provider func() Observable // make more Observable - numBuckets int // number of buckets in each level - levels []*tsLevel // levels of bucketed Observable - lastAdd time.Time // time of last Observable tracked - total Observable // convenient aggregation of all Observable - clock Clock // Clock for getting current time - pending Observable // observations not yet bucketed - pendingTime time.Time // what time are we keeping in pending - dirty bool // if there are pending observations -} - -// init initializes a level according to the supplied criteria. -func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { - ts.provider = f - ts.numBuckets = numBuckets - ts.clock = clock - ts.levels = make([]*tsLevel, len(resolutions)) - - for i := range resolutions { - if i > 0 && resolutions[i-1] >= resolutions[i] { - log.Print("timeseries: resolutions must be monotonically increasing") - break - } - newLevel := new(tsLevel) - newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) - ts.levels[i] = newLevel - } - - ts.Clear() -} - -// Clear removes all observations from the time series. 
-func (ts *timeSeries) Clear() { - ts.lastAdd = time.Time{} - ts.total = ts.resetObservation(ts.total) - ts.pending = ts.resetObservation(ts.pending) - ts.pendingTime = time.Time{} - ts.dirty = false - - for i := range ts.levels { - ts.levels[i].Clear() - } -} - -// Add records an observation at the current time. -func (ts *timeSeries) Add(observation Observable) { - ts.AddWithTime(observation, ts.clock.Time()) -} - -// AddWithTime records an observation at the specified time. -func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { - - smallBucketDuration := ts.levels[0].size - - if t.After(ts.lastAdd) { - ts.lastAdd = t - } - - if t.After(ts.pendingTime) { - ts.advance(t) - ts.mergePendingUpdates() - ts.pendingTime = ts.levels[0].end - ts.pending.CopyFrom(observation) - ts.dirty = true - } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { - // The observation is close enough to go into the pending bucket. - // This compensates for clock skewing and small scheduling delays - // by letting the update stay in the fast path. - ts.pending.Add(observation) - ts.dirty = true - } else { - ts.mergeValue(observation, t) - } -} - -// mergeValue inserts the observation at the specified time in the past into all levels. -func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { - for _, level := range ts.levels { - index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) - if 0 <= index && index < ts.numBuckets { - bucketNumber := (level.oldest + index) % ts.numBuckets - if level.buckets[bucketNumber] == nil { - level.buckets[bucketNumber] = level.provider() - } - level.buckets[bucketNumber].Add(observation) - } - } - ts.total.Add(observation) -} - -// mergePendingUpdates applies the pending updates into all levels. -func (ts *timeSeries) mergePendingUpdates() { - if ts.dirty { - ts.mergeValue(ts.pending, ts.pendingTime) - ts.pending = ts.resetObservation(ts.pending) - ts.dirty = false - } -} - -// advance cycles the buckets at each level until the latest bucket in -// each level can hold the time specified. -func (ts *timeSeries) advance(t time.Time) { - if !t.After(ts.levels[0].end) { - return - } - for i := 0; i < len(ts.levels); i++ { - level := ts.levels[i] - if !level.end.Before(t) { - break - } - - // If the time is sufficiently far, just clear the level and advance - // directly. - if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { - for _, b := range level.buckets { - ts.resetObservation(b) - } - level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) - } - - for t.After(level.end) { - level.end = level.end.Add(level.size) - level.newest = level.oldest - level.oldest = (level.oldest + 1) % ts.numBuckets - ts.resetObservation(level.buckets[level.newest]) - } - - t = level.end - } -} - -// Latest returns the sum of the num latest buckets from the level. -func (ts *timeSeries) Latest(level, num int) Observable { - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - result := ts.provider() - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - if l.buckets[index] != nil { - result.Add(l.buckets[index]) - } - if index == 0 { - index = ts.numBuckets - } - index-- - } - - return result -} - -// LatestBuckets returns a copy of the num latest buckets from level. 
-func (ts *timeSeries) LatestBuckets(level, num int) []Observable { - if level < 0 || level > len(ts.levels) { - log.Print("timeseries: bad level argument: ", level) - return nil - } - if num < 0 || num >= ts.numBuckets { - log.Print("timeseries: bad num argument: ", num) - return nil - } - - results := make([]Observable, num) - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - result := ts.provider() - results[i] = result - if l.buckets[index] != nil { - result.CopyFrom(l.buckets[index]) - } - - if index == 0 { - index = ts.numBuckets - } - index -= 1 - } - return results -} - -// ScaleBy updates observations by scaling by factor. -func (ts *timeSeries) ScaleBy(factor float64) { - for _, l := range ts.levels { - for i := 0; i < ts.numBuckets; i++ { - l.buckets[i].Multiply(factor) - } - } - - ts.total.Multiply(factor) - ts.pending.Multiply(factor) -} - -// Range returns the sum of observations added over the specified time range. -// If start or finish times don't fall on bucket boundaries of the same -// level, then return values are approximate answers. -func (ts *timeSeries) Range(start, finish time.Time) Observable { - return ts.ComputeRange(start, finish, 1)[0] -} - -// Recent returns the sum of observations from the last delta. -func (ts *timeSeries) Recent(delta time.Duration) Observable { - now := ts.clock.Time() - return ts.Range(now.Add(-delta), now) -} - -// Total returns the total of all observations. -func (ts *timeSeries) Total() Observable { - ts.mergePendingUpdates() - return ts.total -} - -// ComputeRange computes a specified number of values into a slice using -// the observations recorded over the specified time period. The return -// values are approximate if the start or finish times don't fall on the -// bucket boundaries at the same level or if the number of buckets spanning -// the range is not an integral multiple of num. -func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { - if start.After(finish) { - log.Printf("timeseries: start > finish, %v>%v", start, finish) - return nil - } - - if num < 0 { - log.Printf("timeseries: num < 0, %v", num) - return nil - } - - results := make([]Observable, num) - - for _, l := range ts.levels { - if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { - ts.extract(l, start, finish, num, results) - return results - } - } - - // Failed to find a level that covers the desired range. So just - // extract from the last level, even if it doesn't cover the entire - // desired range. - ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) - - return results -} - -// RecentList returns the specified number of values in slice over the most -// recent time period of the specified range. -func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { - if delta < 0 { - return nil - } - now := ts.clock.Time() - return ts.ComputeRange(now.Add(-delta), now, num) -} - -// extract returns a slice of specified number of observations from a given -// level over a given range. -func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { - ts.mergePendingUpdates() - - srcInterval := l.size - dstInterval := finish.Sub(start) / time.Duration(num) - dstStart := start - srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) - - srcIndex := 0 - - // Where should scanning start? 
- if dstStart.After(srcStart) { - advance := dstStart.Sub(srcStart) / srcInterval - srcIndex += int(advance) - srcStart = srcStart.Add(advance * srcInterval) - } - - // The i'th value is computed as show below. - // interval = (finish/start)/num - // i'th value = sum of observation in range - // [ start + i * interval, - // start + (i + 1) * interval ) - for i := 0; i < num; i++ { - results[i] = ts.resetObservation(results[i]) - dstEnd := dstStart.Add(dstInterval) - for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { - srcEnd := srcStart.Add(srcInterval) - if srcEnd.After(ts.lastAdd) { - srcEnd = ts.lastAdd - } - - if !srcEnd.Before(dstStart) { - srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] - if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { - // dst completely contains src. - if srcValue != nil { - results[i].Add(srcValue) - } - } else { - // dst partially overlaps src. - overlapStart := maxTime(srcStart, dstStart) - overlapEnd := minTime(srcEnd, dstEnd) - base := srcEnd.Sub(srcStart) - fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() - - used := ts.provider() - if srcValue != nil { - used.CopyFrom(srcValue) - } - used.Multiply(fraction) - results[i].Add(used) - } - - if srcEnd.After(dstEnd) { - break - } - } - srcIndex++ - srcStart = srcStart.Add(srcInterval) - } - dstStart = dstStart.Add(dstInterval) - } -} - -// resetObservation clears the content so the struct may be reused. -func (ts *timeSeries) resetObservation(observation Observable) Observable { - if observation == nil { - observation = ts.provider() - } else { - observation.Clear() - } - return observation -} - -// TimeSeries tracks data at granularities from 1 second to 16 weeks. -type TimeSeries struct { - timeSeries -} - -// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. -func NewTimeSeries(f func() Observable) *TimeSeries { - return NewTimeSeriesWithClock(f, defaultClockInstance) -} - -// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { - ts := new(TimeSeries) - ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) - return ts -} - -// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. -type MinuteHourSeries struct { - timeSeries -} - -// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. -func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { - return NewMinuteHourSeriesWithClock(f, defaultClockInstance) -} - -// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. 
-func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { - ts := new(MinuteHourSeries) - ts.timeSeries.init(minuteHourSeriesResolutions, f, - minuteHourSeriesNumBuckets, clock) - return ts -} - -func (ts *MinuteHourSeries) Minute() Observable { - return ts.timeSeries.Latest(0, 60) -} - -func (ts *MinuteHourSeries) Hour() Observable { - return ts.timeSeries.Latest(1, 60) -} - -func minTime(a, b time.Time) time.Time { - if a.Before(b) { - return a - } - return b -} - -func maxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go deleted file mode 100644 index c646a6952..000000000 --- a/vendor/golang.org/x/net/trace/events.go +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "text/tabwriter" - "time" -) - -const maxEventsPerLog = 100 - -type bucket struct { - MaxErrAge time.Duration - String string -} - -var buckets = []bucket{ - {0, "total"}, - {10 * time.Second, "errs<10s"}, - {1 * time.Minute, "errs<1m"}, - {10 * time.Minute, "errs<10m"}, - {1 * time.Hour, "errs<1h"}, - {10 * time.Hour, "errs<10h"}, - {24000 * time.Hour, "errors"}, -} - -// RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking. The request may be nil. -// -// Most users will use the Events handler. -func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { - now := time.Now() - data := &struct { - Families []string // family names - Buckets []bucket - Counts [][]int // eventLog count per family/bucket - - // Set when a bucket has been selected. - Family string - Bucket int - EventLogs eventLogs - Expanded bool - }{ - Buckets: buckets, - } - - data.Families = make([]string, 0, len(families)) - famMu.RLock() - for name := range families { - data.Families = append(data.Families, name) - } - famMu.RUnlock() - sort.Strings(data.Families) - - // Count the number of eventLogs in each family for each error age. - data.Counts = make([][]int, len(data.Families)) - for i, name := range data.Families { - // TODO(sameer): move this loop under the family lock. 
- f := getEventFamily(name) - data.Counts[i] = make([]int, len(data.Buckets)) - for j, b := range data.Buckets { - data.Counts[i][j] = f.Count(now, b.MaxErrAge) - } - } - - if req != nil { - var ok bool - data.Family, data.Bucket, ok = parseEventsArgs(req) - if !ok { - // No-op - } else { - data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) - } - if data.EventLogs != nil { - defer data.EventLogs.Free() - sort.Sort(data.EventLogs) - } - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - } - - famMu.RLock() - defer famMu.RUnlock() - if err := eventsTmpl().Execute(w, data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < 0 || b >= len(buckets) { - return "", 0, false - } - return fam, b, true -} - -// An EventLog provides a log of events associated with a specific object. -type EventLog interface { - // Printf formats its arguments with fmt.Sprintf and adds the - // result to the event log. - Printf(format string, a ...interface{}) - - // Errorf is like Printf, but it marks this event as an error. - Errorf(format string, a ...interface{}) - - // Finish declares that this event log is complete. - // The event log should not be used after calling this method. - Finish() -} - -// NewEventLog returns a new EventLog with the specified family name -// and title. -func NewEventLog(family, title string) EventLog { - el := newEventLog() - el.ref() - el.Family, el.Title = family, title - el.Start = time.Now() - el.events = make([]logEntry, 0, maxEventsPerLog) - el.stack = make([]uintptr, 32) - n := runtime.Callers(2, el.stack) - el.stack = el.stack[:n] - - getEventFamily(family).add(el) - return el -} - -func (el *eventLog) Finish() { - getEventFamily(el.Family).remove(el) - el.unref() // matches ref in New -} - -var ( - famMu sync.RWMutex - families = make(map[string]*eventFamily) // family name => family -) - -func getEventFamily(fam string) *eventFamily { - famMu.Lock() - defer famMu.Unlock() - f := families[fam] - if f == nil { - f = &eventFamily{} - families[fam] = f - } - return f -} - -type eventFamily struct { - mu sync.RWMutex - eventLogs eventLogs -} - -func (f *eventFamily) add(el *eventLog) { - f.mu.Lock() - f.eventLogs = append(f.eventLogs, el) - f.mu.Unlock() -} - -func (f *eventFamily) remove(el *eventLog) { - f.mu.Lock() - defer f.mu.Unlock() - for i, el0 := range f.eventLogs { - if el == el0 { - copy(f.eventLogs[i:], f.eventLogs[i+1:]) - f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] - return - } - } -} - -func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - n++ - } - } - return -} - -func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { - f.mu.RLock() - defer f.mu.RUnlock() - els = make(eventLogs, 0, len(f.eventLogs)) - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - el.ref() - els = append(els, el) - } - } - return -} - -type eventLogs []*eventLog - -// Free calls unref on each element of the list. -func (els eventLogs) Free() { - for _, el := range els { - el.unref() - } -} - -// eventLogs may be sorted in reverse chronological order. 
-func (els eventLogs) Len() int { return len(els) } -func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } -func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } - -// A logEntry is a timestamped log entry in an event log. -type logEntry struct { - When time.Time - Elapsed time.Duration // since previous event in log - NewDay bool // whether this event is on a different day to the previous event - What string - IsErr bool -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e logEntry) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// An eventLog represents an active event log. -type eventLog struct { - // Family is the top-level grouping of event logs to which this belongs. - Family string - - // Title is the title of this event log. - Title string - - // Timing information. - Start time.Time - - // Call stack where this event log was created. - stack []uintptr - - // Append-only sequence of events. - // - // TODO(sameer): change this to a ring buffer to avoid the array copy - // when we hit maxEventsPerLog. - mu sync.RWMutex - events []logEntry - LastErrorTime time.Time - discarded int - - refs int32 // how many buckets this is in -} - -func (el *eventLog) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - el.Family = "" - el.Title = "" - el.Start = time.Time{} - el.stack = nil - el.events = nil - el.LastErrorTime = time.Time{} - el.discarded = 0 - el.refs = 0 -} - -func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { - if maxErrAge == 0 { - return true - } - el.mu.RLock() - defer el.mu.RUnlock() - return now.Sub(el.LastErrorTime) < maxErrAge -} - -// delta returns the elapsed time since the last event or the log start, -// and whether it spans midnight. -// L >= el.mu -func (el *eventLog) delta(t time.Time) (time.Duration, bool) { - if len(el.events) == 0 { - return t.Sub(el.Start), false - } - prev := el.events[len(el.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() - -} - -func (el *eventLog) Printf(format string, a ...interface{}) { - el.printf(false, format, a...) -} - -func (el *eventLog) Errorf(format string, a ...interface{}) { - el.printf(true, format, a...) -} - -func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { - e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} - el.mu.Lock() - e.Elapsed, e.NewDay = el.delta(e.When) - if len(el.events) < maxEventsPerLog { - el.events = append(el.events, e) - } else { - // Discard the oldest event. - if el.discarded == 0 { - // el.discarded starts at two to count for the event it - // is replacing, plus the next one that we are about to - // drop. - el.discarded = 2 - } else { - el.discarded++ - } - // TODO(sameer): if this causes allocations on a critical path, - // change eventLog.What to be a fmt.Stringer, as in trace.go. - el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. 
- el.events[0].When = el.events[1].When - copy(el.events[1:], el.events[2:]) - el.events[maxEventsPerLog-1] = e - } - if e.IsErr { - el.LastErrorTime = e.When - } - el.mu.Unlock() -} - -func (el *eventLog) ref() { - atomic.AddInt32(&el.refs, 1) -} - -func (el *eventLog) unref() { - if atomic.AddInt32(&el.refs, -1) == 0 { - freeEventLog(el) - } -} - -func (el *eventLog) When() string { - return el.Start.Format("2006/01/02 15:04:05.000000") -} - -func (el *eventLog) ElapsedTime() string { - elapsed := time.Since(el.Start) - return fmt.Sprintf("%.6f", elapsed.Seconds()) -} - -func (el *eventLog) Stack() string { - buf := new(bytes.Buffer) - tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) - printStackRecord(tw, el.stack) - tw.Flush() - return buf.String() -} - -// printStackRecord prints the function + source line information -// for a single stack trace. -// Adapted from runtime/pprof/pprof.go. -func printStackRecord(w io.Writer, stk []uintptr) { - for _, pc := range stk { - f := runtime.FuncForPC(pc) - if f == nil { - continue - } - file, line := f.FileLine(pc) - name := f.Name() - // Hide runtime.goexit and any runtime functions at the beginning. - if strings.HasPrefix(name, "runtime.") { - continue - } - fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) - } -} - -func (el *eventLog) Events() []logEntry { - el.mu.RLock() - defer el.mu.RUnlock() - return el.events -} - -// freeEventLogs is a freelist of *eventLog -var freeEventLogs = make(chan *eventLog, 1000) - -// newEventLog returns a event log ready to use. -func newEventLog() *eventLog { - select { - case el := <-freeEventLogs: - return el - default: - return new(eventLog) - } -} - -// freeEventLog adds el to freeEventLogs if there's room. -// This is non-blocking. -func freeEventLog(el *eventLog) { - el.reset() - select { - case freeEventLogs <- el: - default: - } -} - -var eventsTmplCache *template.Template -var eventsTmplOnce sync.Once - -func eventsTmpl() *template.Template { - eventsTmplOnce.Do(func() { - eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ - "elapsed": elapsed, - "trimSpace": strings.TrimSpace, - }).Parse(eventsHTML)) - }) - return eventsTmplCache -} - -const eventsHTML = ` - - - events - - - - -

/debug/events

- - - {{range $i, $fam := .Families}} - - - - {{range $j, $bucket := $.Buckets}} - {{$n := index $.Counts $i $j}} - - {{end}} - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} {{$bucket.String}}] - {{if $n}}{{end}} -
- -{{if $.EventLogs}} -
-

Family: {{$.Family}}

- -{{if $.Expanded}}{{end}} -[Summary]{{if $.Expanded}}{{end}} - -{{if not $.Expanded}}{{end}} -[Expanded]{{if not $.Expanded}}{{end}} - - - - {{range $el := $.EventLogs}} - - - - - {{if $.Expanded}} - - - - - - {{range $el.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} -
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
-{{end}} - - -` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go deleted file mode 100644 index 9bf4286c7..000000000 --- a/vendor/golang.org/x/net/trace/histogram.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -// This file implements histogramming for RPC statistics collection. - -import ( - "bytes" - "fmt" - "html/template" - "log" - "math" - "sync" - - "golang.org/x/net/internal/timeseries" -) - -const ( - bucketCount = 38 -) - -// histogram keeps counts of values in buckets that are spaced -// out in powers of 2: 0-1, 2-3, 4-7... -// histogram implements timeseries.Observable -type histogram struct { - sum int64 // running total of measurements - sumOfSquares float64 // square of running total - buckets []int64 // bucketed values for histogram - value int // holds a single value as an optimization - valueCount int64 // number of values recorded for single value -} - -// AddMeasurement records a value measurement observation to the histogram. -func (h *histogram) addMeasurement(value int64) { - // TODO: assert invariant - h.sum += value - h.sumOfSquares += float64(value) * float64(value) - - bucketIndex := getBucket(value) - - if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { - h.value = bucketIndex - h.valueCount++ - } else { - h.allocateBuckets() - h.buckets[bucketIndex]++ - } -} - -func (h *histogram) allocateBuckets() { - if h.buckets == nil { - h.buckets = make([]int64, bucketCount) - h.buckets[h.value] = h.valueCount - h.value = 0 - h.valueCount = -1 - } -} - -func log2(i int64) int { - n := 0 - for ; i >= 0x100; i >>= 8 { - n += 8 - } - for ; i > 0; i >>= 1 { - n += 1 - } - return n -} - -func getBucket(i int64) (index int) { - index = log2(i) - 1 - if index < 0 { - index = 0 - } - if index >= bucketCount { - index = bucketCount - 1 - } - return -} - -// Total returns the number of recorded observations. -func (h *histogram) total() (total int64) { - if h.valueCount >= 0 { - total = h.valueCount - } - for _, val := range h.buckets { - total += int64(val) - } - return -} - -// Average returns the average value of recorded observations. -func (h *histogram) average() float64 { - t := h.total() - if t == 0 { - return 0 - } - return float64(h.sum) / float64(t) -} - -// Variance returns the variance of recorded observations. -func (h *histogram) variance() float64 { - t := float64(h.total()) - if t == 0 { - return 0 - } - s := float64(h.sum) / t - return h.sumOfSquares/t - s*s -} - -// StandardDeviation returns the standard deviation of recorded observations. -func (h *histogram) standardDeviation() float64 { - return math.Sqrt(h.variance()) -} - -// PercentileBoundary estimates the value that the given fraction of recorded -// observations are less than. -func (h *histogram) percentileBoundary(percentile float64) int64 { - total := h.total() - - // Corner cases (make sure result is strictly less than Total()) - if total == 0 { - return 0 - } else if total == 1 { - return int64(h.average()) - } - - percentOfTotal := round(float64(total) * percentile) - var runningTotal int64 - - for i := range h.buckets { - value := h.buckets[i] - runningTotal += value - if runningTotal == percentOfTotal { - // We hit an exact bucket boundary. If the next bucket has data, it is a - // good estimate of the value. 
If the bucket is empty, we interpolate the - // midpoint between the next bucket's boundary and the next non-zero - // bucket. If the remaining buckets are all empty, then we use the - // boundary for the next bucket as the estimate. - j := uint8(i + 1) - min := bucketBoundary(j) - if runningTotal < total { - for h.buckets[j] == 0 { - j++ - } - } - max := bucketBoundary(j) - return min + round(float64(max-min)/2) - } else if runningTotal > percentOfTotal { - // The value is in this bucket. Interpolate the value. - delta := runningTotal - percentOfTotal - percentBucket := float64(value-delta) / float64(value) - bucketMin := bucketBoundary(uint8(i)) - nextBucketMin := bucketBoundary(uint8(i + 1)) - bucketSize := nextBucketMin - bucketMin - return bucketMin + round(percentBucket*float64(bucketSize)) - } - } - return bucketBoundary(bucketCount - 1) -} - -// Median returns the estimated median of the observed values. -func (h *histogram) median() int64 { - return h.percentileBoundary(0.5) -} - -// Add adds other to h. -func (h *histogram) Add(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == 0 { - // Other histogram is empty - } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { - // Both have a single bucketed value, aggregate them - h.valueCount += o.valueCount - } else { - // Two different values necessitate buckets in this histogram - h.allocateBuckets() - if o.valueCount >= 0 { - h.buckets[o.value] += o.valueCount - } else { - for i := range h.buckets { - h.buckets[i] += o.buckets[i] - } - } - } - h.sumOfSquares += o.sumOfSquares - h.sum += o.sum -} - -// Clear resets the histogram to an empty state, removing all observed values. -func (h *histogram) Clear() { - h.buckets = nil - h.value = 0 - h.valueCount = 0 - h.sum = 0 - h.sumOfSquares = 0 -} - -// CopyFrom copies from other, which must be a *histogram, into h. -func (h *histogram) CopyFrom(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == -1 { - h.allocateBuckets() - copy(h.buckets, o.buckets) - } - h.sum = o.sum - h.sumOfSquares = o.sumOfSquares - h.value = o.value - h.valueCount = o.valueCount -} - -// Multiply scales the histogram by the specified ratio. -func (h *histogram) Multiply(ratio float64) { - if h.valueCount == -1 { - for i := range h.buckets { - h.buckets[i] = int64(float64(h.buckets[i]) * ratio) - } - } else { - h.valueCount = int64(float64(h.valueCount) * ratio) - } - h.sum = int64(float64(h.sum) * ratio) - h.sumOfSquares = h.sumOfSquares * ratio -} - -// New creates a new histogram. -func (h *histogram) New() timeseries.Observable { - r := new(histogram) - r.Clear() - return r -} - -func (h *histogram) String() string { - return fmt.Sprintf("%d, %f, %d, %d, %v", - h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) -} - -// round returns the closest int64 to the argument -func round(in float64) int64 { - return int64(math.Floor(in + 0.5)) -} - -// bucketBoundary returns the first value in the bucket. -func bucketBoundary(bucket uint8) int64 { - if bucket == 0 { - return 0 - } - return 1 << bucket -} - -// bucketData holds data about a specific bucket for use in distTmpl. -type bucketData struct { - Lower, Upper int64 - N int64 - Pct, CumulativePct float64 - GraphWidth int -} - -// data holds data about a Distribution for use in distTmpl. -type data struct { - Buckets []*bucketData - Count, Median int64 - Mean, StandardDeviation float64 -} - -// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
-const maxHTMLBarWidth = 350.0 - -// newData returns data representing h for use in distTmpl. -func (h *histogram) newData() *data { - // Force the allocation of buckets to simplify the rendering implementation - h.allocateBuckets() - // We scale the bars on the right so that the largest bar is - // maxHTMLBarWidth pixels in width. - maxBucket := int64(0) - for _, n := range h.buckets { - if n > maxBucket { - maxBucket = n - } - } - total := h.total() - barsizeMult := maxHTMLBarWidth / float64(maxBucket) - var pctMult float64 - if total == 0 { - pctMult = 1.0 - } else { - pctMult = 100.0 / float64(total) - } - - buckets := make([]*bucketData, len(h.buckets)) - runningTotal := int64(0) - for i, n := range h.buckets { - if n == 0 { - continue - } - runningTotal += n - var upperBound int64 - if i < bucketCount-1 { - upperBound = bucketBoundary(uint8(i + 1)) - } else { - upperBound = math.MaxInt64 - } - buckets[i] = &bucketData{ - Lower: bucketBoundary(uint8(i)), - Upper: upperBound, - N: n, - Pct: float64(n) * pctMult, - CumulativePct: float64(runningTotal) * pctMult, - GraphWidth: int(float64(n) * barsizeMult), - } - } - return &data{ - Buckets: buckets, - Count: total, - Median: h.median(), - Mean: h.average(), - StandardDeviation: h.standardDeviation(), - } -} - -func (h *histogram) html() template.HTML { - buf := new(bytes.Buffer) - if err := distTmpl().Execute(buf, h.newData()); err != nil { - buf.Reset() - log.Printf("net/trace: couldn't execute template: %v", err) - } - return template.HTML(buf.String()) -} - -var distTmplCache *template.Template -var distTmplOnce sync.Once - -func distTmpl() *template.Template { - distTmplOnce.Do(func() { - // Input: data - distTmplCache = template.Must(template.New("distTmpl").Parse(` - - - - - - - -
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
-
- -{{range $b := .Buckets}} -{{if $b}} - - - - - - - - - -{{end}} -{{end}} -
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
-`)) - }) - return distTmplCache -} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go deleted file mode 100644 index 3ebf6f2da..000000000 --- a/vendor/golang.org/x/net/trace/trace.go +++ /dev/null @@ -1,1130 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package trace implements tracing of requests and long-lived objects. -It exports HTTP interfaces on /debug/requests and /debug/events. - -A trace.Trace provides tracing for short-lived objects, usually requests. -A request handler might be implemented like this: - - func fooHandler(w http.ResponseWriter, req *http.Request) { - tr := trace.New("mypkg.Foo", req.URL.Path) - defer tr.Finish() - ... - tr.LazyPrintf("some event %q happened", str) - ... - if err := somethingImportant(); err != nil { - tr.LazyPrintf("somethingImportant failed: %v", err) - tr.SetError() - } - } - -The /debug/requests HTTP endpoint organizes the traces by family, -errors, and duration. It also provides histogram of request duration -for each family. - -A trace.EventLog provides tracing for long-lived objects, such as RPC -connections. - - // A Fetcher fetches URL paths for a single domain. - type Fetcher struct { - domain string - events trace.EventLog - } - - func NewFetcher(domain string) *Fetcher { - return &Fetcher{ - domain, - trace.NewEventLog("mypkg.Fetcher", domain), - } - } - - func (f *Fetcher) Fetch(path string) (string, error) { - resp, err := http.Get("http://" + f.domain + "/" + path) - if err != nil { - f.events.Errorf("Get(%q) = %v", path, err) - return "", err - } - f.events.Printf("Get(%q) = %s", path, resp.Status) - ... - } - - func (f *Fetcher) Close() error { - f.events.Finish() - return nil - } - -The /debug/events HTTP endpoint organizes the event logs by family and -by time since the last error. The expanded view displays recent log -entries and the log's call stack. -*/ -package trace // import "golang.org/x/net/trace" - -import ( - "bytes" - "context" - "fmt" - "html/template" - "io" - "log" - "net" - "net/http" - "net/url" - "runtime" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/internal/timeseries" -) - -// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. -// FOR DEBUGGING ONLY. This will slow down the program. -var DebugUseAfterFinish = false - -// HTTP ServeMux paths. -const ( - debugRequestsPath = "/debug/requests" - debugEventsPath = "/debug/events" -) - -// AuthRequest determines whether a specific request is permitted to load the -// /debug/requests or /debug/events pages. -// -// It returns two bools; the first indicates whether the page may be viewed at all, -// and the second indicates whether sensitive events will be shown. -// -// AuthRequest may be replaced by a program to customize its authorization requirements. -// -// The default AuthRequest function returns (true, true) if and only if the request -// comes from localhost/127.0.0.1/[::1]. -var AuthRequest = func(req *http.Request) (any, sensitive bool) { - // RemoteAddr is commonly in the form "IP" or "IP:port". - // If it is in the form "IP:port", split off the port. 
- host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - switch host { - case "localhost", "127.0.0.1", "::1": - return true, true - default: - return false, false - } -} - -func init() { - _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) - if pat == debugRequestsPath { - panic("/debug/requests is already registered. You may have two independent copies of " + - "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + - "involve a vendored copy of golang.org/x/net/trace.") - } - - // TODO(jbd): Serve Traces from /debug/traces in the future? - // There is no requirement for a request to be present to have traces. - http.HandleFunc(debugRequestsPath, Traces) - http.HandleFunc(debugEventsPath, Events) -} - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} - -// Traces responds with traces from the program. -// The package initialization registers it in http.DefaultServeMux -// at /debug/requests. -// -// It performs authorization by running AuthRequest. -func Traces(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) -} - -// Events responds with a page of events collected by EventLogs. -// The package initialization registers it in http.DefaultServeMux -// at /debug/events. -// -// It performs authorization by running AuthRequest. -func Events(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) -} - -// Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking. The request may be nil. -// -// Most users will use the Traces handler. -func Render(w io.Writer, req *http.Request, sensitive bool) { - data := &struct { - Families []string - ActiveTraceCount map[string]int - CompletedTraces map[string]*family - - // Set when a bucket has been selected. - Traces traceList - Family string - Bucket int - Expanded bool - Traced bool - Active bool - ShowSensitive bool // whether to show sensitive events - - Histogram template.HTML - HistogramWindow string // e.g. "last minute", "last hour", "all time" - - // If non-zero, the set of traces is a partial set, - // and this is the total number. - Total int - }{ - CompletedTraces: completedTraces, - } - - data.ShowSensitive = sensitive - if req != nil { - // Allow show_sensitive=0 to force hiding of sensitive data for testing. - // This only goes one way; you can't use show_sensitive=1 to see things. 
- if req.FormValue("show_sensitive") == "0" { - data.ShowSensitive = false - } - - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { - data.Traced = exp - } - } - - completedMu.RLock() - data.Families = make([]string, 0, len(completedTraces)) - for fam := range completedTraces { - data.Families = append(data.Families, fam) - } - completedMu.RUnlock() - sort.Strings(data.Families) - - // We are careful here to minimize the time spent locking activeMu, - // since that lock is required every time an RPC starts and finishes. - data.ActiveTraceCount = make(map[string]int, len(data.Families)) - activeMu.RLock() - for fam, s := range activeTraces { - data.ActiveTraceCount[fam] = s.Len() - } - activeMu.RUnlock() - - var ok bool - data.Family, data.Bucket, ok = parseArgs(req) - switch { - case !ok: - // No-op - case data.Bucket == -1: - data.Active = true - n := data.ActiveTraceCount[data.Family] - data.Traces = getActiveTraces(data.Family) - if len(data.Traces) < n { - data.Total = n - } - case data.Bucket < bucketsPerFamily: - if b := lookupBucket(data.Family, data.Bucket); b != nil { - data.Traces = b.Copy(data.Traced) - } - default: - if f := getFamily(data.Family, false); f != nil { - var obs timeseries.Observable - f.LatencyMu.RLock() - switch o := data.Bucket - bucketsPerFamily; o { - case 0: - obs = f.Latency.Minute() - data.HistogramWindow = "last minute" - case 1: - obs = f.Latency.Hour() - data.HistogramWindow = "last hour" - case 2: - obs = f.Latency.Total() - data.HistogramWindow = "all time" - } - f.LatencyMu.RUnlock() - if obs != nil { - data.Histogram = obs.(*histogram).html() - } - } - } - - if data.Traces != nil { - defer data.Traces.Free() - sort.Sort(data.Traces) - } - - completedMu.RLock() - defer completedMu.RUnlock() - if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseArgs(req *http.Request) (fam string, b int, ok bool) { - if req == nil { - return "", 0, false - } - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < -1 { - return "", 0, false - } - - return fam, b, true -} - -func lookupBucket(fam string, b int) *traceBucket { - f := getFamily(fam, false) - if f == nil || b < 0 || b >= len(f.Buckets) { - return nil - } - return f.Buckets[b] -} - -type contextKeyT string - -var contextKey = contextKeyT("golang.org/x/net/trace.Trace") - -// Trace represents an active request. -type Trace interface { - // LazyLog adds x to the event log. It will be evaluated each time the - // /debug/requests page is rendered. Any memory referenced by x will be - // pinned until the trace is finished and later discarded. - LazyLog(x fmt.Stringer, sensitive bool) - - // LazyPrintf evaluates its arguments with fmt.Sprintf each time the - // /debug/requests page is rendered. Any memory referenced by a will be - // pinned until the trace is finished and later discarded. - LazyPrintf(format string, a ...interface{}) - - // SetError declares that this trace resulted in an error. - SetError() - - // SetRecycler sets a recycler for the trace. - // f will be called for each event passed to LazyLog at a time when - // it is no longer required, whether while the trace is still active - // and the event is discarded, or when a completed trace is discarded. 
- SetRecycler(f func(interface{})) - - // SetTraceInfo sets the trace info for the trace. - // This is currently unused. - SetTraceInfo(traceID, spanID uint64) - - // SetMaxEvents sets the maximum number of events that will be stored - // in the trace. This has no effect if any events have already been - // added to the trace. - SetMaxEvents(m int) - - // Finish declares that this trace is complete. - // The trace should not be used after calling this method. - Finish() -} - -type lazySprintf struct { - format string - a []interface{} -} - -func (l *lazySprintf) String() string { - return fmt.Sprintf(l.format, l.a...) -} - -// New returns a new Trace with the specified family and title. -func New(family, title string) Trace { - tr := newTrace() - tr.ref() - tr.Family, tr.Title = family, title - tr.Start = time.Now() - tr.maxEvents = maxEventsPerTrace - tr.events = tr.eventsBuf[:0] - - activeMu.RLock() - s := activeTraces[tr.Family] - activeMu.RUnlock() - if s == nil { - activeMu.Lock() - s = activeTraces[tr.Family] // check again - if s == nil { - s = new(traceSet) - activeTraces[tr.Family] = s - } - activeMu.Unlock() - } - s.Add(tr) - - // Trigger allocation of the completed trace structure for this family. - // This will cause the family to be present in the request page during - // the first trace of this family. We don't care about the return value, - // nor is there any need for this to run inline, so we execute it in its - // own goroutine, but only if the family isn't allocated yet. - completedMu.RLock() - if _, ok := completedTraces[tr.Family]; !ok { - go allocFamily(tr.Family) - } - completedMu.RUnlock() - - return tr -} - -func (tr *trace) Finish() { - elapsed := time.Now().Sub(tr.Start) - tr.mu.Lock() - tr.Elapsed = elapsed - tr.mu.Unlock() - - if DebugUseAfterFinish { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - tr.finishStack = buf[:n] - } - - activeMu.RLock() - m := activeTraces[tr.Family] - activeMu.RUnlock() - m.Remove(tr) - - f := getFamily(tr.Family, true) - tr.mu.RLock() // protects tr fields in Cond.match calls - for _, b := range f.Buckets { - if b.Cond.match(tr) { - b.Add(tr) - } - } - tr.mu.RUnlock() - - // Add a sample of elapsed time as microseconds to the family's timeseries - h := new(histogram) - h.addMeasurement(elapsed.Nanoseconds() / 1e3) - f.LatencyMu.Lock() - f.Latency.Add(h) - f.LatencyMu.Unlock() - - tr.unref() // matches ref in New -} - -const ( - bucketsPerFamily = 9 - tracesPerBucket = 10 - maxActiveTraces = 20 // Maximum number of active traces to show. - maxEventsPerTrace = 10 - numHistogramBuckets = 38 -) - -var ( - // The active traces. - activeMu sync.RWMutex - activeTraces = make(map[string]*traceSet) // family -> traces - - // Families of completed traces. - completedMu sync.RWMutex - completedTraces = make(map[string]*family) // family -> traces -) - -type traceSet struct { - mu sync.RWMutex - m map[*trace]bool - - // We could avoid the entire map scan in FirstN by having a slice of all the traces - // ordered by start time, and an index into that from the trace struct, with a periodic - // repack of the slice after enough traces finish; we could also use a skip list or similar. - // However, that would shift some of the expense from /debug/requests time to RPC time, - // which is probably the wrong trade-off. 
-} - -func (ts *traceSet) Len() int { - ts.mu.RLock() - defer ts.mu.RUnlock() - return len(ts.m) -} - -func (ts *traceSet) Add(tr *trace) { - ts.mu.Lock() - if ts.m == nil { - ts.m = make(map[*trace]bool) - } - ts.m[tr] = true - ts.mu.Unlock() -} - -func (ts *traceSet) Remove(tr *trace) { - ts.mu.Lock() - delete(ts.m, tr) - ts.mu.Unlock() -} - -// FirstN returns the first n traces ordered by time. -func (ts *traceSet) FirstN(n int) traceList { - ts.mu.RLock() - defer ts.mu.RUnlock() - - if n > len(ts.m) { - n = len(ts.m) - } - trl := make(traceList, 0, n) - - // Fast path for when no selectivity is needed. - if n == len(ts.m) { - for tr := range ts.m { - tr.ref() - trl = append(trl, tr) - } - sort.Sort(trl) - return trl - } - - // Pick the oldest n traces. - // This is inefficient. See the comment in the traceSet struct. - for tr := range ts.m { - // Put the first n traces into trl in the order they occur. - // When we have n, sort trl, and thereafter maintain its order. - if len(trl) < n { - tr.ref() - trl = append(trl, tr) - if len(trl) == n { - // This is guaranteed to happen exactly once during this loop. - sort.Sort(trl) - } - continue - } - if tr.Start.After(trl[n-1].Start) { - continue - } - - // Find where to insert this one. - tr.ref() - i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) - trl[n-1].unref() - copy(trl[i+1:], trl[i:]) - trl[i] = tr - } - - return trl -} - -func getActiveTraces(fam string) traceList { - activeMu.RLock() - s := activeTraces[fam] - activeMu.RUnlock() - if s == nil { - return nil - } - return s.FirstN(maxActiveTraces) -} - -func getFamily(fam string, allocNew bool) *family { - completedMu.RLock() - f := completedTraces[fam] - completedMu.RUnlock() - if f == nil && allocNew { - f = allocFamily(fam) - } - return f -} - -func allocFamily(fam string) *family { - completedMu.Lock() - defer completedMu.Unlock() - f := completedTraces[fam] - if f == nil { - f = newFamily() - completedTraces[fam] = f - } - return f -} - -// family represents a set of trace buckets and associated latency information. -type family struct { - // traces may occur in multiple buckets. - Buckets [bucketsPerFamily]*traceBucket - - // latency time series - LatencyMu sync.RWMutex - Latency *timeseries.MinuteHourSeries -} - -func newFamily() *family { - return &family{ - Buckets: [bucketsPerFamily]*traceBucket{ - {Cond: minCond(0)}, - {Cond: minCond(50 * time.Millisecond)}, - {Cond: minCond(100 * time.Millisecond)}, - {Cond: minCond(200 * time.Millisecond)}, - {Cond: minCond(500 * time.Millisecond)}, - {Cond: minCond(1 * time.Second)}, - {Cond: minCond(10 * time.Second)}, - {Cond: minCond(100 * time.Second)}, - {Cond: errorCond{}}, - }, - Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), - } -} - -// traceBucket represents a size-capped bucket of historic traces, -// along with a condition for a trace to belong to the bucket. -type traceBucket struct { - Cond cond - - // Ring buffer implementation of a fixed-size FIFO queue. - mu sync.RWMutex - buf [tracesPerBucket]*trace - start int // < tracesPerBucket - length int // <= tracesPerBucket -} - -func (b *traceBucket) Add(tr *trace) { - b.mu.Lock() - defer b.mu.Unlock() - - i := b.start + b.length - if i >= tracesPerBucket { - i -= tracesPerBucket - } - if b.length == tracesPerBucket { - // "Remove" an element from the bucket. 
- b.buf[i].unref() - b.start++ - if b.start == tracesPerBucket { - b.start = 0 - } - } - b.buf[i] = tr - if b.length < tracesPerBucket { - b.length++ - } - tr.ref() -} - -// Copy returns a copy of the traces in the bucket. -// If tracedOnly is true, only the traces with trace information will be returned. -// The logs will be ref'd before returning; the caller should call -// the Free method when it is done with them. -// TODO(dsymonds): keep track of traced requests in separate buckets. -func (b *traceBucket) Copy(tracedOnly bool) traceList { - b.mu.RLock() - defer b.mu.RUnlock() - - trl := make(traceList, 0, b.length) - for i, x := 0, b.start; i < b.length; i++ { - tr := b.buf[x] - if !tracedOnly || tr.spanID != 0 { - tr.ref() - trl = append(trl, tr) - } - x++ - if x == b.length { - x = 0 - } - } - return trl -} - -func (b *traceBucket) Empty() bool { - b.mu.RLock() - defer b.mu.RUnlock() - return b.length == 0 -} - -// cond represents a condition on a trace. -type cond interface { - match(t *trace) bool - String() string -} - -type minCond time.Duration - -func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } -func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } - -type errorCond struct{} - -func (e errorCond) match(t *trace) bool { return t.IsError } -func (e errorCond) String() string { return "errors" } - -type traceList []*trace - -// Free calls unref on each element of the list. -func (trl traceList) Free() { - for _, t := range trl { - t.unref() - } -} - -// traceList may be sorted in reverse chronological order. -func (trl traceList) Len() int { return len(trl) } -func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } -func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } - -// An event is a timestamped log entry in a trace. -type event struct { - When time.Time - Elapsed time.Duration // since previous event in trace - NewDay bool // whether this event is on a different day to the previous event - Recyclable bool // whether this event was passed via LazyLog - Sensitive bool // whether this event contains sensitive information - What interface{} // string or fmt.Stringer -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e event) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// discarded represents a number of discarded events. -// It is stored as *discarded to make it easier to update in-place. -type discarded int - -func (d *discarded) String() string { - return fmt.Sprintf("(%d events discarded)", int(*d)) -} - -// trace represents an active or complete request, -// either sent or received by this program. -type trace struct { - // Family is the top-level grouping of traces to which this belongs. - Family string - - // Title is the title of this trace. - Title string - - // Start time of the this trace. - Start time.Time - - mu sync.RWMutex - events []event // Append-only sequence of events (modulo discards). - maxEvents int - recycler func(interface{}) - IsError bool // Whether this trace resulted in an error. - Elapsed time.Duration // Elapsed time for this trace, zero while active. - traceID uint64 // Trace information if non-zero. 
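Editor's note: for readers skimming the removal, traceBucket above is a plain fixed-size ring buffer that overwrites the oldest trace once tracesPerBucket entries are held. A standalone, hypothetical sketch of the same overwrite-on-full bookkeeping (names and element type are ours):

package example

// ring keeps the most recent len(buf) values, discarding the oldest on
// overflow, mirroring the start/length bookkeeping in traceBucket.Add above.
type ring struct {
	buf           [10]string
	start, length int
}

func (r *ring) add(v string) {
	i := (r.start + r.length) % len(r.buf)
	if r.length == len(r.buf) {
		// Full: slot i currently holds the oldest value; drop it.
		r.start = (r.start + 1) % len(r.buf)
	} else {
		r.length++
	}
	r.buf[i] = v
}

// snapshot returns the retained values in insertion order.
func (r *ring) snapshot() []string {
	out := make([]string, 0, r.length)
	for k := 0; k < r.length; k++ {
		out = append(out, r.buf[(r.start+k)%len(r.buf)])
	}
	return out
}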
- spanID uint64 - - refs int32 // how many buckets this is in - disc discarded // scratch space to avoid allocation - - finishStack []byte // where finish was called, if DebugUseAfterFinish is set - - eventsBuf [4]event // preallocated buffer in case we only log a few events -} - -func (tr *trace) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - tr.Family = "" - tr.Title = "" - tr.Start = time.Time{} - - tr.mu.Lock() - tr.Elapsed = 0 - tr.traceID = 0 - tr.spanID = 0 - tr.IsError = false - tr.maxEvents = 0 - tr.events = nil - tr.recycler = nil - tr.mu.Unlock() - - tr.refs = 0 - tr.disc = 0 - tr.finishStack = nil - for i := range tr.eventsBuf { - tr.eventsBuf[i] = event{} - } -} - -// delta returns the elapsed time since the last event or the trace start, -// and whether it spans midnight. -// L >= tr.mu -func (tr *trace) delta(t time.Time) (time.Duration, bool) { - if len(tr.events) == 0 { - return t.Sub(tr.Start), false - } - prev := tr.events[len(tr.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() -} - -func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { - if DebugUseAfterFinish && tr.finishStack != nil { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) - } - - /* - NOTE TO DEBUGGERS - - If you are here because your program panicked in this code, - it is almost definitely the fault of code using this package, - and very unlikely to be the fault of this code. - - The most likely scenario is that some code elsewhere is using - a trace.Trace after its Finish method is called. - You can temporarily set the DebugUseAfterFinish var - to help discover where that is; do not leave that var set, - since it makes this package much less efficient. - */ - - e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} - tr.mu.Lock() - e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < tr.maxEvents { - tr.events = append(tr.events, e) - } else { - // Discard the middle events. - di := int((tr.maxEvents - 1) / 2) - if d, ok := tr.events[di].What.(*discarded); ok { - (*d)++ - } else { - // disc starts at two to count for the event it is replacing, - // plus the next one that we are about to drop. - tr.disc = 2 - if tr.recycler != nil && tr.events[di].Recyclable { - go tr.recycler(tr.events[di].What) - } - tr.events[di].What = &tr.disc - } - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - tr.events[di].When = tr.events[di+1].When - - if tr.recycler != nil && tr.events[di+1].Recyclable { - go tr.recycler(tr.events[di+1].What) - } - copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[tr.maxEvents-1] = e - } - tr.mu.Unlock() -} - -func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { - tr.addEvent(x, true, sensitive) -} - -func (tr *trace) LazyPrintf(format string, a ...interface{}) { - tr.addEvent(&lazySprintf{format, a}, false, false) -} - -func (tr *trace) SetError() { - tr.mu.Lock() - tr.IsError = true - tr.mu.Unlock() -} - -func (tr *trace) SetRecycler(f func(interface{})) { - tr.mu.Lock() - tr.recycler = f - tr.mu.Unlock() -} - -func (tr *trace) SetTraceInfo(traceID, spanID uint64) { - tr.mu.Lock() - tr.traceID, tr.spanID = traceID, spanID - tr.mu.Unlock() -} - -func (tr *trace) SetMaxEvents(m int) { - tr.mu.Lock() - // Always keep at least three events: first, discarded count, last. 
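Editor's note: the addEvent logic above caps each trace at maxEvents entries by collapsing the middle of the log into a single "(n events discarded)" marker. A hypothetical caller-side view of that policy (the rendered output described in the comment is indicative, not asserted):

package example

import "golang.org/x/net/trace"

func logManySteps(tr trace.Trace) {
	tr.SetMaxEvents(4) // only honoured before the first event, and only if > 3
	for i := 0; i < 10; i++ {
		tr.LazyPrintf("step %d", i)
	}
	// /debug/requests then shows roughly: the first step, one
	// "(n events discarded)" marker in place of the middle steps,
	// and the most recent steps.
}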
- if len(tr.events) == 0 && m > 3 { - tr.maxEvents = m - } - tr.mu.Unlock() -} - -func (tr *trace) ref() { - atomic.AddInt32(&tr.refs, 1) -} - -func (tr *trace) unref() { - if atomic.AddInt32(&tr.refs, -1) == 0 { - tr.mu.RLock() - if tr.recycler != nil { - // freeTrace clears tr, so we hold tr.recycler and tr.events here. - go func(f func(interface{}), es []event) { - for _, e := range es { - if e.Recyclable { - f(e.What) - } - } - }(tr.recycler, tr.events) - } - tr.mu.RUnlock() - - freeTrace(tr) - } -} - -func (tr *trace) When() string { - return tr.Start.Format("2006/01/02 15:04:05.000000") -} - -func (tr *trace) ElapsedTime() string { - tr.mu.RLock() - t := tr.Elapsed - tr.mu.RUnlock() - - if t == 0 { - // Active trace. - t = time.Since(tr.Start) - } - return fmt.Sprintf("%.6f", t.Seconds()) -} - -func (tr *trace) Events() []event { - tr.mu.RLock() - defer tr.mu.RUnlock() - return tr.events -} - -var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? - -// newTrace returns a trace ready to use. -func newTrace() *trace { - select { - case tr := <-traceFreeList: - return tr - default: - return new(trace) - } -} - -// freeTrace adds tr to traceFreeList if there's room. -// This is non-blocking. -func freeTrace(tr *trace) { - if DebugUseAfterFinish { - return // never reuse - } - tr.reset() - select { - case traceFreeList <- tr: - default: - } -} - -func elapsed(d time.Duration) string { - b := []byte(fmt.Sprintf("%.6f", d.Seconds())) - - // For subsecond durations, blank all zeros before decimal point, - // and all zeros between the decimal point and the first non-zero digit. - if d < time.Second { - dot := bytes.IndexByte(b, '.') - for i := 0; i < dot; i++ { - b[i] = ' ' - } - for i := dot + 1; i < len(b); i++ { - if b[i] == '0' { - b[i] = ' ' - } else { - break - } - } - } - - return string(b) -} - -var pageTmplCache *template.Template -var pageTmplOnce sync.Once - -func pageTmpl() *template.Template { - pageTmplOnce.Do(func() { - pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, - }).Parse(pageHTML)) - }) - return pageTmplCache -} - -const pageHTML = ` -{{template "Prolog" .}} -{{template "StatusTable" .}} -{{template "Epilog" .}} - -{{define "Prolog"}} - - - /debug/requests - - - - -
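Editor's note: newTrace/freeTrace above implement a non-blocking free list with a buffered channel, and the TODO in that code asks whether sync.Pool would do. A hypothetical sync.Pool version of the same recycling idea, with the trace fields elided:

package example

import "sync"

type trace struct{ /* fields elided for the sketch */ }

var tracePool = sync.Pool{New: func() interface{} { return new(trace) }}

func newTrace() *trace { return tracePool.Get().(*trace) }

func freeTrace(tr *trace) {
	*tr = trace{} // reset, as the channel-based version does via tr.reset()
	tracePool.Put(tr)
}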

/debug/requests

-{{end}} {{/* end of Prolog */}} - -{{define "StatusTable"}} - - {{range $fam := .Families}} - - - - {{$n := index $.ActiveTraceCount $fam}} - - - {{$f := index $.CompletedTraces $fam}} - {{range $i, $b := $f.Buckets}} - {{$empty := $b.Empty}} - - {{end}} - - {{$nb := len $f.Buckets}} - - - - - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} active] - {{if $n}}{{end}} - - {{if not $empty}}{{end}} - [{{.Cond}}] - {{if not $empty}}{{end}} - - [minute] - - [hour] - - [total] -
-{{end}} {{/* end of StatusTable */}} - -{{define "Epilog"}} -{{if $.Traces}} -
-

Family: {{$.Family}}

- -{{if or $.Expanded $.Traced}} - [Normal/Summary] -{{else}} - [Normal/Summary] -{{end}} - -{{if or (not $.Expanded) $.Traced}} - [Normal/Expanded] -{{else}} - [Normal/Expanded] -{{end}} - -{{if not $.Active}} - {{if or $.Expanded (not $.Traced)}} - [Traced/Summary] - {{else}} - [Traced/Summary] - {{end}} - {{if or (not $.Expanded) (not $.Traced)}} - [Traced/Expanded] - {{else}} - [Traced/Expanded] - {{end}} -{{end}} - -{{if $.Total}} -

Showing {{len $.Traces}} of {{$.Total}} traces.

-{{end}} - - - - - {{range $tr := $.Traces}} - - - - - {{/* TODO: include traceID/spanID */}} - - {{if $.Expanded}} - {{range $tr.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
- {{if $.Active}}Active{{else}}Completed{{end}} Requests -
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
-{{end}} {{/* if $.Traces */}} - -{{if $.Histogram}} -

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

-{{$.Histogram}} -{{end}} {{/* if $.Histogram */}} - - - -{{end}} {{/* end of Epilog */}} -` diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/sys/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/sys/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/sys/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/sys/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s deleted file mode 100644 index 06f84b855..000000000 --- a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo - -#include "textflag.h" - -// -// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go -// - -TEXT ·syscall6(SB),NOSPLIT,$0-88 - JMP syscall·syscall6(SB) - -TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 - JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go deleted file mode 100644 index da6b9e436..000000000 --- a/vendor/golang.org/x/sys/cpu/byteorder.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "encoding/binary" - "runtime" -) - -// hostByteOrder returns binary.LittleEndian on little-endian machines and -// binary.BigEndian on big-endian machines. -func hostByteOrder() binary.ByteOrder { - switch runtime.GOARCH { - case "386", "amd64", "amd64p32", - "arm", "arm64", - "mipsle", "mips64le", "mips64p32le", - "ppc64le", - "riscv", "riscv64": - return binary.LittleEndian - case "armbe", "arm64be", - "mips", "mips64", "mips64p32", - "ppc", "ppc64", - "s390", "s390x", - "sparc", "sparc64": - return binary.BigEndian - } - panic("unknown architecture") -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go deleted file mode 100644 index 679e78c2c..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cpu implements processor feature detection for -// various CPU architectures. -package cpu - -// Initialized reports whether the CPU features were initialized. -// -// For some GOOS/GOARCH combinations initialization of the CPU features depends -// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm -// Initialized will report false if reading the file fails. -var Initialized bool - -// CacheLinePad is used to pad structs to avoid false sharing. -type CacheLinePad struct{ _ [cacheLineSize]byte } - -// X86 contains the supported CPU features of the -// current X86/AMD64 platform. If the current platform -// is not X86/AMD64 then all feature flags are false. -// -// X86 is padded to avoid false sharing. Further the HasAVX -// and HasAVX2 are only set if the OS supports XMM and YMM -// registers in addition to the CPUID feature bit being set. 
-var X86 struct { - _ CacheLinePad - HasAES bool // AES hardware implementation (AES NI) - HasADX bool // Multi-precision add-carry instruction extensions - HasAVX bool // Advanced vector extension - HasAVX2 bool // Advanced vector extension 2 - HasBMI1 bool // Bit manipulation instruction set 1 - HasBMI2 bool // Bit manipulation instruction set 2 - HasERMS bool // Enhanced REP for MOVSB and STOSB - HasFMA bool // Fused-multiply-add instructions - HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. - HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM - HasPOPCNT bool // Hamming weight instruction POPCNT. - HasRDRAND bool // RDRAND instruction (on-chip random number generator) - HasRDSEED bool // RDSEED instruction (on-chip random number generator) - HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) - HasSSE3 bool // Streaming SIMD extension 3 - HasSSSE3 bool // Supplemental streaming SIMD extension 3 - HasSSE41 bool // Streaming SIMD extension 4 and 4.1 - HasSSE42 bool // Streaming SIMD extension 4 and 4.2 - _ CacheLinePad -} - -// ARM64 contains the supported CPU features of the -// current ARMv8(aarch64) platform. If the current platform -// is not arm64 then all feature flags are false. -var ARM64 struct { - _ CacheLinePad - HasFP bool // Floating-point instruction set (always available) - HasASIMD bool // Advanced SIMD (always available) - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - HasATOMICS bool // Atomic memory operation instruction set - HasFPHP bool // Half precision floating-point instruction set - HasASIMDHP bool // Advanced SIMD half precision instruction set - HasCPUID bool // CPUID identification scheme registers - HasASIMDRDM bool // Rounding double multiply add/subtract instruction set - HasJSCVT bool // Javascript conversion from floating-point to integer - HasFCMA bool // Floating-point multiplication and addition of complex numbers - HasLRCPC bool // Release Consistent processor consistent support - HasDCPOP bool // Persistent memory support - HasSHA3 bool // SHA3 hardware implementation - HasSM3 bool // SM3 hardware implementation - HasSM4 bool // SM4 hardware implementation - HasASIMDDP bool // Advanced SIMD double precision instruction set - HasSHA512 bool // SHA512 hardware implementation - HasSVE bool // Scalable Vector Extensions - HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 - _ CacheLinePad -} - -// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. -// If the current platform is not ppc64/ppc64le then all feature flags are false. -// -// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, -// since there are no optional categories. There are some exceptions that also -// require kernel support to work (DARN, SCV), so there are feature bits for -// those as well. The minimum processor requirement is POWER8 (ISA 2.07). -// The struct is padded to avoid false sharing. 
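Editor's note: this hunk removes the vendored golang.org/x/sys/cpu package. For orientation, consumers read these structs as plain package-level booleans populated during init; a hypothetical example (the function name is ours):

package example

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func reportFeatures() {
	if cpu.X86.HasAVX2 {
		fmt.Println("AVX2 present: a vectorized code path could be selected")
	}
	if cpu.ARM64.HasAES {
		fmt.Println("ARMv8 AES instructions present")
	}
	if !cpu.Initialized {
		fmt.Println("feature detection could not be completed on this platform")
	}
}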
-var PPC64 struct { - _ CacheLinePad - HasDARN bool // Hardware random number generator (requires kernel enablement) - HasSCV bool // Syscall vectored (requires kernel enablement) - IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9) - _ CacheLinePad -} - -// S390X contains the supported CPU features of the current IBM Z -// (s390x) platform. If the current platform is not IBM Z then all -// feature flags are false. -// -// S390X is padded to avoid false sharing. Further HasVX is only set -// if the OS supports vector registers in addition to the STFLE -// feature bit being set. -var S390X struct { - _ CacheLinePad - HasZARCH bool // z/Architecture mode is active [mandatory] - HasSTFLE bool // store facility list extended - HasLDISP bool // long (20-bit) displacements - HasEIMM bool // 32-bit immediates - HasDFP bool // decimal floating point - HasETF3EH bool // ETF-3 enhanced - HasMSA bool // message security assist (CPACF) - HasAES bool // KM-AES{128,192,256} functions - HasAESCBC bool // KMC-AES{128,192,256} functions - HasAESCTR bool // KMCTR-AES{128,192,256} functions - HasAESGCM bool // KMA-GCM-AES{128,192,256} functions - HasGHASH bool // KIMD-GHASH function - HasSHA1 bool // K{I,L}MD-SHA-1 functions - HasSHA256 bool // K{I,L}MD-SHA-256 functions - HasSHA512 bool // K{I,L}MD-SHA-512 functions - HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions - HasVX bool // vector facility - HasVXE bool // vector-enhancements facility 1 - _ CacheLinePad -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go deleted file mode 100644 index be6027224..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build aix,ppc64 - -package cpu - -const cacheLineSize = 128 - -const ( - // getsystemcfg constants - _SC_IMPL = 2 - _IMPL_POWER8 = 0x10000 - _IMPL_POWER9 = 0x20000 -) - -func init() { - impl := getsystemcfg(_SC_IMPL) - if impl&_IMPL_POWER8 != 0 { - PPC64.IsPOWER8 = true - } - if impl&_IMPL_POWER9 != 0 { - PPC64.IsPOWER9 = true - } - - Initialized = true -} - -func getsystemcfg(label int) (n uint64) { - r0, _ := callgetsystemcfg(label) - n = uint64(r0) - return -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go deleted file mode 100644 index 7f2348b7d..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_arm.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const cacheLineSize = 32 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go deleted file mode 100644 index 568bcd031..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return true } - -// The following feature detection functions are defined in cpu_s390x.s. 
-// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList -func kmQuery() queryResult -func kmcQuery() queryResult -func kmctrQuery() queryResult -func kmaQuery() queryResult -func kimdQuery() queryResult -func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go deleted file mode 100644 index f7cb46971..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 -// +build !gccgo - -package cpu - -// cpuid is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) - -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c deleted file mode 100644 index e363c7d13..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 -// +build gccgo - -#include -#include - -// Need to wrap __get_cpuid_count because it's declared as static. -int -gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) -{ - return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); -} - -// xgetbv reads the contents of an XCR (Extended Control Register) -// specified in the ECX register into registers EDX:EAX. -// Currently, the only supported value for XCR is 0. -// -// TODO: Replace with a better alternative: -// -// #include -// -// #pragma GCC target("xsave") -// -// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { -// unsigned long long x = _xgetbv(0); -// *eax = x & 0xffffffff; -// *edx = (x >> 32) & 0xffffffff; -// } -// -// Note that _xgetbv is defined starting with GCC 8. -void -gccgoXgetbv(uint32_t *eax, uint32_t *edx) -{ - __asm(" xorl %%ecx, %%ecx\n" - " xgetbv" - : "=a"(*eax), "=d"(*edx)); -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go deleted file mode 100644 index ba49b91bd..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 -// +build gccgo - -package cpu - -//extern gccgoGetCpuidCount -func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) - -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { - var a, b, c, d uint32 - gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) - return a, b, c, d -} - -//extern gccgoXgetbv -func gccgoXgetbv(eax, edx *uint32) - -func xgetbv() (eax, edx uint32) { - var a, d uint32 - gccgoXgetbv(&a, &d) - return a, d -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go deleted file mode 100644 index aa986f778..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gccgo - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return false } - -// TODO(mundaym): the following feature detection functions are currently -// stubs. See https://golang.org/cl/162887 for how to fix this. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList { panic("not implemented for gccgo") } -func kmQuery() queryResult { panic("not implemented for gccgo") } -func kmcQuery() queryResult { panic("not implemented for gccgo") } -func kmctrQuery() queryResult { panic("not implemented for gccgo") } -func kmaQuery() queryResult { panic("not implemented for gccgo") } -func kimdQuery() queryResult { panic("not implemented for gccgo") } -func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go deleted file mode 100644 index 76b5f507f..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !amd64,!amd64p32,!386 - -package cpu - -import ( - "io/ioutil" -) - -const ( - _AT_HWCAP = 16 - _AT_HWCAP2 = 26 - - procAuxv = "/proc/self/auxv" - - uintSize = int(32 << (^uint(0) >> 63)) -) - -// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 -// These are initialized in cpu_$GOARCH.go -// and should not be changed after they are initialized. -var hwCap uint -var hwCap2 uint - -func init() { - buf, err := ioutil.ReadFile(procAuxv) - if err != nil { - // e.g. on android /proc/self/auxv is not accessible, so silently - // ignore the error and leave Initialized = false - return - } - - bo := hostByteOrder() - for len(buf) >= 2*(uintSize/8) { - var tag, val uint - switch uintSize { - case 32: - tag = uint(bo.Uint32(buf[0:])) - val = uint(bo.Uint32(buf[4:])) - buf = buf[8:] - case 64: - tag = uint(bo.Uint64(buf[0:])) - val = uint(bo.Uint64(buf[8:])) - buf = buf[16:] - } - switch tag { - case _AT_HWCAP: - hwCap = val - case _AT_HWCAP2: - hwCap2 = val - } - } - doinit() - - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go deleted file mode 100644 index fa7fb1bd7..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const cacheLineSize = 64 - -// HWCAP/HWCAP2 bits. These are exposed by Linux. 
-const ( - hwcap_FP = 1 << 0 - hwcap_ASIMD = 1 << 1 - hwcap_EVTSTRM = 1 << 2 - hwcap_AES = 1 << 3 - hwcap_PMULL = 1 << 4 - hwcap_SHA1 = 1 << 5 - hwcap_SHA2 = 1 << 6 - hwcap_CRC32 = 1 << 7 - hwcap_ATOMICS = 1 << 8 - hwcap_FPHP = 1 << 9 - hwcap_ASIMDHP = 1 << 10 - hwcap_CPUID = 1 << 11 - hwcap_ASIMDRDM = 1 << 12 - hwcap_JSCVT = 1 << 13 - hwcap_FCMA = 1 << 14 - hwcap_LRCPC = 1 << 15 - hwcap_DCPOP = 1 << 16 - hwcap_SHA3 = 1 << 17 - hwcap_SM3 = 1 << 18 - hwcap_SM4 = 1 << 19 - hwcap_ASIMDDP = 1 << 20 - hwcap_SHA512 = 1 << 21 - hwcap_SVE = 1 << 22 - hwcap_ASIMDFHM = 1 << 23 -) - -func doinit() { - // HWCAP feature bits - ARM64.HasFP = isSet(hwCap, hwcap_FP) - ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) - ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM64.HasAES = isSet(hwCap, hwcap_AES) - ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) - ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) - ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) - ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) - ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) - ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) - ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) - ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) - ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) - ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) - ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) - ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) - ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) - ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) - ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) - ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) - ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) - ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) - ARM64.HasSVE = isSet(hwCap, hwcap_SVE) - ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go deleted file mode 100644 index 6c8d975d4..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux -// +build ppc64 ppc64le - -package cpu - -const cacheLineSize = 128 - -// HWCAP/HWCAP2 bits. These are exposed by the kernel. -const ( - // ISA Level - _PPC_FEATURE2_ARCH_2_07 = 0x80000000 - _PPC_FEATURE2_ARCH_3_00 = 0x00800000 - - // CPU features - _PPC_FEATURE2_DARN = 0x00200000 - _PPC_FEATURE2_SCV = 0x00100000 -) - -func doinit() { - // HWCAP2 feature bits - PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) - PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) - PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) - PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go deleted file mode 100644 index d579eaef4..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
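Editor's note: the Linux files above all follow the same pattern: the kernel supplies HWCAP/HWCAP2 bit masks through the ELF auxiliary vector, and each exported flag is a one-bit test against them. A small, hypothetical sketch of that decoding step (bit positions taken from the arm64 table above; the helper name is ours):

package example

const (
	hwcapFP    = 1 << 0
	hwcapASIMD = 1 << 1
	hwcapAES   = 1 << 3
)

// decodeHWCAP mirrors the isSet helper above: a feature is present exactly
// when its bit is set in the mask handed over by the kernel.
func decodeHWCAP(hwcap uint) (fp, asimd, aes bool) {
	isSet := func(bit uint) bool { return hwcap&bit != 0 }
	return isSet(hwcapFP), isSet(hwcapASIMD), isSet(hwcapAES)
}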
- -package cpu - -const cacheLineSize = 256 - -const ( - // bit mask values from /usr/include/bits/hwcap.h - hwcap_ZARCH = 2 - hwcap_STFLE = 4 - hwcap_MSA = 8 - hwcap_LDISP = 16 - hwcap_EIMM = 32 - hwcap_DFP = 64 - hwcap_ETF3EH = 256 - hwcap_VX = 2048 - hwcap_VXE = 8192 -) - -// bitIsSet reports whether the bit at index is set. The bit index -// is in big endian order, so bit index 0 is the leftmost bit. -func bitIsSet(bits []uint64, index uint) bool { - return bits[index/64]&((1<<63)>>(index%64)) != 0 -} - -// function is the code for the named cryptographic function. -type function uint8 - -const ( - // KM{,A,C,CTR} function codes - aes128 function = 18 // AES-128 - aes192 function = 19 // AES-192 - aes256 function = 20 // AES-256 - - // K{I,L}MD function codes - sha1 function = 1 // SHA-1 - sha256 function = 2 // SHA-256 - sha512 function = 3 // SHA-512 - sha3_224 function = 32 // SHA3-224 - sha3_256 function = 33 // SHA3-256 - sha3_384 function = 34 // SHA3-384 - sha3_512 function = 35 // SHA3-512 - shake128 function = 36 // SHAKE-128 - shake256 function = 37 // SHAKE-256 - - // KLMD function codes - ghash function = 65 // GHASH -) - -// queryResult contains the result of a Query function -// call. Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type queryResult struct { - bits [2]uint64 -} - -// Has reports whether the given functions are present. -func (q *queryResult) Has(fns ...function) bool { - if len(fns) == 0 { - panic("no function codes provided") - } - for _, f := range fns { - if !bitIsSet(q.bits[:], uint(f)) { - return false - } - } - return true -} - -// facility is a bit index for the named facility. -type facility uint8 - -const ( - // cryptography facilities - msa4 facility = 77 // message-security-assist extension 4 - msa8 facility = 146 // message-security-assist extension 8 -) - -// facilityList contains the result of an STFLE call. -// Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type facilityList struct { - bits [4]uint64 -} - -// Has reports whether the given facilities are present. -func (s *facilityList) Has(fs ...facility) bool { - if len(fs) == 0 { - panic("no facility bits provided") - } - for _, f := range fs { - if !bitIsSet(s.bits[:], uint(f)) { - return false - } - } - return true -} - -func doinit() { - // test HWCAP bit vector - has := func(featureMask uint) bool { - return hwCap&featureMask == featureMask - } - - // mandatory - S390X.HasZARCH = has(hwcap_ZARCH) - - // optional - S390X.HasSTFLE = has(hwcap_STFLE) - S390X.HasLDISP = has(hwcap_LDISP) - S390X.HasEIMM = has(hwcap_EIMM) - S390X.HasETF3EH = has(hwcap_ETF3EH) - S390X.HasDFP = has(hwcap_DFP) - S390X.HasMSA = has(hwcap_MSA) - S390X.HasVX = has(hwcap_VX) - if S390X.HasVX { - S390X.HasVXE = has(hwcap_VXE) - } - - // We need implementations of stfle, km and so on - // to detect cryptographic features. - if !haveAsmFunctions() { - return - } - - // optional cryptographic functions - if S390X.HasMSA { - aes := []function{aes128, aes192, aes256} - - // cipher message - km, kmc := kmQuery(), kmcQuery() - S390X.HasAES = km.Has(aes...) - S390X.HasAESCBC = kmc.Has(aes...) - if S390X.HasSTFLE { - facilities := stfle() - if facilities.Has(msa4) { - kmctr := kmctrQuery() - S390X.HasAESCTR = kmctr.Has(aes...) - } - if facilities.Has(msa8) { - kma := kmaQuery() - S390X.HasAESGCM = kma.Has(aes...) 
- } - } - - // compute message digest - kimd := kimdQuery() // intermediate (no padding) - klmd := klmdQuery() // last (padding) - S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) - S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) - S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) - S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist - sha3 := []function{ - sha3_224, sha3_256, sha3_384, sha3_512, - shake128, shake256, - } - S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go deleted file mode 100644 index f55e0c82c..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build mips64 mips64le - -package cpu - -const cacheLineSize = 32 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go deleted file mode 100644 index cda87b1a1..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build mips mipsle - -package cpu - -const cacheLineSize = 32 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go deleted file mode 100644 index dd1e76dc9..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !linux,arm64 - -package cpu - -const cacheLineSize = 64 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s deleted file mode 100644 index e5037d92e..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
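Editor's note: bitIsSet in cpu_linux_s390x.go above numbers bits in big-endian order (bit 0 is the most significant bit of the first doubleword), matching how STFLE and the KM*-Query instructions report their results. A hypothetical worked example of that indexing, reusing the facility number 77 defined above:

package example

// bitIsSet reproduces the helper above: big-endian bit numbering over a
// slice of 64-bit words, so index 0 is the leftmost bit of word 0.
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func hasMSA4(facilities [4]uint64) bool {
	return bitIsSet(facilities[:], 77) // facility 77 = message-security-assist extension 4
}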
- -// +build !gccgo - -#include "textflag.h" - -// func stfle() facilityList -TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 - MOVD $ret+0(FP), R1 - MOVD $3, R0 // last doubleword index to store - XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) - WORD $0xb2b01000 // store facility list extended (STFLE) - RET - -// func kmQuery() queryResult -TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KM-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92E0024 // cipher message (KM) - RET - -// func kmcQuery() queryResult -TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMC-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92F0024 // cipher message with chaining (KMC) - RET - -// func kmctrQuery() queryResult -TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMCTR-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92D4024 // cipher message with counter (KMCTR) - RET - -// func kmaQuery() queryResult -TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMA-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xb9296024 // cipher message with authentication (KMA) - RET - -// func kimdQuery() queryResult -TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KIMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93E0024 // compute intermediate message digest (KIMD) - RET - -// func klmdQuery() queryResult -TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KLMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93F0024 // compute last message digest (KLMD) - RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go deleted file mode 100644 index bd9bbda0c..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build wasm - -package cpu - -// We're compiling the cpu package for an unknown (software-abstracted) CPU. -// Make CacheLinePad an empty struct and hope that the usual struct alignment -// rules are good enough. - -const cacheLineSize = 0 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go deleted file mode 100644 index d70d317f5..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 - -package cpu - -const cacheLineSize = 64 - -func init() { - Initialized = true - - maxID, _, _, _ := cpuid(0, 0) - - if maxID < 1 { - return - } - - _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) - - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - X86.HasSSE41 = isSet(19, ecx1) - X86.HasSSE42 = isSet(20, ecx1) - X86.HasPOPCNT = isSet(23, ecx1) - X86.HasAES = isSet(25, ecx1) - X86.HasOSXSAVE = isSet(27, ecx1) - X86.HasRDRAND = isSet(30, ecx1) - - osSupportsAVX := false - // For XGETBV, OSXSAVE bit is required and sufficient. 
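Editor's note: the comment above and the lines that follow implement the usual two-part AVX check: the CPUID feature bit plus confirmation via XGETBV that the OS saves XMM and YMM state. A hypothetical, self-contained restatement of that gate (function and parameter names are ours):

package example

// avxUsable mirrors the OS-support gate in cpu_x86.go below: AVX is only
// reported when both the CPUID bit and XCR0 bits 1 (XMM) and 2 (YMM) are set.
func avxUsable(cpuidAVX, hasOSXSAVE bool, xcr0 uint32) bool {
	const xmmAndYMM = 1<<1 | 1<<2
	osSupportsAVX := hasOSXSAVE && xcr0&xmmAndYMM == xmmAndYMM
	return cpuidAVX && osSupportsAVX
}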
- if X86.HasOSXSAVE { - eax, _ := xgetbv() - // Check if XMM and YMM registers have OS support. - osSupportsAVX = isSet(1, eax) && isSet(2, eax) - } - - X86.HasAVX = isSet(28, ecx1) && osSupportsAVX - - if maxID < 7 { - return - } - - _, ebx7, _, _ := cpuid(7, 0) - X86.HasBMI1 = isSet(3, ebx7) - X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX - X86.HasBMI2 = isSet(8, ebx7) - X86.HasERMS = isSet(9, ebx7) - X86.HasRDSEED = isSet(18, ebx7) - X86.HasADX = isSet(19, ebx7) -} - -func isSet(bitpos uint, value uint32) bool { - return value&(1<